mm: do_fault(): extract the call to vm_ops->fault() into a separate function
author Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Thu, 3 Apr 2014 21:48:10 +0000 (14:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 3 Apr 2014 23:21:03 +0000 (16:21 -0700)
Extract the call to vm_ops->fault() and the basic error handling around it
into a separate function, __do_fault().  The code will be reused.
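
For reference, the contract being factored out: vm_ops->fault() returns a
VM_FAULT_* code and, on success, hands back a referenced page in vmf->page,
optionally already locked (VM_FAULT_LOCKED).  A minimal sketch of such a
handler (not part of this patch; my_lookup_page() is a hypothetical helper,
where a real implementation such as filemap_fault() does a page cache lookup
plus readahead):

	/*
	 * Minimal sketch of a vm_ops->fault implementation, as seen by
	 * __do_fault().  my_lookup_page() is hypothetical; real handlers
	 * also deal with truncation, readahead and VM_FAULT_RETRY.
	 */
	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct page *page;

		page = my_lookup_page(vma->vm_file, vmf->pgoff);
		if (!page)
			return VM_FAULT_SIGBUS;	/* error: caller propagates it */

		lock_page(page);
		vmf->page = page;		/* referenced page for the caller */
		return VM_FAULT_LOCKED;		/* caller skips its lock_page() */
	}

__do_fault() below concentrates what every caller of ->fault() must then do:
propagate ERROR/NOPAGE/RETRY results, drop a hwpoisoned page, and lock the
page if the handler did not return VM_FAULT_LOCKED.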

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/memory.c b/mm/memory.c
index 144e8cd078052f01795bfcb1e065ead476805c04..af76397c2c5499bf0fac5cf92d3eca0cb24bdafc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3286,6 +3286,37 @@ oom:
        return VM_FAULT_OOM;
 }
 
+static int __do_fault(struct vm_area_struct *vma, unsigned long address,
+               pgoff_t pgoff, unsigned int flags, struct page **page)
+{
+       struct vm_fault vmf;
+       int ret;
+
+       vmf.virtual_address = (void __user *)(address & PAGE_MASK);
+       vmf.pgoff = pgoff;
+       vmf.flags = flags;
+       vmf.page = NULL;
+
+       ret = vma->vm_ops->fault(vma, &vmf);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+               return ret;
+
+       if (unlikely(PageHWPoison(vmf.page))) {
+               if (ret & VM_FAULT_LOCKED)
+                       unlock_page(vmf.page);
+               page_cache_release(vmf.page);
+               return VM_FAULT_HWPOISON;
+       }
+
+       if (unlikely(!(ret & VM_FAULT_LOCKED)))
+               lock_page(vmf.page);
+       else
+               VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
+
+       *page = vmf.page;
+       return ret;
+}
+
 /*
  * do_fault() tries to create a new page mapping. It aggressively
  * tries to share with existing pages, but makes a separate copy if
@@ -3305,12 +3336,11 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        pte_t *page_table;
        spinlock_t *ptl;
-       struct page *page;
+       struct page *page, *fault_page;
        struct page *cow_page;
        pte_t entry;
        int anon = 0;
        struct page *dirty_page = NULL;
-       struct vm_fault vmf;
        int ret;
        int page_mkwrite = 0;
 
@@ -3334,42 +3364,19 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        } else
                cow_page = NULL;
 
-       vmf.virtual_address = (void __user *)(address & PAGE_MASK);
-       vmf.pgoff = pgoff;
-       vmf.flags = flags;
-       vmf.page = NULL;
-
-       ret = vma->vm_ops->fault(vma, &vmf);
-       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
-                           VM_FAULT_RETRY)))
+       ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
                goto uncharge_out;
 
-       if (unlikely(PageHWPoison(vmf.page))) {
-               if (ret & VM_FAULT_LOCKED)
-                       unlock_page(vmf.page);
-               ret = VM_FAULT_HWPOISON;
-               page_cache_release(vmf.page);
-               goto uncharge_out;
-       }
-
-       /*
-        * For consistency in subsequent calls, make the faulted page always
-        * locked.
-        */
-       if (unlikely(!(ret & VM_FAULT_LOCKED)))
-               lock_page(vmf.page);
-       else
-               VM_BUG_ON_PAGE(!PageLocked(vmf.page), vmf.page);
-
        /*
         * Should we do an early C-O-W break?
         */
-       page = vmf.page;
+       page = fault_page;
        if (flags & FAULT_FLAG_WRITE) {
                if (!(vma->vm_flags & VM_SHARED)) {
                        page = cow_page;
                        anon = 1;
-                       copy_user_highpage(page, vmf.page, address, vma);
+                       copy_user_highpage(page, fault_page, address, vma);
                        __SetPageUptodate(page);
                } else {
                        /*
@@ -3378,8 +3385,15 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         * to become writable
                         */
                        if (vma->vm_ops->page_mkwrite) {
+                               struct vm_fault vmf;
                                int tmp;
 
+                               vmf.virtual_address =
+                                       (void __user *)(address & PAGE_MASK);
+                               vmf.pgoff = pgoff;
+                               vmf.flags = flags;
+                               vmf.page = fault_page;
+
                                unlock_page(page);
                                vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
                                tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
@@ -3469,9 +3483,9 @@ static int do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                if (vma->vm_file && !page_mkwrite)
                        file_update_time(vma->vm_file);
        } else {
-               unlock_page(vmf.page);
+               unlock_page(fault_page);
                if (anon)
-                       page_cache_release(vmf.page);
+                       page_cache_release(fault_page);
        }
 
        return ret;