mm: introduce do_read_fault()
author    Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
          Thu, 3 Apr 2014 21:48:11 +0000 (14:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 3 Apr 2014 23:21:03 +0000 (16:21 -0700)
Introduce do_read_fault().  The function does what do_fault() does, but
only for read page faults.

Unlike do_fault(), do_read_fault() is clean and straightforward: since it
never handles a write, it needs none of do_fault()'s copy-on-write or
page_mkwrite() logic.
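
In outline, the new helper does the following (a condensed sketch of the
code added below, with the soft-dirty handling omitted):

        /* read the page in; __do_fault() returns with it locked */
        ret = __do_fault(vma, address, pgoff, flags, &fault_page);

        /* retake the PTE lock and check that nobody raced with us */
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (unlikely(!pte_same(*pte, orig_pte))) {
                /* someone else already handled the fault: back out */
                pte_unmap_unlock(pte, ptl);
                unlock_page(fault_page);
                page_cache_release(fault_page);
                return ret;
        }

        /* install a read-only mapping of the new page */
        entry = mk_pte(fault_page, vma->vm_page_prot);
        page_add_file_rmap(fault_page);
        set_pte_at(mm, address, pte, entry);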

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memory.c

index af76397c2c5499bf0fac5cf92d3eca0cb24bdafc..56784e9a7151fc5b49be65fdcc9df7038fc58cb3 100644
@@ -3317,6 +3317,43 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
        return ret;
 }
 
+static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+               unsigned long address, pmd_t *pmd,
+               pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
+{
+       struct page *fault_page;
+       spinlock_t *ptl;
+       pte_t entry, *pte;
+       int ret;
+
+       ret = __do_fault(vma, address, pgoff, flags, &fault_page);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
+               return ret;
+
+       pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (unlikely(!pte_same(*pte, orig_pte))) {
+               pte_unmap_unlock(pte, ptl);
+               unlock_page(fault_page);
+               page_cache_release(fault_page);
+               return ret;
+       }
+
+       flush_icache_page(vma, fault_page);
+       entry = mk_pte(fault_page, vma->vm_page_prot);
+       if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+               entry = pte_mksoft_dirty(entry);
+       inc_mm_counter_fast(mm, MM_FILEPAGES);
+       page_add_file_rmap(fault_page);
+       set_pte_at(mm, address, pte, entry);
+
+       /* no need to invalidate: a not-present page won't be cached */
+       update_mmu_cache(vma, address, pte);
+       pte_unmap_unlock(pte, ptl);
+       unlock_page(fault_page);
+
+       return ret;
+}
+
 /*
  * do_fault() tries to create a new page mapping. It aggressively
  * tries to share with existing pages, but makes a separate copy if
@@ -3510,6 +3547,9 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 
        pte_unmap(page_table);
+       if (!(flags & FAULT_FLAG_WRITE))
+               return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+                               orig_pte);
        return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
@@ -3542,6 +3582,9 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        pgoff = pte_to_pgoff(orig_pte);
+       if (!(flags & FAULT_FLAG_WRITE))
+               return do_read_fault(mm, vma, address, pmd, pgoff, flags,
+                               orig_pte);
        return do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
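
Note that in both callers a write fault (FAULT_FLAG_WRITE) still falls
through to do_fault(); only the read path is split out here.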