Merge tag 'for-3.13' of git://git.openrisc.net/~jonas/linux
[linux-drm-fsl-dcu.git] / mm / memory.c
index 1f2287eaa88e94da2062f8aeef04e46a737f70eb..0409e8f43fa0f3719fadf3f19afda45164bbaa94 100644 (file)
@@ -382,7 +382,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
        pte_free_tlb(tlb, token, addr);
-       tlb->mm->nr_ptes--;
+       atomic_long_dec(&tlb->mm->nr_ptes);
 }
 
 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
@@ -453,8 +453,6 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
 /*
  * This function frees user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
  */
 void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
@@ -552,6 +550,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
                pmd_t *pmd, unsigned long address)
 {
+       spinlock_t *ptl;
        pgtable_t new = pte_alloc_one(mm, address);
        int wait_split_huge_page;
        if (!new)
@@ -572,15 +571,15 @@ int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
         */
        smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
 
-       spin_lock(&mm->page_table_lock);
+       ptl = pmd_lock(mm, pmd);
        wait_split_huge_page = 0;
        if (likely(pmd_none(*pmd))) {   /* Has another populated it ? */
-               mm->nr_ptes++;
+               atomic_long_inc(&mm->nr_ptes);
                pmd_populate(mm, pmd, new);
                new = NULL;
        } else if (unlikely(pmd_trans_splitting(*pmd)))
                wait_split_huge_page = 1;
-       spin_unlock(&mm->page_table_lock);
+       spin_unlock(ptl);
        if (new)
                pte_free(mm, new);
        if (wait_split_huge_page)
@@ -681,7 +680,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
        if (vma->vm_ops)
                printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n",
                       vma->vm_ops->fault);
-       if (vma->vm_file && vma->vm_file->f_op)
+       if (vma->vm_file)
                printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n",
                       vma->vm_file->f_op->mmap);
        dump_stack();
@@ -1518,20 +1517,20 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
                        split_huge_page_pmd(vma, address, pmd);
                        goto split_fallthrough;
                }
-               spin_lock(&mm->page_table_lock);
+               ptl = pmd_lock(mm, pmd);
                if (likely(pmd_trans_huge(*pmd))) {
                        if (unlikely(pmd_trans_splitting(*pmd))) {
-                               spin_unlock(&mm->page_table_lock);
+                               spin_unlock(ptl);
                                wait_split_huge_page(vma->anon_vma, pmd);
                        } else {
                                page = follow_trans_huge_pmd(vma, address,
                                                             pmd, flags);
-                               spin_unlock(&mm->page_table_lock);
+                               spin_unlock(ptl);
                                *page_mask = HPAGE_PMD_NR - 1;
                                goto out;
                        }
                } else
-                       spin_unlock(&mm->page_table_lock);
+                       spin_unlock(ptl);
                /* fall through */
        }
 split_fallthrough:
@@ -4271,3 +4270,28 @@ void copy_user_huge_page(struct page *dst, struct page *src,
        }
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+
+#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+static struct kmem_cache *page_ptl_cachep;
+void __init ptlock_cache_init(void)
+{
+       page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
+                       SLAB_PANIC, NULL);
+}
+
+bool ptlock_alloc(struct page *page)
+{
+       spinlock_t *ptl;
+
+       ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
+       if (!ptl)
+               return false;
+       page->ptl = ptl;
+       return true;
+}
+
+void ptlock_free(struct page *page)
+{
+       kmem_cache_free(page_ptl_cachep, page->ptl);
+}
+#endif
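
For reference, a minimal sketch (not part of the commit) of the locking pattern the hunks above convert to. Instead of serializing every page-table update on the single mm->page_table_lock, callers take the lock returned by pmd_lock() for the PMD they are touching; with split PTE/PMD locks that lock lives in the page-table page itself (or, when spinlock_t is too large to embed, in the separately allocated page->ptl set up by ptlock_alloc()), and it falls back to mm->page_table_lock when split locks are disabled. The helper name example_pmd_present() is hypothetical and only illustrates the lock/unlock pairing used throughout this file.

/* Illustrative only: hypothetical helper, not a kernel function. */
static bool example_pmd_present(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl;
	bool ret;

	ptl = pmd_lock(mm, pmd);	/* per-PMD lock, not the mm-wide one */
	ret = !pmd_none(*pmd);		/* *pmd is stable while ptl is held */
	spin_unlock(ptl);

	return ret;
}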