Merge branch 'for-next' of git://git.pengutronix.de/git/ukl/linux into devel-stable
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6cd3357c847a5e5d392353579f471..c6d45c87540ef3fdb7df6c9ba75a94ce6a42b1d7 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;
 
 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
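
The three new pgprot globals back the KVM/ARM port: pgprot_hyp_device covers the
hypervisor's own device mappings, while pgprot_s2 and pgprot_s2_device cover guest
RAM and guest device memory at stage 2. A sketch of how the series presumably
consumes them, modelled on the companion arch/arm/include/asm/pgtable.h changes
(the _MOD_PROT helper and the L_PTE_HYP/L_PTE_S2_RDONLY bits belong to those
patches, not to this file):

    /* Assumed consumers of the new globals (KVM/ARM series sketch). */
    #define _MOD_PROT(p, b)     __pgprot(pgprot_val(p) | (b))

    #define PAGE_HYP            _MOD_PROT(pgprot_kernel, L_PTE_HYP)
    #define PAGE_HYP_DEVICE     _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
    #define PAGE_S2             _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
    #define PAGE_S2_DEVICE      _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)

KVM's stage-2 fault path can then build a guest mapping as, for example,
pfn_pte(pfn, PAGE_S2), upgrading it to writable separately.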
@@ -66,37 +69,50 @@ struct cachepolicy {
        unsigned int    cr_mask;
        pmdval_t        pmd;
        pteval_t        pte;
+       pteval_t        pte_s2;
 };
 
+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)      policy
+#else
+#define s2_policy(policy)      0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_UNCACHED),
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
+               .pte_s2         = s2_policy(L_PTE_S2_MT_WRITEBACK),
        }
 };
 
+#ifdef CONFIG_CPU_CP15
 /*
  * These are useful for identifying cache coherency
  * problems by allowing the cache or the cache and
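
The s2_policy() wrapper exists because stage-2 translation is an LPAE-only
concept: with classic 2-level tables there is no stage 2, so the .pte_s2 field is
compiled down to 0. The L_PTE_S2_MT_* values are assumed to be defined in
pgtable-3level.h by the same series, placing the stage-2 MemAttr[3:0] field at
PTE bits [5:2], roughly:

    /* Assumed stage-2 memory-type encodings (pgtable-3level.h sketch). */
    #define L_PTE_S2_MT_UNCACHED     (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
    #define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal, write-through */
    #define L_PTE_S2_MT_WRITEBACK    (_AT(pteval_t, 0xf) << 2) /* normal, write-back */

Note how the table maps "buffered" to the uncached stage-2 attribute and
"writealloc" to plain write-back: stage 2 has no bufferable-only or
write-allocate-hint encodings, so the nearest attribute is used.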
@@ -195,6 +211,24 @@ void adjust_cr(unsigned long mask, unsigned long set)
 }
 #endif
 
+#else /* ifdef CONFIG_CPU_CP15 */
+
+static int __init early_cachepolicy(char *p)
+{
+       pr_warning("cachepolicy kernel parameter not supported without cp15\n");
+       return 0;
+}
+early_param("cachepolicy", early_cachepolicy);
+
+static int __init noalign_setup(char *__unused)
+{
+       pr_warning("noalign kernel parameter not supported without cp15\n");
+       return 1;
+}
+__setup("noalign", noalign_setup);
+
+#endif /* ifdef CONFIG_CPU_CP15 / else */
+
 #define PROT_PTE_DEVICE                L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
 #define PROT_SECT_DEVICE       PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
@@ -283,7 +315,7 @@ static struct mem_type mem_types[] = {
        },
        [MT_MEMORY_SO] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-                               L_PTE_MT_UNCACHED,
+                               L_PTE_MT_UNCACHED | L_PTE_XN,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
                                PMD_SECT_UNCACHED | PMD_SECT_XN,
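
Strongly-ordered memory (MT_MEMORY_SO) now carries L_PTE_XN at the PTE level as
well, matching the PMD_SECT_XN already present in prot_sect; previously only
section-sized mappings of this type were marked non-executable.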
@@ -310,6 +342,7 @@ static void __init build_mem_type_table(void)
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+       pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
        int cpu_arch = cpu_architecture();
        int i;
 
@@ -421,6 +454,8 @@ static void __init build_mem_type_table(void)
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+       s2_pgprot = cp->pte_s2;
+       hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
 
        /*
         * ARMv6 and above have extended page tables.
@@ -444,6 +479,7 @@ static void __init build_mem_type_table(void)
                        user_pgprot |= L_PTE_SHARED;
                        kern_pgprot |= L_PTE_SHARED;
                        vecs_pgprot |= L_PTE_SHARED;
+                       s2_pgprot |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
                        mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +534,9 @@ static void __init build_mem_type_table(void)
        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | kern_pgprot);
+       pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+       pgprot_s2_device  = __pgprot(s2_device_pgprot);
+       pgprot_hyp_device  = __pgprot(hyp_device_pgprot);
 
        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
@@ -757,21 +796,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
        struct map_desc *md;
        struct vm_struct *vm;
+       struct static_vm *svm;
 
        if (!nr)
                return;
 
-       vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+       svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
        for (md = io_desc; nr; md++, nr--) {
                create_mapping(md);
+
+               vm = &svm->vm;
                vm->addr = (void *)(md->virtual & PAGE_MASK);
                vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
                vm->phys_addr = __pfn_to_phys(md->pfn);
                vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
                vm->flags |= VM_ARM_MTYPE(md->type);
                vm->caller = iotable_init;
-               vm_area_add_early(vm++);
+               add_static_vm_early(svm++);
        }
 }
 
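From this hunk on, the static-mapping bookkeeping moves from the core vmlist onto
a dedicated static_vmlist. struct static_vm and add_static_vm_early() are
introduced by companion hunks (presumably arch/arm/mm/mm.h and
arch/arm/mm/ioremap.c); a plausible shape, under that assumption:

    /* Sketch of the assumed companion definitions. */
    struct static_vm {
            struct vm_struct vm;
            struct list_head list;
    };

    void __init add_static_vm_early(struct static_vm *svm)
    {
            struct static_vm *curr_svm;
            struct vm_struct *vm = &svm->vm;
            void *vaddr;

            /* Still register with the generic early vmalloc list ... */
            vm_area_add_early(vm);
            vaddr = vm->addr;

            /* ... and keep static_vmlist sorted by virtual address so
             * walks and lookups can stop early. */
            list_for_each_entry(curr_svm, &static_vmlist, list) {
                    if (curr_svm->vm.addr > vaddr)
                            break;
            }
            list_add_tail(&svm->list, &curr_svm->list);
    }
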
@@ -779,13 +821,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
                                  void *caller)
 {
        struct vm_struct *vm;
+       struct static_vm *svm;
+
+       svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-       vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+       vm = &svm->vm;
        vm->addr = (void *)addr;
        vm->size = size;
        vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
        vm->caller = caller;
-       vm_area_add_early(vm);
+       add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +855,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+       struct static_vm *svm;
        struct vm_struct *vm;
        unsigned long addr, next = 0;
        pmd_t *pmd;
 
-       /* we're still single threaded hence no lock needed here */
-       for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-                       continue;
+       list_for_each_entry(svm, &static_vmlist, list) {
+               vm = &svm->vm;
                addr = (unsigned long)vm->addr;
                if (addr < next)
                        continue;
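
With the list conversion, fill_pmd_gaps() no longer filters on
VM_ARM_STATIC_MAPPING / VM_ARM_EMPTY_MAPPING: static_vmlist holds only entries
registered via add_static_vm_early(), which are exactly those two kinds of
mapping. The addr < next skip in the loop also continues to work because the
list is kept sorted by virtual address.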
@@ -857,19 +901,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-       struct vm_struct *vm;
-       unsigned long addr;
+       struct static_vm *svm;
 
-       /* we're still single threaded hence no lock needed here */
-       for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-                       continue;
-               addr = (unsigned long)vm->addr;
-               addr &= ~(SZ_2M - 1);
-               if (addr == PCI_IO_VIRT_BASE)
-                       return;
+       svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+       if (svm)
+               return;
 
-       }
        vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
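
pci_reserve_io() now simply asks whether a static mapping already covers
PCI_IO_VIRT_BASE, rather than walking vmlist and masking addresses by hand. The
lookup helper is again assumed to come from the companion ioremap.c hunk; a
sketch:

    /* Assumed lookup over the address-sorted static_vmlist. */
    struct static_vm *find_static_vm_vaddr(void *vaddr)
    {
            struct static_vm *svm;

            list_for_each_entry(svm, &static_vmlist, list) {
                    struct vm_struct *vm = &svm->vm;

                    /* list is sorted: once past vaddr, no match exists */
                    if (vm->addr > vaddr)
                            break;

                    if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
                            return svm;
            }

            return NULL;
    }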