X-Git-Url: http://git.agner.ch/gitweb/?p=linux-drm-fsl-dcu.git;a=blobdiff_plain;f=arch%2Fx86_64%2Fmm%2Finit.c;h=ec31534eb104875649847948c32f816c6951d5a8;hp=3e16fe08150ec6f3bb7a94e7bc6ec12a2e5193a7;hb=f8abea8f8c24ecdad6d6861bffb912f23f2741cd;hpb=ebdea46fecae40c4d7effcd33f40918a37a1df4b

diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 3e16fe08150e..ec31534eb104 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -406,9 +406,12 @@ void __cpuinit zap_low_mappings(int cpu)
 #ifndef CONFIG_NUMA
 void __init paging_init(void)
 {
-	unsigned long max_zone_pfns[MAX_NR_ZONES] = {MAX_DMA_PFN,
-						MAX_DMA32_PFN,
-						end_pfn};
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+	max_zone_pfns[ZONE_NORMAL] = end_pfn;
+
 	memory_present(0, 0, end_pfn);
 	sparse_init();
 	free_area_init_nodes(max_zone_pfns);
@@ -462,19 +465,6 @@ void online_page(struct page *page)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-/*
- * XXX: memory_add_physaddr_to_nid() is to find node id from physical address
- *	via probe interface of sysfs. If acpi notifies hot-add event, then it
- *	can tell node id by searching dsdt. But, probe interface doesn't have
- *	node id. So, return 0 as node id at this time.
- */
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 start)
-{
-	return 0;
-}
-#endif
-
 /*
  * Memory is added always to NORMAL zone. This means you will never get
  * additional DMA/DMA32 memory.
@@ -487,12 +477,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
+	init_memory_mapping(start, (start + size -1));
+
 	ret = __add_pages(zone, start_pfn, nr_pages);
 	if (ret)
 		goto error;
 
-	init_memory_mapping(start, (start + size -1));
-
 	return ret;
 error:
 	printk("%s: Problem encountered in __add_pages!\n", __func__);
@@ -506,7 +496,17 @@ int remove_memory(u64 start, u64 size)
 }
 EXPORT_SYMBOL_GPL(remove_memory);
 
-#else /* CONFIG_MEMORY_HOTPLUG */
+#if !defined(CONFIG_ACPI_NUMA) && defined(CONFIG_NUMA)
+int memory_add_physaddr_to_nid(u64 start)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
+#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
 /*
  * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
  * just online the pages.
@@ -532,7 +532,7 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
 	}
 	return err;
 }
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif
 
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 			 kcore_vsyscall;
@@ -648,9 +648,22 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 {
-	/* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
 	int nid = phys_to_nid(phys);
+#endif
+	unsigned long pfn = phys >> PAGE_SHIFT;
+	if (pfn >= end_pfn) {
+		/* This can happen with kdump kernels when accessing firmware
+		   tables. */
+		if (pfn < end_pfn_map)
+			return;
+		printk(KERN_ERR "reserve_bootmem: illegal reserve %lx %u\n",
+				phys, len);
+		return;
+	}
+
+	/* Should check here against the e820 map to avoid double free */
+#ifdef CONFIG_NUMA
 	reserve_bootmem_node(NODE_DATA(nid), phys, len);
 #else
 	reserve_bootmem(phys, len);
@@ -698,33 +711,44 @@ int kern_addr_valid(unsigned long addr)
 extern int exception_trace, page_fault_trace;
 
 static ctl_table debug_table2[] = {
-	{ 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
-	  proc_dointvec },
-	{ 0, }
+	{
+		.ctl_name	= 99,
+		.procname	= "exception-trace",
+		.data		= &exception_trace,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{}
 };
 
 static ctl_table debug_root_table2[] = {
-	{ .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
-	   .child = debug_table2 },
-	{ 0 },
+	{
+		.ctl_name = CTL_DEBUG,
+		.procname = "debug",
+		.mode = 0555,
+		.child = debug_table2
+	},
+	{}
 };
 
 static __init int x8664_sysctl_init(void)
 {
-	register_sysctl_table(debug_root_table2, 1);
+	register_sysctl_table(debug_root_table2);
 	return 0;
 }
 __initcall(x8664_sysctl_init);
 #endif
 
-/* A pseudo VMAs to allow ptrace access for the vsyscall page. This only
+/* A pseudo VMA to allow ptrace access for the vsyscall page. This only
    covers the 64bit vsyscall page now. 32bit has a real VMA now and does
    not need special handling anymore. */
 
 static struct vm_area_struct gate_vma = {
 	.vm_start = VSYSCALL_START,
-	.vm_end = VSYSCALL_END,
-	.vm_page_prot = PAGE_READONLY
+	.vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES << PAGE_SHIFT),
+	.vm_page_prot = PAGE_READONLY_EXEC,
+	.vm_flags = VM_READ | VM_EXEC
 };
 
 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
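
Note on the sysctl hunk above: it converts debug_table2 and debug_root_table2 to designated initializers, replaces the old positional entries and "{ 0 }" terminators with "{}" sentinels, and drops the insert_at_head argument from register_sysctl_table(). The sketch below is only an illustration of that same pattern as it might appear in a hypothetical out-of-tree module built against a kernel of this vintage; it is not part of the patch, and the example_* names (and the ctl_name value 100) are invented for illustration.

/* Illustrative sketch only, assuming a kernel of roughly this era. */
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

static int example_trace;			/* hypothetical knob, analogous to exception_trace */

static ctl_table example_table[] = {
	{
		.ctl_name	= 100,		/* hypothetical binary sysctl number */
		.procname	= "example-trace",
		.data		= &example_trace,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}					/* empty sentinel terminates the table */
};

static ctl_table example_root_table[] = {
	{
		.ctl_name = CTL_DEBUG,
		.procname = "debug",
		.mode = 0555,
		.child = example_table
	},
	{}
};

static struct ctl_table_header *example_header;

static int __init example_init(void)
{
	/* Single-argument form, as used by x8664_sysctl_init() after this patch. */
	example_header = register_sysctl_table(example_root_table);
	return example_header ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	unregister_sysctl_table(example_header);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The tables themselves can stay static; only the ctl_table_header pointer returned by registration needs to be kept so the entries can be unregistered later.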