Merge remote-tracking branch 'origin/x86/boot' into x86/mm2
author	H. Peter Anvin <hpa@linux.intel.com>
Tue, 29 Jan 2013 22:59:09 +0000 (14:59 -0800)
committer	H. Peter Anvin <hpa@linux.intel.com>
Tue, 29 Jan 2013 23:10:15 +0000 (15:10 -0800)
Upcoming patches to x86/mm2 require the changes and the advanced baseline in
x86/boot.

Resolved Conflicts:
arch/x86/kernel/setup.c
mm/nobootmem.c

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
12 files changed:
arch/sparc/mm/init_64.c
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/setup.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/xen/mmu.c
include/linux/mm.h
mm/nobootmem.c

Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc arch/x86/kernel/setup.c
index 6d29d1fcf068cf82250668e0990e207413bdd72c,00f6c1472b850472e5f9759dd5ad9613f6c026be..268193746cd86efb66b1845021d1688388502102
@@@ -919,8 -986,36 +993,10 @@@ void __init setup_arch(char **cmdline_p
  
        setup_real_mode();
  
 -      init_gbpages();
+       trim_platform_memory_ranges();
 +      init_mem_mapping();
  
 -      /* max_pfn_mapped is updated here */
 -      max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
 -      max_pfn_mapped = max_low_pfn_mapped;
 -
 -#ifdef CONFIG_X86_64
 -      if (max_pfn > max_low_pfn) {
 -              int i;
 -              unsigned long start, end;
 -              unsigned long start_pfn, end_pfn;
 -
 -              for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
 -                                                       NULL) {
 -
 -                      end = PFN_PHYS(end_pfn);
 -                      if (end <= (1UL<<32))
 -                              continue;
 -
 -                      start = PFN_PHYS(start_pfn);
 -                      max_pfn_mapped = init_memory_mapping(
 -                                              max((1UL<<32), start), end);
 -              }
 -
 -              /* can we preseve max_low_pfn ?*/
 -              max_low_pfn = max_pfn;
 -      }
 -#endif
        memblock.current_limit = get_max_mapped();
        dma_contiguous_reserve(0);
  
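The setup.c conflict is the substantive one: x86/mm2 folds the open-coded
direct-mapping setup into init_mem_mapping(), while x86/boot adds
trim_platform_memory_ranges() at the same point, so the resolution keeps the
two calls back to back. As a sketch of what the deleted block did, and
therefore what the consolidated helper has to cover, reconstructed from the
removed lines above rather than the verbatim arch/x86/mm/init.c code:

/*
 * Sketch only -- reconstructed from the lines removed above, not the
 * verbatim arch/x86/mm/init.c implementation of init_mem_mapping().
 */
static void __init init_mem_mapping_sketch(void)
{
	/* Map everything below max_low_pfn (the sub-4 GiB range) first. */
	max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn << PAGE_SHIFT);
	max_pfn_mapped = max_low_pfn_mapped;

#ifdef CONFIG_X86_64
	if (max_pfn > max_low_pfn) {
		unsigned long start_pfn, end_pfn;
		int i;

		/* Then map each memblock range extending above 4 GiB. */
		for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn,
				       &end_pfn, NULL) {
			unsigned long end = PFN_PHYS(end_pfn);

			if (end <= (1UL << 32))
				continue;	/* covered by the low mapping */

			max_pfn_mapped = init_memory_mapping(
					max(1UL << 32, PFN_PHYS(start_pfn)),
					end);
		}

		max_low_pfn = max_pfn;
	}
#endif
}

After the merge, setup_arch() only calls trim_platform_memory_ranges()
followed by init_mem_mapping(), as the resolved hunk above shows.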
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc mm/nobootmem.c
index ecc2f13d557d3b28f5bb82b3d5dca2e621cfcf9c,b8294fc03df869153378f47f41f0ecd595c10887..03d152a76acf5e18396b74e62a3372e55b469750
@@@ -137,6 -137,37 +137,22 @@@ unsigned long __init free_low_memory_co
        return count;
  }
  
 -/**
 - * free_all_bootmem_node - release a node's free pages to the buddy allocator
 - * @pgdat: node to be released
 - *
 - * Returns the number of pages actually released.
 - */
 -unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 -{
 -      register_page_bootmem_info_node(pgdat);
 -      reset_node_lowmem_managed_pages(pgdat);
 -
 -      /* free_low_memory_core_early(MAX_NUMNODES) will be called later */
 -      return 0;
 -}
 -
+ static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
+ {
+       struct zone *z;
+       /*
+        * In free_area_init_core(), highmem zone's managed_pages is set to
+        * present_pages, and bootmem allocator doesn't allocate from highmem
+        * zones. So there's no need to recalculate managed_pages because all
+        * highmem pages will be managed by the buddy system. Here highmem
+        * zone also includes highmem movable zone.
+        */
+       for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
+               if (!is_highmem(z))
+                       z->managed_pages = 0;
+ }
  /**
   * free_all_bootmem - release free pages to the buddy allocator
   *
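On the nobootmem.c side, free_all_bootmem_node() goes away (its work is
handled by the later free_low_memory_core_early(MAX_NUMNODES) pass) and
reset_node_lowmem_managed_pages() comes in to zero managed_pages on the
lowmem zones before the buddy allocator re-accounts them. A hedged sketch of
how the new helper is presumably driven from free_all_bootmem(), whose
kernel-doc is cut off above -- reconstructed from context, not the exact
merged code:

unsigned long __init free_all_bootmem(void)
{
	struct pglist_data *pgdat;

	/* Zero lowmem managed_pages so the buddy allocator re-accounts them. */
	for_each_online_pgdat(pgdat)
		reset_node_lowmem_managed_pages(pgdat);

	/*
	 * Pass MAX_NUMNODES rather than one node id: low memory is not
	 * guaranteed to live on node 0, so release free ranges from all
	 * nodes in a single pass.
	 */
	return free_low_memory_core_early(MAX_NUMNODES);
}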