x86-64: finish cleanup_highmap()'s job wrt. _brk_end
authorJan Beulich <jbeulich@novell.com>
Wed, 6 May 2009 12:06:47 +0000 (13:06 +0100)
committerH. Peter Anvin <hpa@zytor.com>
Fri, 8 May 2009 04:51:34 +0000 (21:51 -0700)
With the introduction of the .brk section, special care must be taken
that no unused page table entries remain if _brk_end and _end are
separated by a 2M page boundary. cleanup_highmap() runs very early and
hence cannot take care of this, so any entries past _brk_end that still
need removing must be cleared once the brk allocator has done its job.
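
As an illustration only (not part of the patch): a minimal userspace
sketch of the 2M-boundary condition described above. The PMD_SHIFT
value of 21 (2M pages on x86-64) and the sample addresses are
assumptions chosen for the example.

  #include <stdio.h>

  #define PMD_SHIFT 21                 /* 2M pages on x86-64 */

  /* Stale page table entries can remain only if _brk_end and _end
   * fall on different 2M pages, i.e. their 2M-page indices differ. */
  static int needs_pmd_cleanup(unsigned long brk_end, unsigned long end)
  {
          return ((brk_end - 1) >> PMD_SHIFT) != ((end - 1) >> PMD_SHIFT);
  }

  int main(void)
  {
          /* Hypothetical kernel-image addresses, for illustration only. */
          unsigned long brk_end = 0xffffffff81a04000UL;
          unsigned long end     = 0xffffffff81c00000UL;

          printf("cleanup needed: %s\n",
                 needs_pmd_cleanup(brk_end, end) ? "yes" : "no");
          return 0;
  }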

[ Impact: avoids undesirable TLB aliases ]

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
arch/x86/mm/init.c

index fd3da1dda1c9e5033af44cbdaf432239784558f2..ae4f7b5d71040566f7b30af16e6c00f20c82c098 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/page.h>
 #include <asm/page_types.h>
 #include <asm/sections.h>
+#include <asm/setup.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
 
@@ -304,8 +305,23 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 #endif
 
 #ifdef CONFIG_X86_64
-       if (!after_bootmem)
+       if (!after_bootmem && !start) {
+               pud_t *pud;
+               pmd_t *pmd;
+
                mmu_cr4_features = read_cr4();
+
+               /*
+                * _brk_end cannot change anymore, but it and _end may be
+                * located on different 2M pages. cleanup_highmap(), however,
+                * can only consider _end when it runs, so destroy any
+                * mappings beyond _brk_end here.
+                */
+               pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
+               pmd = pmd_offset(pud, _brk_end - 1);
+               while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
+                       pmd_clear(pmd);
+       }
 #endif
        __flush_tlb_all();
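
As a non-authoritative note on the loop bounds in the hunk above:
starting at the PMD covering _brk_end - 1 and pre-incrementing means
the first entry cleared is the one immediately after _brk_end's PMD,
and clearing stops after the PMD covering _end - 1. A small userspace
sketch of that index arithmetic (addresses and PMD_SHIFT are
assumptions for illustration):

  #include <stdio.h>

  #define PMD_SHIFT 21    /* 2M pages on x86-64 */

  int main(void)
  {
          /* Hypothetical addresses, for illustration only. */
          unsigned long brk_end = 0xffffffff81a04000UL;
          unsigned long end     = 0xffffffff81e00000UL;

          /* Mirrors "pmd = pmd_offset(pud, _brk_end - 1)" followed by
           * "while (++pmd <= pmd_offset(pud, _end - 1))". */
          unsigned long first = ((brk_end - 1) >> PMD_SHIFT) + 1;
          unsigned long last  = (end - 1) >> PMD_SHIFT;

          if (first > last)
                  printf("_brk_end and _end share a 2M page; nothing to clear\n");
          else
                  printf("clear 2M mappings for page indices %#lx..%#lx\n",
                         first, last);
          return 0;
  }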