Merge branches 'x86/acpi', 'x86/apic', 'x86/cpudetect', 'x86/headers', 'x86/paravirt...
author Ingo Molnar <mingo@elte.hu>
Tue, 17 Feb 2009 11:07:00 +0000 (12:07 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 17 Feb 2009 11:07:00 +0000 (12:07 +0100)
Makefile
arch/x86/kernel/apic.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/traps.c

diff --cc Makefile
index 681c1d23b4d4f813703667516f9966d9d701d915,22d758495ad29424f88361e9b1c782e1529bad94,c06e250eca18cf5b3176a8af6a2295c5e5738a56,77a006dae2daa379972436249de657cc2be18930,77a006dae2daa379972436249de657cc2be18930,7715b2c14fb48d59fbd05c4bee18d66df2188bc3,77a006dae2daa379972436249de657cc2be18930..b280cfcf1efe630c0ec183c7a3e280e555c00222
+++ b/Makefile
@@@@@@@@ -1,7 -1,7 -1,7 -1,7 -1,7 -1,7 -1,7 +1,7 @@@@@@@@
       VERSION = 2
       PATCHLEVEL = 6
       SUBLEVEL = 29
-  -- -EXTRAVERSION = -rc4
  -    EXTRAVERSION = -rc1
     - EXTRAVERSION = -rc3
+ +++++EXTRAVERSION = -rc5
       NAME = Erotic Pickled Herring
       
       # *DOCUMENTATION*
diff --cc arch/x86/kernel/apic.c
Simple merge
diff --cc arch/x86/kernel/cpu/common.c
index 83492b1f93b11c5e0b851300ffdb3e314eaacc9e,cbcdb796d47f8b981e7cc42af74d4ed7725f0982,32093d08d872afee12ba5b038d9de68155c2a43f,e8f4a386bd9d7ef5c9c6a378baa10dec4ceb381e,e8f4a386bd9d7ef5c9c6a378baa10dec4ceb381e,83492b1f93b11c5e0b851300ffdb3e314eaacc9e,e8f4a386bd9d7ef5c9c6a378baa10dec4ceb381e..4db150ed446d1c64523ae25160c9267db5b1cdda
+++ b/arch/x86/kernel/cpu/common.c
@@@@@@@@ -110,9 -121,9 -110,9 -122,10 -122,10 -110,9 -122,10 +122,10 @@@@@@@@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_
        [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
       
        [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
- -  -  [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
- -  - } };
+ +  +  [GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
+++  +  GDT_STACK_CANARY_INIT
       #endif
+ +  + } };
       EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
       
       #ifdef CONFIG_X86_32
@@@@@@@@ -212,6 -223,49 -212,49 -225,49 -225,49 -212,6 -225,49 +225,49 @@@@@@@@ static inline void squash_the_stupid_se
       }
       #endif
       
 - -- -             ((s32)df->feature < 0 ?
 - -- -              (u32)df->feature > (u32)c->extended_cpuid_level :
 - -- -              (s32)df->feature > (s32)c->cpuid_level)) {
+    + /*
+    +  * Some CPU features depend on higher CPUID levels, which may not always
+    +  * be available due to CPUID level capping or broken virtualization
+    +  * software.  Add those features to this table to auto-disable them.
+    +  */
+    + struct cpuid_dependent_feature {
+    +  u32 feature;
+    +  u32 level;
+    + };
+    + static const struct cpuid_dependent_feature __cpuinitconst
+    + cpuid_dependent_features[] = {
+    +  { X86_FEATURE_MWAIT,            0x00000005 },
+    +  { X86_FEATURE_DCA,              0x00000009 },
+    +  { X86_FEATURE_XSAVE,            0x0000000d },
+    +  { 0, 0 }
+    + };
+    + 
+    + static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+    + {
+    +  const struct cpuid_dependent_feature *df;
+    +  for (df = cpuid_dependent_features; df->feature; df++) {
+    +          /*
+    +           * Note: cpuid_level is set to -1 if unavailable, but
+    +           * extended_cpuid_level is set to 0 if unavailable
+    +           * and the legitimate extended levels are all negative
+    +           * when signed; hence the weird messing around with
+    +           * signs here...
+    +           */
+    +          if (cpu_has(c, df->feature) &&
 - -- -}        
++ ++++             ((s32)df->level < 0 ?
++ ++++              (u32)df->level > (u32)c->extended_cpuid_level :
++ ++++              (s32)df->level > (s32)c->cpuid_level)) {
+    +                  clear_cpu_cap(c, df->feature);
+    +                  if (warn)
+    +                          printk(KERN_WARNING
+    +                                 "CPU: CPU feature %s disabled "
+    +                                 "due to lack of CPUID level 0x%x\n",
+    +                                 x86_cap_flags[df->feature],
+    +                                 df->level);
+    +          }
+    +  }
++ ++++}
+    + 
       /*
        * Naming convention should be: <Name> [(<Codename>)]
         * This table is only used if init_<vendor>() below doesn't set it;
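
The sign games in filter_cpuid_features() above are easy to misread. A minimal standalone sketch of the same check, with hypothetical values, in plain userspace C rather than kernel code:

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of the level check in filter_cpuid_features() above:
 * extended CPUID leaves (0x80000000 and up) are negative as s32, so
 * they are compared unsigned against extended_cpuid_level (which is 0
 * when unavailable); basic leaves are compared signed against
 * cpuid_level (which is -1 when unavailable).
 */
static int level_missing(int32_t cpuid_level,
			 uint32_t extended_cpuid_level, uint32_t level)
{
	if ((int32_t)level < 0)		/* extended leaf */
		return level > extended_cpuid_level;
	return (int32_t)level > cpuid_level;	/* basic leaf */
}

int main(void)
{
	/* XSAVE needs basic leaf 0xd; a CPU capped at leaf 0xa lacks it. */
	printf("%d\n", level_missing(0x0a, 0x80000008, 0x0000000d)); /* 1 */
	printf("%d\n", level_missing(0x0d, 0x80000008, 0x0000000d)); /* 0 */
	return 0;
}
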
@@@@@@@@ -242,9 -296,19 -285,9 -298,20 -298,20 -242,9 -298,20 +298,20 @@@@@@@@ static char __cpuinit *table_lookup_mod
       
       __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
       
+ +  + void load_percpu_segment(int cpu)
+ +  + {
+ +  + #ifdef CONFIG_X86_32
+ +  +  loadsegment(fs, __KERNEL_PERCPU);
+ +  + #else
+ +  +  loadsegment(gs, 0);
+ +  +  wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+ +  + #endif
+++  +  load_stack_canary_segment();
+ +  + }
+ +  + 
       /* Current gdt points %fs at the "master" per-cpu area: after this,
        * it's on the real one. */
- -  - void switch_to_new_gdt(void)
+ +  + void switch_to_new_gdt(int cpu)
       {
        struct desc_ptr gdt_descr;
       
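
For context, the rest of switch_to_new_gdt() is truncated by the hunk above: it loads the new descriptor and then reloads the per-cpu segment base via the load_percpu_segment() helper added earlier in this merge. The post-merge body in common.c reads roughly:

void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	/* Reload the per-cpu base: %fs on 32-bit, MSR_GS_BASE on 64-bit */
	load_percpu_segment(cpu);
}
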
@@@@@@@@ -395,13 -455,8 -438,13 -458,8 -458,8 -395,13 -458,8 +458,8 @@@@@@@@ void __cpuinit detect_ht(struct cpuinfo
       
                core_bits = get_count_order(c->x86_max_cores);
       
- -  - #ifdef CONFIG_X86_64
- -  -          c->cpu_core_id = phys_pkg_id(index_msb) &
  -                                            ((1 << core_bits) - 1);
  -    #else
  -             c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
+ +  +          c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
                                               ((1 << core_bits) - 1);
-    - #else
-    -          c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-    -                                         ((1 << core_bits) - 1);
- -  - #endif
        }
       
       out:
@@@@@@@@ -877,54 -934,26 -922,54 -937,22 -937,22 -877,54 -937,22 +937,22 @@@@@@@@ static __init int setup_disablecpuid(ch
       __setup("clearcpuid=", setup_disablecpuid);
       
       #ifdef CONFIG_X86_64
- -  - struct x8664_pda **_cpu_pda __read_mostly;
- -  - EXPORT_SYMBOL(_cpu_pda);
- -  - 
       struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
       
- -  - static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
-    - 
-    - void __cpuinit pda_init(int cpu)
-    - {
-    -  struct x8664_pda *pda = cpu_pda(cpu);
+ +  + DEFINE_PER_CPU_FIRST(union irq_stack_union,
+ +  +               irq_stack_union) __aligned(PAGE_SIZE);
 -     #ifdef CONFIG_SMP
 -     DEFINE_PER_CPU(char *, irq_stack_ptr);   /* will be set during per cpu init */
 -     #else
+ +  + DEFINE_PER_CPU(char *, irq_stack_ptr) =
 -      per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 -     #endif
+++  +  init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
       
  -    void __cpuinit pda_init(int cpu)
  -    {
  -     struct x8664_pda *pda = cpu_pda(cpu);
  -    
- -  -  /* Setup up data that may be needed in __get_free_pages early */
- -  -  loadsegment(fs, 0);
- -  -  loadsegment(gs, 0);
- -  -  /* Memory clobbers used to order PDA accessed */
- -  -  mb();
- -  -  wrmsrl(MSR_GS_BASE, pda);
- -  -  mb();
- -  - 
- -  -  pda->cpunumber = cpu;
- -  -  pda->irqcount = -1;
- -  -  pda->kernelstack = (unsigned long)stack_thread_info() -
- -  -                           PDA_STACKOFFSET + THREAD_SIZE;
- -  -  pda->active_mm = &init_mm;
- -  -  pda->mmu_state = 0;
- -  - 
- -  -  if (cpu == 0) {
- -  -          /* others are initialized in smpboot.c */
- -  -          pda->pcurrent = &init_task;
- -  -          pda->irqstackptr = boot_cpu_stack;
- -  -          pda->irqstackptr += IRQSTACKSIZE - 64;
- -  -  } else {
- -  -          if (!pda->irqstackptr) {
- -  -                  pda->irqstackptr = (char *)
- -  -                          __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
- -  -                  if (!pda->irqstackptr)
- -  -                          panic("cannot allocate irqstack for cpu %d",
- -  -                                cpu);
- -  -                  pda->irqstackptr += IRQSTACKSIZE - 64;
- -  -          }
+ +  + DEFINE_PER_CPU(unsigned long, kernel_stack) =
+ +  +  (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+ +  + EXPORT_PER_CPU_SYMBOL(kernel_stack);
       
- -  -          if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
- -  -                  pda->nodenumber = cpu_to_node(cpu);
- -  -  }
- -  - }
+ +  + DEFINE_PER_CPU(unsigned int, irq_count) = -1;
       
- -  - static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
- -  -                            DEBUG_STKSZ] __page_aligned_bss;
+ +  + static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+ +  +  [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+ +  +  __aligned(PAGE_SIZE);
       
       extern asmlinkage void ignore_sysret(void);
       
@@@@@@@@ -957,9 -986,9 -1002,9 -985,13 -985,13 -957,9 -985,13 +985,13 @@@@@@@@ unsigned long kernel_eflags
        */
       DEFINE_PER_CPU(struct orig_ist, orig_ist);
       
---  - #else
+++  + #else    /* x86_64 */
+    + 
 --    /* Make sure %fs is initialized properly in idle threads */
+++  + #ifdef CONFIG_CC_STACKPROTECTOR
+++  + DEFINE_PER_CPU(unsigned long, stack_canary);
+++  + #endif
 ++    
-    - /* Make sure %fs is initialized properly in idle threads */
+++  + /* Make sure %fs and %gs are initialized properly in idle threads */
       struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
       {
        memset(regs, 0, sizeof(struct pt_regs));
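
The idle_regs() body is cut off above; per the updated comment, on 32-bit it presumably continues by seeding the %fs and %gs selectors (the canary segment coming from the x86/stackprotector branch merged here), roughly:

struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;		/* per-cpu area via %fs */
	regs->gs = __KERNEL_STACK_CANARY;	/* stack canary via %gs */
	return regs;
}
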
diff --cc arch/x86/kernel/traps.c
index 98c2d055284b32fb2ce6ff6776b9cb049c103ae0,0d032d2d8a184e8b8d6921306f2b2041e2f4b23c,98c2d055284b32fb2ce6ff6776b9cb049c103ae0,bde57f0f161638354dc5a9b346969b2901ccc49a,bde57f0f161638354dc5a9b346969b2901ccc49a,a9e7548e17906f885f28e4763128e749c7a76481,bde57f0f161638354dc5a9b346969b2901ccc49a..acb8c0585ab9f04a2d79457822d083e7de78f8b3
+++ b/arch/x86/kernel/traps.c
@@@@@@@@ -906,16 -905,19 -906,16 -905,20 -905,20 -914,19 -905,20 +913,20 @@@@@@@@ void math_emulate(struct math_emu_info 
       }
       #endif /* CONFIG_MATH_EMULATION */
       
 -   - dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
 +   + dotraplinkage void __kprobes
- -    do_device_not_available(struct pt_regs *regs, long error)
+++  + do_device_not_available(struct pt_regs *regs, long error_code)
       {
       #ifdef CONFIG_X86_32
        if (read_cr0() & X86_CR0_EM) {
 -   -          conditional_sti(&regs);
+ +             struct math_emu_info info = { };
+ +    
- -             math_emulate(0);
 +   +          conditional_sti(regs);
 -   -          info.regs = &regs;
+ +    
+++  +          info.regs = regs;
+ +             math_emulate(&info);
        } else {
                math_state_restore(); /* interrupts still off */
 -   -          conditional_sti(&regs);
 +   +          conditional_sti(regs);
        }
       #else
        math_state_restore();
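
The new calling convention in this last hunk wraps the register frame in a struct math_emu_info instead of the old bare argument. The structure comes from the math-emu register-frame fix merged here and, from memory, is declared in arch/x86/include/asm/math_emu.h roughly as:

struct math_emu_info {
	long ___orig_eip;
	union {
		struct pt_regs *regs;		/* normal trap entry */
		struct kernel_vm86_regs *vm86;	/* vm86 mode entry */
	};
};

Callers zero-initialize it, point .regs at their pt_regs, and hand math_emulate() the pointer, as do_device_not_available() does above.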