Merge linux-2.6 with linux-acpi-2.6
author Len Brown <len.brown@intel.com>
Thu, 8 Sep 2005 05:45:47 +0000 (01:45 -0400)
committer Len Brown <len.brown@intel.com>
Thu, 8 Sep 2005 05:45:47 +0000 (01:45 -0400)
19 files changed:
arch/i386/Kconfig
arch/i386/kernel/Makefile
arch/i386/kernel/acpi/boot.c
arch/i386/kernel/io_apic.c
arch/i386/kernel/mpparse.c
arch/i386/kernel/setup.c
arch/i386/mach-es7000/es7000plat.c
arch/ia64/Kconfig
arch/x86_64/Kconfig
arch/x86_64/kernel/Makefile
arch/x86_64/kernel/genapic.c
arch/x86_64/kernel/io_apic.c
arch/x86_64/kernel/setup.c
drivers/char/hpet.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/pnp/pnpacpi/rsparser.c
drivers/serial/Kconfig
include/asm-i386/mpspec.h
kernel/power/Kconfig

diff --combined arch/i386/Kconfig
index 9ba33490874238e0a308101e4e44487dc956047b,5d51b38bd70d2d0bdc86ed14ee613e3f4786c993..b22f003eaa6d782c02523685501a22753c28a50e
@@@ -14,6 -14,10 +14,10 @@@ config X8
          486, 586, Pentiums, and various instruction-set-compatible chips by
          AMD, Cyrix, and others.
  
+ config SEMAPHORE_SLEEPERS
+       bool
+       default y
  config MMU
        bool
        default y
@@@ -33,6 -37,10 +37,10 @@@ config GENERIC_IOMA
        bool
        default y
  
+ config ARCH_MAY_HAVE_PC_FDC
+       bool
+       default y
  source "init/Kconfig"
  
  menu "Processor type and features"
@@@ -754,6 -762,7 +762,7 @@@ config NUM
        depends on SMP && HIGHMEM64G && (X86_NUMAQ || X86_GENERICARCH || (X86_SUMMIT && ACPI))
        default n if X86_PC
        default y if (X86_NUMAQ || X86_SUMMIT)
+       select SPARSEMEM_STATIC
  
  # Need comments to help the hapless user trying to turn on NUMA support
  comment "NUMA (NUMA-Q) requires SMP, 64GB highmem support"
@@@ -1203,6 -1212,7 +1212,6 @@@ config PCI_DIREC
  config PCI_MMCONFIG
        bool
        depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
 -      select ACPI_BOOT
        default y
  
  source "drivers/pci/pcie/Kconfig"
@@@ -1312,6 -1322,11 +1321,11 @@@ config GENERIC_IRQ_PROB
        bool
        default y
  
+ config GENERIC_PENDING_IRQ
+       bool
+       depends on GENERIC_HARDIRQS && SMP
+       default y
  config X86_SMP
        bool
        depends on SMP && !X86_VOYAGER
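
The i386 Kconfig hunks above only add hidden bool symbols (SEMAPHORE_SLEEPERS, ARCH_MAY_HAVE_PC_FDC, GENERIC_PENDING_IRQ) that default to y and are never offered to the user, and drop the obsolete "select ACPI_BOOT". As a minimal standalone illustration (not from the kernel tree, with the CONFIG_* define written out by hand instead of coming from autoconf.h), such a symbol just becomes a preprocessor define that C code tests:

  #include <stdio.h>

  #define CONFIG_GENERIC_PENDING_IRQ 1        /* what "default y" ends up meaning */

  static void handle_irq_move(int irq)
  {
  #ifdef CONFIG_GENERIC_PENDING_IRQ
          printf("irq %d: migration deferred to the generic pending-IRQ code\n", irq);
  #else
          printf("irq %d: architecture keeps a private balancing path\n", irq);
  #endif
  }

  int main(void)
  {
          handle_irq_move(9);
          return 0;
  }
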
diff --combined arch/i386/kernel/Makefile
index c52b4fad011beafeb2001bde39198465c00d2403,64682a0edacf5c54490fe5928c1ce716f82be522..f10de0f2c5e622258517b978afac886378a04333
@@@ -7,11 -7,11 +7,11 @@@ extra-y := head.o init_task.o vmlinux.l
  obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o vm86.o \
                ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_i386.o \
                pci-dma.o i386_ksyms.o i387.o dmi_scan.o bootflag.o \
-               doublefault.o quirks.o
+               doublefault.o quirks.o i8237.o
  
  obj-y                         += cpu/
  obj-y                         += timers/
 -obj-$(CONFIG_ACPI_BOOT)               += acpi/
 +obj-$(CONFIG_ACPI)            += acpi/
  obj-$(CONFIG_X86_BIOS_REBOOT) += reboot.o
  obj-$(CONFIG_MCA)             += mca.o
  obj-$(CONFIG_X86_MSR)         += msr.o
diff --combined arch/i386/kernel/acpi/boot.c
index 0fb23c30eb98cf0566ad8ebcd89e3ecd512ab317,34ee500c26e59a4a992dd6c89dbb5cd68ae3bd92..a63351c085c66dceeafb990ba75214963fad1cea
  
  #ifdef        CONFIG_X86_64
  
 -static inline void  acpi_madt_oem_check(char *oem_id, char *oem_table_id) { }
 +static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 +{
 +}
  extern void __init clustered_apic_check(void);
 -static inline int ioapic_setup_disabled(void) { return 0; }
 +static inline int ioapic_setup_disabled(void)
 +{
 +      return 0;
 +}
 +
  #include <asm/proto.h>
  
 -#else /* X86 */
 +#else                         /* X86 */
  
  #ifdef        CONFIG_X86_LOCAL_APIC
  #include <mach_apic.h>
  #include <mach_mpparse.h>
 -#endif        /* CONFIG_X86_LOCAL_APIC */
 +#endif                                /* CONFIG_X86_LOCAL_APIC */
  
 -#endif        /* X86 */
 +#endif                                /* X86 */
  
  #define BAD_MADT_ENTRY(entry, end) (                                      \
                (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
  
  #define PREFIX                        "ACPI: "
  
 -#ifdef CONFIG_ACPI_PCI
  int acpi_noirq __initdata;    /* skip ACPI IRQ initialization */
 -int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
 -#else
 -int acpi_noirq __initdata = 1;
 -int acpi_pci_disabled __initdata = 1;
 -#endif
 +int acpi_pci_disabled __initdata;     /* skip ACPI PCI scan and IRQ initialization */
  int acpi_ht __initdata = 1;   /* enable HT */
  
  int acpi_lapic;
@@@ -89,7 -88,7 +89,7 @@@ static u64 acpi_lapic_addr __initdata 
  
  #define MAX_MADT_ENTRIES      256
  u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
 -                      { [0 ... MAX_MADT_ENTRIES-1] = 0xff };
 +    {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
  EXPORT_SYMBOL(x86_acpiid_to_apicid);
  
  /* --------------------------------------------------------------------------
   * The default interrupt routing model is PIC (8259).  This gets
   * overriden if IOAPICs are enumerated (below).
   */
 -enum acpi_irq_model_id                acpi_irq_model = ACPI_IRQ_MODEL_PIC;
 +enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  
  #ifdef        CONFIG_X86_64
  
  char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
  {
        if (!phys_addr || !size)
 -      return NULL;
 +              return NULL;
  
        if (phys_addr < (end_pfn_map << PAGE_SHIFT))
                return __va(phys_addr);
@@@ -135,8 -134,8 +135,8 @@@ char *__acpi_map_table(unsigned long ph
        unsigned long base, offset, mapped_size;
        int idx;
  
 -      if (phys + size < 8*1024*1024) 
 -              return __va(phys); 
 +      if (phys + size < 8 * 1024 * 1024)
 +              return __va(phys);
  
        offset = phys & (PAGE_SIZE - 1);
        mapped_size = PAGE_SIZE - offset;
                mapped_size += PAGE_SIZE;
        }
  
 -      return ((unsigned char *) base + offset);
 +      return ((unsigned char *)base + offset);
  }
  #endif
  
@@@ -173,7 -172,7 +173,7 @@@ int __init acpi_parse_mcfg(unsigned lon
        if (!phys_addr || !size)
                return -EINVAL;
  
 -      mcfg = (struct acpi_table_mcfg *) __acpi_map_table(phys_addr, size);
 +      mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
        if (!mcfg) {
                printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
                return -ENODEV;
  
        return 0;
  }
 -#endif /* CONFIG_PCI_MMCONFIG */
 +#endif                                /* CONFIG_PCI_MMCONFIG */
  
  #ifdef CONFIG_X86_LOCAL_APIC
 -static int __init
 -acpi_parse_madt (
 -      unsigned long           phys_addr,
 -      unsigned long           size)
 +static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
  {
 -      struct acpi_table_madt  *madt = NULL;
 +      struct acpi_table_madt *madt = NULL;
  
        if (!phys_addr || !size)
                return -EINVAL;
  
 -      madt = (struct acpi_table_madt *) __acpi_map_table(phys_addr, size);
 +      madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
        if (!madt) {
                printk(KERN_WARNING PREFIX "Unable to map MADT\n");
                return -ENODEV;
                acpi_lapic_addr = (u64) madt->lapic_address;
  
                printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
 -                      madt->lapic_address);
 +                     madt->lapic_address);
        }
  
        acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
 -      
 +
        return 0;
  }
  
 -
  static int __init
 -acpi_parse_lapic (
 -      acpi_table_entry_header *header, const unsigned long end)
 +acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
  {
 -      struct acpi_table_lapic *processor = NULL;
 +      struct acpi_table_lapic *processor = NULL;
  
 -      processor = (struct acpi_table_lapic*) header;
 +      processor = (struct acpi_table_lapic *)header;
  
        if (BAD_MADT_ENTRY(processor, end))
                return -EINVAL;
  
        x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
  
 -      mp_register_lapic (
 -              processor->id,                                     /* APIC ID */
 -              processor->flags.enabled);                        /* Enabled? */
 +      mp_register_lapic(processor->id,        /* APIC ID */
 +                        processor->flags.enabled);    /* Enabled? */
  
        return 0;
  }
  
  static int __init
 -acpi_parse_lapic_addr_ovr (
 -      acpi_table_entry_header *header, const unsigned long end)
 +acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
 +                        const unsigned long end)
  {
        struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
  
 -      lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr*) header;
 +      lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
  
        if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
                return -EINVAL;
  }
  
  static int __init
 -acpi_parse_lapic_nmi (
 -      acpi_table_entry_header *header, const unsigned long end)
 +acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
  {
        struct acpi_table_lapic_nmi *lapic_nmi = NULL;
  
 -      lapic_nmi = (struct acpi_table_lapic_nmi*) header;
 +      lapic_nmi = (struct acpi_table_lapic_nmi *)header;
  
        if (BAD_MADT_ENTRY(lapic_nmi, end))
                return -EINVAL;
        return 0;
  }
  
 +#endif                                /*CONFIG_X86_LOCAL_APIC */
  
 -#endif /*CONFIG_X86_LOCAL_APIC*/
 -
 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
 +#ifdef CONFIG_X86_IO_APIC
  
  static int __init
 -acpi_parse_ioapic (
 -      acpi_table_entry_header *header, const unsigned long end)
 +acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
  {
        struct acpi_table_ioapic *ioapic = NULL;
  
 -      ioapic = (struct acpi_table_ioapic*) header;
 +      ioapic = (struct acpi_table_ioapic *)header;
  
        if (BAD_MADT_ENTRY(ioapic, end))
                return -EINVAL;
 - 
 +
        acpi_table_print_madt_entry(header);
  
 -      mp_register_ioapic (
 -              ioapic->id,
 -              ioapic->address,
 -              ioapic->global_irq_base);
 - 
 +      mp_register_ioapic(ioapic->id,
 +                         ioapic->address, ioapic->global_irq_base);
 +
        return 0;
  }
  
  /*
   * Parse Interrupt Source Override for the ACPI SCI
   */
 -static void
 -acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
 +static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
  {
        if (trigger == 0)       /* compatible SCI trigger is level */
                trigger = 3;
                polarity = acpi_sci_flags.polarity;
  
        /*
 -       * mp_config_acpi_legacy_irqs() already setup IRQs < 16
 +       * mp_config_acpi_legacy_irqs() already setup IRQs < 16
         * If GSI is < 16, this will update its flags,
         * else it will create a new mp_irqs[] entry.
         */
  }
  
  static int __init
 -acpi_parse_int_src_ovr (
 -      acpi_table_entry_header *header, const unsigned long end)
 +acpi_parse_int_src_ovr(acpi_table_entry_header * header,
 +                     const unsigned long end)
  {
        struct acpi_table_int_src_ovr *intsrc = NULL;
  
 -      intsrc = (struct acpi_table_int_src_ovr*) header;
 +      intsrc = (struct acpi_table_int_src_ovr *)header;
  
        if (BAD_MADT_ENTRY(intsrc, end))
                return -EINVAL;
  
        if (intsrc->bus_irq == acpi_fadt.sci_int) {
                acpi_sci_ioapic_setup(intsrc->global_irq,
 -                      intsrc->flags.polarity, intsrc->flags.trigger);
 +                                    intsrc->flags.polarity,
 +                                    intsrc->flags.trigger);
                return 0;
        }
  
        if (acpi_skip_timer_override &&
 -              intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
 -                      printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
 -                      return 0;
 +          intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
 +              printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
 +              return 0;
        }
  
 -      mp_override_legacy_irq (
 -              intsrc->bus_irq,
 -              intsrc->flags.polarity,
 -              intsrc->flags.trigger,
 -              intsrc->global_irq);
 +      mp_override_legacy_irq(intsrc->bus_irq,
 +                             intsrc->flags.polarity,
 +                             intsrc->flags.trigger, intsrc->global_irq);
  
        return 0;
  }
  
 -
  static int __init
 -acpi_parse_nmi_src (
 -      acpi_table_entry_header *header, const unsigned long end)
 +acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
  {
        struct acpi_table_nmi_src *nmi_src = NULL;
  
 -      nmi_src = (struct acpi_table_nmi_src*) header;
 +      nmi_src = (struct acpi_table_nmi_src *)header;
  
        if (BAD_MADT_ENTRY(nmi_src, end))
                return -EINVAL;
        return 0;
  }
  
 -#endif /* CONFIG_X86_IO_APIC */
 -
 -#ifdef        CONFIG_ACPI_BUS
 +#endif                                /* CONFIG_X86_IO_APIC */
  
  /*
   * acpi_pic_sci_set_trigger()
   * ECLR2 is IRQ's 8-15 (IRQ 8, 13 must be 0)
   */
  
 -void __init
 -acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 +void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
  {
        unsigned int mask = 1 << irq;
        unsigned int old, new;
         * routing tables..
         */
        switch (trigger) {
 -      case 1: /* Edge - clear */
 +      case 1:         /* Edge - clear */
                new &= ~mask;
                break;
 -      case 3: /* Level - set */
 +      case 3:         /* Level - set */
                new |= mask;
                break;
        }
        outb(new >> 8, 0x4d1);
  }
  
 -
 -#endif /* CONFIG_ACPI_BUS */
 -
  int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
  {
  #ifdef CONFIG_X86_IO_APIC
        if (use_pci_vector() && !platform_legacy_irq(gsi))
 -              *irq = IO_APIC_VECTOR(gsi);
 +              *irq = IO_APIC_VECTOR(gsi);
        else
  #endif
                *irq = gsi;
        return 0;
  }
  
 -unsigned int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
 +/*
 + * success: return IRQ number (>=0)
 + * failure: return < 0
 + */
 +int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
  {
        unsigned int irq;
        unsigned int plat_gsi = gsi;
                extern void eisa_set_level_irq(unsigned int irq);
  
                if (edge_level == ACPI_LEVEL_SENSITIVE)
 -                              eisa_set_level_irq(gsi);
 +                      eisa_set_level_irq(gsi);
        }
  #endif
  
        acpi_gsi_to_irq(plat_gsi, &irq);
        return irq;
  }
 +
  EXPORT_SYMBOL(acpi_register_gsi);
  
  /*
   *  ACPI based hotplug support for CPU
   */
  #ifdef CONFIG_ACPI_HOTPLUG_CPU
 -int
 -acpi_map_lsapic(acpi_handle handle, int *pcpu)
 +int acpi_map_lsapic(acpi_handle handle, int *pcpu)
  {
        /* TBD */
        return -EINVAL;
  }
 -EXPORT_SYMBOL(acpi_map_lsapic);
  
 +EXPORT_SYMBOL(acpi_map_lsapic);
  
 -int
 -acpi_unmap_lsapic(int cpu)
 +int acpi_unmap_lsapic(int cpu)
  {
        /* TBD */
        return -EINVAL;
  }
 +
  EXPORT_SYMBOL(acpi_unmap_lsapic);
 -#endif /* CONFIG_ACPI_HOTPLUG_CPU */
 +#endif                                /* CONFIG_ACPI_HOTPLUG_CPU */
  
 -int
 -acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 +int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
  {
        /* TBD */
        return -EINVAL;
  }
 +
  EXPORT_SYMBOL(acpi_register_ioapic);
  
 -int
 -acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 +int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
  {
        /* TBD */
        return -EINVAL;
  }
 +
  EXPORT_SYMBOL(acpi_unregister_ioapic);
  
  static unsigned long __init
 -acpi_scan_rsdp (
 -      unsigned long           start,
 -      unsigned long           length)
 +acpi_scan_rsdp(unsigned long start, unsigned long length)
  {
 -      unsigned long           offset = 0;
 -      unsigned long           sig_len = sizeof("RSD PTR ") - 1;
 +      unsigned long offset = 0;
 +      unsigned long sig_len = sizeof("RSD PTR ") - 1;
  
        /*
         * Scan all 16-byte boundaries of the physical memory region for the
         * RSDP signature.
         */
        for (offset = 0; offset < length; offset += 16) {
 -              if (strncmp((char *) (start + offset), "RSD PTR ", sig_len))
 +              if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
                        continue;
                return (start + offset);
        }
@@@ -557,19 -575,20 +557,19 @@@ static int __init acpi_parse_sbf(unsign
        struct acpi_table_sbf *sb;
  
        if (!phys_addr || !size)
 -      return -EINVAL;
 +              return -EINVAL;
  
 -      sb = (struct acpi_table_sbf *) __acpi_map_table(phys_addr, size);
 +      sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
        if (!sb) {
                printk(KERN_WARNING PREFIX "Unable to map SBF\n");
                return -ENODEV;
        }
  
 -      sbf_port = sb->sbf_cmos; /* Save CMOS port */
 +      sbf_port = sb->sbf_cmos;        /* Save CMOS port */
  
        return 0;
  }
  
 -
  #ifdef CONFIG_HPET_TIMER
  
  static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
        if (!phys || !size)
                return -EINVAL;
  
 -      hpet_tbl = (struct acpi_table_hpet *) __acpi_map_table(phys, size);
 +      hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
        if (!hpet_tbl) {
                printk(KERN_WARNING PREFIX "Unable to map HPET\n");
                return -ENODEV;
                       "memory.\n");
                return -1;
        }
 -
  #ifdef        CONFIG_X86_64
 -        vxtime.hpet_address = hpet_tbl->addr.addrl |
 -                ((long) hpet_tbl->addr.addrh << 32);
 +      vxtime.hpet_address = hpet_tbl->addr.addrl |
 +          ((long)hpet_tbl->addr.addrh << 32);
  
 -        printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 -               hpet_tbl->id, vxtime.hpet_address);
 -#else /* X86 */
 +      printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 +             hpet_tbl->id, vxtime.hpet_address);
 +#else                         /* X86 */
        {
                extern unsigned long hpet_address;
  
                hpet_address = hpet_tbl->addr.addrl;
                printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
 -                      hpet_tbl->id, hpet_address);
 +                     hpet_tbl->id, hpet_address);
        }
 -#endif        /* X86 */
 +#endif                                /* X86 */
  
        return 0;
  }
@@@ -620,25 -640,28 +620,25 @@@ static int __init acpi_parse_fadt(unsig
  {
        struct fadt_descriptor_rev2 *fadt = NULL;
  
 -      fadt = (struct fadt_descriptor_rev2*) __acpi_map_table(phys,size);
 -      if(!fadt) {
 +      fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
 +      if (!fadt) {
                printk(KERN_WARNING PREFIX "Unable to map FADT\n");
                return 0;
        }
 -
 -#ifdef        CONFIG_ACPI_INTERPRETER
        /* initialize sci_int early for INT_SRC_OVR MADT parsing */
        acpi_fadt.sci_int = fadt->sci_int;
 -#endif
  
 -#ifdef CONFIG_ACPI_BUS
        /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
        acpi_fadt.revision = fadt->revision;
 -      acpi_fadt.force_apic_physical_destination_mode = fadt->force_apic_physical_destination_mode;
 -#endif
 +      acpi_fadt.force_apic_physical_destination_mode =
 +          fadt->force_apic_physical_destination_mode;
  
  #ifdef CONFIG_X86_PM_TIMER
        /* detect the location of the ACPI PM Timer */
        if (fadt->revision >= FADT2_REVISION_ID) {
                /* FADT rev. 2 */
 -              if (fadt->xpm_tmr_blk.address_space_id != ACPI_ADR_SPACE_SYSTEM_IO)
 +              if (fadt->xpm_tmr_blk.address_space_id !=
 +                  ACPI_ADR_SPACE_SYSTEM_IO)
                        return 0;
  
                pmtmr_ioport = fadt->xpm_tmr_blk.address;
                pmtmr_ioport = fadt->V1_pm_tmr_blk;
        }
        if (pmtmr_ioport)
 -              printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n", pmtmr_ioport);
 +              printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
 +                     pmtmr_ioport);
  #endif
        return 0;
  }
  
 -
 -unsigned long __init
 -acpi_find_rsdp (void)
 +unsigned long __init acpi_find_rsdp(void)
  {
 -      unsigned long           rsdp_phys = 0;
 +      unsigned long rsdp_phys = 0;
  
        if (efi_enabled) {
                if (efi.acpi20)
         * Scan memory looking for the RSDP signature. First search EBDA (low
         * memory) paragraphs and then search upper memory (E0000-FFFFF).
         */
 -      rsdp_phys = acpi_scan_rsdp (0, 0x400);
 +      rsdp_phys = acpi_scan_rsdp(0, 0x400);
        if (!rsdp_phys)
 -              rsdp_phys = acpi_scan_rsdp (0xE0000, 0x20000);
 +              rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
  
        return rsdp_phys;
  }
   * Parse LAPIC entries in MADT
   * returns 0 on success, < 0 on error
   */
 -static int __init
 -acpi_parse_madt_lapic_entries(void)
 +static int __init acpi_parse_madt_lapic_entries(void)
  {
        int count;
  
         * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
         */
  
 -      count = acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr, 0);
 +      count =
 +          acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
 +                                acpi_parse_lapic_addr_ovr, 0);
        if (count < 0) {
 -              printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
 +              printk(KERN_ERR PREFIX
 +                     "Error parsing LAPIC address override entry\n");
                return count;
        }
  
        mp_register_lapic_address(acpi_lapic_addr);
  
        count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
 -                                     MAX_APICS);
 -      if (!count) { 
 +                                    MAX_APICS);
 +      if (!count) {
                printk(KERN_ERR PREFIX "No LAPIC entries present\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return -ENODEV;
 -      }
 -      else if (count < 0) {
 +      } else if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
  
 -      count = acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
 +      count =
 +          acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
        if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
        }
        return 0;
  }
 -#endif /* CONFIG_X86_LOCAL_APIC */
 +#endif                                /* CONFIG_X86_LOCAL_APIC */
  
 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI_INTERPRETER)
 +#ifdef        CONFIG_X86_IO_APIC
  /*
   * Parse IOAPIC related entries in MADT
   * returns 0 on success, < 0 on error
   */
 -static int __init
 -acpi_parse_madt_ioapic_entries(void)
 +static int __init acpi_parse_madt_ioapic_entries(void)
  {
        int count;
  
         */
        if (acpi_disabled || acpi_noirq) {
                return -ENODEV;
 -        }
 +      }
  
        /*
 -       * if "noapic" boot option, don't look for IO-APICs
 +       * if "noapic" boot option, don't look for IO-APICs
         */
        if (skip_ioapic_setup) {
                printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
 -                      "due to 'noapic' option.\n");
 +                     "due to 'noapic' option.\n");
                return -ENODEV;
        }
  
 -      count = acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic, MAX_IO_APICS);
 +      count =
 +          acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
 +                                MAX_IO_APICS);
        if (!count) {
                printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
                return -ENODEV;
 -      }
 -      else if (count < 0) {
 +      } else if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
                return count;
        }
  
 -      count = acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr, NR_IRQ_VECTORS);
 +      count =
 +          acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
 +                                NR_IRQ_VECTORS);
        if (count < 0) {
 -              printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
 +              printk(KERN_ERR PREFIX
 +                     "Error parsing interrupt source overrides entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
                return count;
        }
        /* Fill in identity legacy mapings where no override */
        mp_config_acpi_legacy_irqs();
  
 -      count = acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src, NR_IRQ_VECTORS);
 +      count =
 +          acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
 +                                NR_IRQ_VECTORS);
        if (count < 0) {
                printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
                /* TBD: Cleanup to allow fallback to MPS */
@@@ -797,9 -814,11 +797,9 @@@ static inline int acpi_parse_madt_ioapi
  {
        return -1;
  }
 -#endif /* !(CONFIG_X86_IO_APIC && CONFIG_ACPI_INTERPRETER) */
 +#endif        /* !CONFIG_X86_IO_APIC */
  
 -
 -static void __init
 -acpi_process_madt(void)
 +static void __init acpi_process_madt(void)
  {
  #ifdef CONFIG_X86_LOCAL_APIC
        int count, error;
                if (!error) {
                        acpi_lapic = 1;
  
+ #ifdef CONFIG_X86_GENERICARCH
+                       generic_bigsmp_probe();
+ #endif
                        /*
                         * Parse MADT IO-APIC entries
                         */
                        /*
                         * Dell Precision Workstation 410, 610 come here.
                         */
 -                      printk(KERN_ERR PREFIX "Invalid BIOS MADT, disabling ACPI\n");
 +                      printk(KERN_ERR PREFIX
 +                             "Invalid BIOS MADT, disabling ACPI\n");
                        disable_acpi();
                }
        }
@@@ -844,6 -865,7 +847,6 @@@ extern int acpi_force
  
  #ifdef __i386__
  
 -#ifdef        CONFIG_ACPI_PCI
  static int __init disable_acpi_irq(struct dmi_system_id *d)
  {
        if (!acpi_force) {
@@@ -863,11 -885,12 +866,11 @@@ static int __init disable_acpi_pci(stru
        }
        return 0;
  }
 -#endif
  
  static int __init dmi_disable_acpi(struct dmi_system_id *d)
  {
        if (!acpi_force) {
 -              printk(KERN_NOTICE "%s detected: acpi off\n",d->ident);
 +              printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
                disable_acpi();
        } else {
                printk(KERN_NOTICE
  static int __init force_acpi_ht(struct dmi_system_id *d)
  {
        if (!acpi_force) {
 -              printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", d->ident);
 +              printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
 +                     d->ident);
                disable_acpi();
                acpi_ht = 1;
        } else {
@@@ -902,155 -924,155 +905,155 @@@ static struct dmi_system_id __initdata 
         * Boxes that need ACPI disabled
         */
        {
 -              .callback = dmi_disable_acpi,
 -              .ident = "IBM Thinkpad",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
 -              },
 -      },
 +       .callback = dmi_disable_acpi,
 +       .ident = "IBM Thinkpad",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
 +                   },
 +       },
  
        /*
         * Boxes that need acpi=ht
         */
        {
 -              .callback = force_acpi_ht,
 -              .ident = "FSC Primergy T850",
 -              .matches = {
 -                      DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
 -                      DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "FSC Primergy T850",
 +       .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
 +                   DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "DELL GX240",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "DELL GX240",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "HP VISUALIZE NT Workstation",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
 -                      DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "HP VISUALIZE NT Workstation",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
 +                   DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "Compaq Workstation W8000",
 -              .matches = {
 -                      DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
 -                      DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "Compaq Workstation W8000",
 +       .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
 +                   DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "ASUS P4B266",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 -                      DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "ASUS P4B266",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 +                   DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "ASUS P2B-DS",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 -                      DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "ASUS P2B-DS",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 +                   DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "ASUS CUR-DLS",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 -                      DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "ASUS CUR-DLS",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 +                   DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "ABIT i440BX-W83977",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "ABIT i440BX-W83977",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "IBM Bladecenter",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "IBM Bladecenter",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "IBM eServer xSeries 360",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "IBM eServer xSeries 360",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "IBM eserver xSeries 330",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
 -              },
 -      },
 +       .callback = force_acpi_ht,
 +       .ident = "IBM eserver xSeries 330",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
 +                   },
 +       },
        {
 -              .callback = force_acpi_ht,
 -              .ident = "IBM eserver xSeries 440",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 -                      DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
 -              },
 -      },
 -
 -#ifdef        CONFIG_ACPI_PCI
 +       .callback = force_acpi_ht,
 +       .ident = "IBM eserver xSeries 440",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
 +                   DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
 +                   },
 +       },
 +
        /*
         * Boxes that need ACPI PCI IRQ routing disabled
         */
        {
 -              .callback = disable_acpi_irq,
 -              .ident = "ASUS A7V",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
 -                      DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
 -                      /* newer BIOS, Revision 1011, does work */
 -                      DMI_MATCH(DMI_BIOS_VERSION, "ASUS A7V ACPI BIOS Revision 1007"),
 -              },
 -      },
 +       .callback = disable_acpi_irq,
 +       .ident = "ASUS A7V",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
 +                   DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
 +                   /* newer BIOS, Revision 1011, does work */
 +                   DMI_MATCH(DMI_BIOS_VERSION,
 +                             "ASUS A7V ACPI BIOS Revision 1007"),
 +                   },
 +       },
  
        /*
         * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
         */
 -      {       /* _BBN 0 bug */
 -              .callback = disable_acpi_pci,
 -              .ident = "ASUS PR-DLS",
 -              .matches = {
 -                      DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 -                      DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
 -                      DMI_MATCH(DMI_BIOS_VERSION, "ASUS PR-DLS ACPI BIOS Revision 1010"),
 -                      DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
 -              },
 -      },
 +      {                       /* _BBN 0 bug */
 +       .callback = disable_acpi_pci,
 +       .ident = "ASUS PR-DLS",
 +       .matches = {
 +                   DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
 +                   DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
 +                   DMI_MATCH(DMI_BIOS_VERSION,
 +                             "ASUS PR-DLS ACPI BIOS Revision 1010"),
 +                   DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
 +                   },
 +       },
        {
 -              .callback = disable_acpi_pci,
 -              .ident = "Acer TravelMate 36x Laptop",
 -              .matches = {
 -                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
 -                      DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
 -              },
 -      },
 -#endif
 -      { }
 +       .callback = disable_acpi_pci,
 +       .ident = "Acer TravelMate 36x Laptop",
 +       .matches = {
 +                   DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
 +                   DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
 +                   },
 +       },
 +      {}
  };
  
 -#endif        /* __i386__ */
 +#endif                                /* __i386__ */
  
  /*
   * acpi_boot_table_init() and acpi_boot_init()
   *    !0: failure
   */
  
 -int __init
 -acpi_boot_table_init(void)
 +int __init acpi_boot_table_init(void)
  {
        int error;
  
         * One exception: acpi=ht continues far enough to enumerate LAPICs
         */
        if (acpi_disabled && !acpi_ht)
 -               return 1;
 +              return 1;
  
        /* 
         * Initialize the ACPI boot-time table parser.
                disable_acpi();
                return error;
        }
 -
  #ifdef __i386__
        check_acpi_pci();
  #endif
        return 0;
  }
  
 -
  int __init acpi_boot_init(void)
  {
        /*
         * One exception: acpi=ht continues far enough to enumerate LAPICs
         */
        if (acpi_disabled && !acpi_ht)
 -               return 1;
 +              return 1;
  
        acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
  
  
        return 0;
  }
 -
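
Most of the boot.c delta above is whitespace and Lindent churn plus the collapse of CONFIG_ACPI_BOOT / CONFIG_ACPI_INTERPRETER / CONFIG_ACPI_BUS into plain CONFIG_ACPI. The MADT code it reflows walks variable-length subtables, bounds-checks each one (BAD_MADT_ENTRY) and dispatches by type; the following standalone sketch, with simplified header types assumed, shows that walk in isolation (it is not the kernel's acpi_table_parse_madt()):

  #include <stdio.h>
  #include <stdint.h>

  struct subtable_header {                /* stand-in for acpi_table_entry_header */
          uint8_t type;
          uint8_t length;
  };

  static void handle_lapic(const struct subtable_header *h)
  {
          printf("LAPIC entry, %u bytes\n", (unsigned int)h->length);
  }

  static int walk_madt(const uint8_t *buf, size_t size)
  {
          size_t off = 0;

          while (off + sizeof(struct subtable_header) <= size) {
                  const struct subtable_header *h =
                          (const struct subtable_header *)(buf + off);

                  /* same idea as BAD_MADT_ENTRY(): entry must fit in the table */
                  if (h->length < sizeof(*h) || off + h->length > size)
                          return -1;

                  if (h->type == 0)       /* 0: processor local APIC subtable */
                          handle_lapic(h);

                  off += h->length;       /* subtables are variable length */
          }
          return 0;
  }

  int main(void)
  {
          /* two fake 8-byte "LAPIC" subtables */
          uint8_t madt[16] = { 0, 8, 0, 0, 0, 0, 0, 0,  0, 8, 0, 0, 0, 0, 0, 0 };

          return walk_madt(madt, sizeof(madt)) ? 1 : 0;
  }
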
diff --combined arch/i386/kernel/io_apic.c
index ebedd2e216708a5ed52616cd41407c8b559d3b93,0e727e6da5c981743f66bf4fdf74a925f0482b7f..889eda2d7b17db55bcaee783424622b6c38079b1
@@@ -33,6 -33,7 +33,7 @@@
  #include <linux/acpi.h>
  #include <linux/module.h>
  #include <linux/sysdev.h>
  #include <asm/io.h>
  #include <asm/smp.h>
  #include <asm/desc.h>
@@@ -77,7 -78,7 +78,7 @@@ static struct irq_pin_list 
        int apic, pin, next;
  } irq_2_pin[PIN_MAP_SIZE];
  
- int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
+ int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
  #ifdef CONFIG_PCI_MSI
  #define vector_to_irq(vector)         \
        (platform_legacy_irq(vector) ? vector : vector_irq[vector])
@@@ -222,13 -223,21 +223,21 @@@ static void clear_IO_APIC (void
                        clear_IO_APIC_pin(apic, pin);
  }
  
+ #ifdef CONFIG_SMP
  static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
  {
        unsigned long flags;
        int pin;
        struct irq_pin_list *entry = irq_2_pin + irq;
        unsigned int apicid_value;
+       cpumask_t tmp;
        
+       cpus_and(tmp, cpumask, cpu_online_map);
+       if (cpus_empty(tmp))
+               tmp = TARGET_CPUS;
+       cpus_and(cpumask, tmp, CPU_MASK_ALL);
        apicid_value = cpu_mask_to_apicid(cpumask);
        /* Prepare to do the io_apic_write */
        apicid_value = apicid_value << 24;
                        break;
                entry = irq_2_pin + entry->next;
        }
+       set_irq_info(irq, cpumask);
        spin_unlock_irqrestore(&ioapic_lock, flags);
  }
  
  #  define Dprintk(x...) 
  # endif
  
- cpumask_t __cacheline_aligned pending_irq_balance_cpumask[NR_IRQS];
  
  #define IRQBALANCE_CHECK_ARCH -999
  static int irqbalance_disabled = IRQBALANCE_CHECK_ARCH;
@@@ -328,12 -337,7 +337,7 @@@ static inline void balance_irq(int cpu
        cpus_and(allowed_mask, cpu_online_map, irq_affinity[irq]);
        new_cpu = move(cpu, allowed_mask, now, 1);
        if (cpu != new_cpu) {
-               irq_desc_t *desc = irq_desc + irq;
-               unsigned long flags;
-               spin_lock_irqsave(&desc->lock, flags);
-               pending_irq_balance_cpumask[irq] = cpumask_of_cpu(new_cpu);
-               spin_unlock_irqrestore(&desc->lock, flags);
+               set_pending_irq(irq, cpumask_of_cpu(new_cpu));
        }
  }
  
@@@ -528,16 -532,12 +532,12 @@@ tryanotherirq
        cpus_and(tmp, target_cpu_mask, allowed_mask);
  
        if (!cpus_empty(tmp)) {
-               irq_desc_t *desc = irq_desc + selected_irq;
-               unsigned long flags;
  
                Dprintk("irq = %d moved to cpu = %d\n",
                                selected_irq, min_loaded);
                /* mark for change destination */
-               spin_lock_irqsave(&desc->lock, flags);
-               pending_irq_balance_cpumask[selected_irq] =
-                                       cpumask_of_cpu(min_loaded);
-               spin_unlock_irqrestore(&desc->lock, flags);
+               set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
                /* Since we made a change, come back sooner to 
                 * check for more variation.
                 */
@@@ -568,7 -568,8 +568,8 @@@ static int balanced_irq(void *unused
        
        /* push everything to CPU 0 to give us a starting point.  */
        for (i = 0 ; i < NR_IRQS ; i++) {
-               pending_irq_balance_cpumask[i] = cpumask_of_cpu(0);
+               pending_irq_cpumask[i] = cpumask_of_cpu(0);
+               set_pending_irq(i, cpumask_of_cpu(0));
        }
  
        for ( ; ; ) {
@@@ -647,20 -648,9 +648,9 @@@ int __init irqbalance_disable(char *str
  
  __setup("noirqbalance", irqbalance_disable);
  
- static inline void move_irq(int irq)
- {
-       /* note - we hold the desc->lock */
-       if (unlikely(!cpus_empty(pending_irq_balance_cpumask[irq]))) {
-               set_ioapic_affinity_irq(irq, pending_irq_balance_cpumask[irq]);
-               cpus_clear(pending_irq_balance_cpumask[irq]);
-       }
- }
  late_initcall(balanced_irq_init);
- #else /* !CONFIG_IRQBALANCE */
- static inline void move_irq(int irq) { }
  #endif /* CONFIG_IRQBALANCE */
+ #endif /* CONFIG_SMP */
  
  #ifndef CONFIG_SMP
  void fastcall send_IPI_self(int vector)
@@@ -820,6 -810,7 +810,7 @@@ EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vecto
   * we need to reprogram the ioredtbls to cater for the cpus which have come online
   * so mask in all cases should simply be TARGET_CPUS
   */
+ #ifdef CONFIG_SMP
  void __init setup_ioapic_dest(void)
  {
        int pin, ioapic, irq, irq_entry;
  
        }
  }
+ #endif
  
  /*
   * EISA Edge/Level control register, ELCR
@@@ -1127,7 -1119,7 +1119,7 @@@ static inline int IO_APIC_irq_trigger(i
  }
  
  /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
- u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
+ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
  
  int assign_irq_vector(int irq)
  {
@@@ -1249,6 -1241,7 +1241,7 @@@ static void __init setup_IO_APIC_irqs(v
                spin_lock_irqsave(&ioapic_lock, flags);
                io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
                io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+               set_native_irq_info(irq, TARGET_CPUS);
                spin_unlock_irqrestore(&ioapic_lock, flags);
        }
        }
@@@ -1944,6 -1937,7 +1937,7 @@@ static void ack_edge_ioapic_vector(unsi
  {
        int irq = vector_to_irq(vector);
  
+       move_irq(vector);
        ack_edge_ioapic_irq(irq);
  }
  
@@@ -1958,6 -1952,7 +1952,7 @@@ static void end_level_ioapic_vector (un
  {
        int irq = vector_to_irq(vector);
  
+       move_irq(vector);
        end_level_ioapic_irq(irq);
  }
  
@@@ -1975,14 -1970,17 +1970,17 @@@ static void unmask_IO_APIC_vector (unsi
        unmask_IO_APIC_irq(irq);
  }
  
+ #ifdef CONFIG_SMP
  static void set_ioapic_affinity_vector (unsigned int vector,
                                        cpumask_t cpu_mask)
  {
        int irq = vector_to_irq(vector);
  
+       set_native_irq_info(vector, cpu_mask);
        set_ioapic_affinity_irq(irq, cpu_mask);
  }
  #endif
+ #endif
  
  /*
   * Level and edge triggered IO-APIC interrupts need different handling,
   * edge-triggered handler, without risking IRQ storms and other ugly
   * races.
   */
- static struct hw_interrupt_type ioapic_edge_type = {
+ static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
        .typename       = "IO-APIC-edge",
        .startup        = startup_edge_ioapic,
        .shutdown       = shutdown_edge_ioapic,
        .disable        = disable_edge_ioapic,
        .ack            = ack_edge_ioapic,
        .end            = end_edge_ioapic,
+ #ifdef CONFIG_SMP
        .set_affinity   = set_ioapic_affinity,
+ #endif
  };
  
- static struct hw_interrupt_type ioapic_level_type = {
+ static struct hw_interrupt_type ioapic_level_type __read_mostly = {
        .typename       = "IO-APIC-level",
        .startup        = startup_level_ioapic,
        .shutdown       = shutdown_level_ioapic,
        .disable        = disable_level_ioapic,
        .ack            = mask_and_ack_level_ioapic,
        .end            = end_level_ioapic,
+ #ifdef CONFIG_SMP
        .set_affinity   = set_ioapic_affinity,
+ #endif
  };
  
  static inline void init_IO_APIC_traps(void)
@@@ -2074,7 -2076,7 +2076,7 @@@ static void ack_lapic_irq (unsigned in
  
  static void end_lapic_irq (unsigned int i) { /* nothing */ }
  
- static struct hw_interrupt_type lapic_irq_type = {
+ static struct hw_interrupt_type lapic_irq_type __read_mostly = {
        .typename       = "local-APIC-edge",
        .startup        = NULL, /* startup_irq() not used for IRQ0 */
        .shutdown       = NULL, /* shutdown_irq() not used for IRQ0 */
@@@ -2421,7 -2423,7 +2423,7 @@@ device_initcall(ioapic_init_sysfs)
                            ACPI-based IOAPIC Configuration
     -------------------------------------------------------------------------- */
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
  
  int __init io_apic_get_unique_id (int ioapic, int apic_id)
  {
@@@ -2569,9 -2571,10 +2571,10 @@@ int io_apic_set_pci_routing (int ioapic
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
        io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+       set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
        spin_unlock_irqrestore(&ioapic_lock, flags);
  
        return 0;
  }
  
 -#endif /*CONFIG_ACPI_BOOT*/
 +#endif /* CONFIG_ACPI */
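
On the io_apic.c side the merge replaces the driver-private pending_irq_balance_cpumask[] with the generic set_pending_irq()/move_irq() machinery (now reached through GENERIC_PENDING_IRQ), marks hot tables __read_mostly, and makes set_ioapic_affinity_irq() clamp the requested mask to CPUs that are actually online, falling back to TARGET_CPUS when nothing is left. With plain bitmasks instead of cpumask_t, that clamping step is roughly (standalone sketch, not kernel code):

  #include <stdio.h>

  #define DEFAULT_TARGET_CPUS 0x1u                 /* stand-in for TARGET_CPUS */

  static unsigned int clamp_affinity(unsigned int requested, unsigned int online)
  {
          unsigned int tmp = requested & online;   /* cpus_and(tmp, cpumask, cpu_online_map) */

          if (tmp == 0)                            /* cpus_empty(tmp) */
                  tmp = DEFAULT_TARGET_CPUS;
          return tmp;
  }

  int main(void)
  {
          /* CPUs 2 and 3 requested, but only CPUs 0 and 1 are online */
          printf("effective mask: 0x%x\n", clamp_affinity(0xcu, 0x3u));
          return 0;
  }
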
diff --combined arch/i386/kernel/mpparse.c
index 97dbf289dbd5e0cc8a6e9fb1828cc68eed144842,5d0b9a8fc43dc1e1b1f1da28b560b24c5552abca..cafaeffe381857bd19de265957f9782496063f48
@@@ -65,6 -65,8 +65,8 @@@ int nr_ioapics
  int pic_mode;
  unsigned long mp_lapic_addr;
  
+ unsigned int def_to_bigsmp = 0;
  /* Processor that is doing the boot up */
  unsigned int boot_cpu_physical_apicid = -1U;
  /* Internal processor count */
@@@ -120,7 -122,7 +122,7 @@@ static int MP_valid_apicid(int apicid, 
  
  static void __init MP_processor_info (struct mpc_config_processor *m)
  {
-       int ver, apicid;
+       int ver, apicid, cpu, found_bsp = 0;
        physid_mask_t tmp;
        
        if (!(m->mpc_cpuflag & CPU_ENABLED))
        if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
                Dprintk("    Bootup CPU\n");
                boot_cpu_physical_apicid = m->mpc_apicid;
+               found_bsp = 1;
        }
  
        if (num_processors >= NR_CPUS) {
                return;
        }
  
+       if (found_bsp)
+               cpu = 0;
+       else
+               cpu = num_processors - 1;
+       cpu_set(cpu, cpu_possible_map);
        tmp = apicid_to_cpu_present(apicid);
        physids_or(phys_cpu_present_map, phys_cpu_present_map, tmp);
        
                ver = 0x10;
        }
        apic_version[m->mpc_apicid] = ver;
+       if ((num_processors > 8) &&
+           APIC_XAPIC(ver) &&
+           (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL))
+               def_to_bigsmp = 1;
+       else
+               def_to_bigsmp = 0;
        bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
  }
  
@@@ -653,6 -668,8 +668,6 @@@ void __init get_smp_config (void
        struct intel_mp_floating *mpf = mpf_found;
  
        /*
 -       * ACPI may be used to obtain the entire SMP configuration or just to 
 -       * enumerate/configure processors (CONFIG_ACPI_BOOT).  Note that 
         * ACPI supports both logical (e.g. Hyper-Threading) and physical 
         * processors, where MPS only supports physical.
         */
@@@ -808,7 -825,7 +823,7 @@@ void __init find_smp_config (void
                              ACPI-based MP Configuration
     -------------------------------------------------------------------------- */
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
  
  void __init mp_register_lapic_address (
        u64                     address)
@@@ -854,7 -871,7 +869,7 @@@ void __init mp_register_lapic 
        MP_processor_info(&processor);
  }
  
 -#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
 +#ifdef        CONFIG_X86_IO_APIC
  
  #define MP_ISA_BUS            0
  #define MP_MAX_IOAPIC_PIN     127
@@@ -1069,9 -1086,11 +1084,9 @@@ int mp_register_gsi (u32 gsi, int edge_
         */
        static int              gsi_to_irq[MAX_GSI_NUM];
  
 -#ifdef CONFIG_ACPI_BUS
        /* Don't set up the ACPI SCI because it's already set up */
        if (acpi_fadt.sci_int == gsi)
                return gsi;
 -#endif
  
        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0) {
                if (gsi < MAX_GSI_NUM) {
                        if (gsi > 15)
                                gsi = pci_irq++;
 -#ifdef CONFIG_ACPI_BUS
                        /*
                         * Don't assign IRQ used by ACPI SCI
                         */
                        if (gsi == acpi_fadt.sci_int)
                                gsi = pci_irq++;
 -#endif
                        gsi_to_irq[irq] = gsi;
                } else {
                        printk(KERN_ERR "GSI %u is too high\n", gsi);
        return gsi;
  }
  
 -#endif /*CONFIG_X86_IO_APIC && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)*/
 -#endif /*CONFIG_ACPI_BOOT*/
 +#endif /* CONFIG_X86_IO_APIC */
 +#endif /* CONFIG_ACPI */
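
The mpparse.c hunks introduce def_to_bigsmp: while MP-table processors are enumerated, more than 8 CPUs on an Intel part with an xAPIC-capable local APIC flips the default away from the plain CONFIG_X86_PC subarchitecture, and setup.c then prints the warning added below. A standalone sketch of that decision, assuming for illustration that APIC_XAPIC() means an APIC version of 0x14 or newer:

  #include <stdio.h>
  #include <stdbool.h>

  /* illustrative assumption: xAPIC-capable means APIC version >= 0x14 */
  static bool apic_is_xapic(unsigned int version)
  {
          return version >= 0x14;
  }

  static bool want_bigsmp(int num_processors, unsigned int apic_version,
                          bool vendor_is_intel)
  {
          return num_processors > 8 && apic_is_xapic(apic_version) && vendor_is_intel;
  }

  int main(void)
  {
          printf("%d\n", want_bigsmp(16, 0x14, true));    /* 1: bigsmp needed */
          printf("%d\n", want_bigsmp(4, 0x14, true));     /* 0: default is fine */
          return 0;
  }
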
diff --combined arch/i386/kernel/setup.c
index d52eda399a7adf575b8a9f34480fbe81a6d4e6a6,e29fd5aeaf8e4fa9dfa5f73281e17b7079c153f9..a659d274914cf26ccbc7431900d6423a391f66b1
@@@ -82,19 -82,19 +82,19 @@@ EXPORT_SYMBOL(efi_enabled)
  /* cpu data as detected by the assembly code in head.S */
  struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
  /* common cpu data for all cpus */
- struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
+ struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
  EXPORT_SYMBOL(boot_cpu_data);
  
  unsigned long mmu_cr4_features;
  
 -#ifdef        CONFIG_ACPI_INTERPRETER
 +#ifdef        CONFIG_ACPI
        int acpi_disabled = 0;
  #else
        int acpi_disabled = 1;
  #endif
  EXPORT_SYMBOL(acpi_disabled);
  
 -#ifdef        CONFIG_ACPI_BOOT
 +#ifdef        CONFIG_ACPI
  int __initdata acpi_force = 0;
  extern acpi_interrupt_flags   acpi_sci_flags;
  #endif
@@@ -370,12 -370,16 +370,16 @@@ static void __init limit_regions(unsign
        int i;
  
        if (efi_enabled) {
-               for (i = 0; i < memmap.nr_map; i++) {
-                       current_addr = memmap.map[i].phys_addr +
-                                      (memmap.map[i].num_pages << 12);
-                       if (memmap.map[i].type == EFI_CONVENTIONAL_MEMORY) {
+               efi_memory_desc_t *md;
+               void *p;
+               for (p = memmap.map, i = 0; p < memmap.map_end;
+                       p += memmap.desc_size, i++) {
+                       md = p;
+                       current_addr = md->phys_addr + (md->num_pages << 12);
+                       if (md->type == EFI_CONVENTIONAL_MEMORY) {
                                if (current_addr >= size) {
-                                       memmap.map[i].num_pages -=
+                                       md->num_pages -=
                                                (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
                                        memmap.nr_map = i + 1;
                                        return;
@@@ -794,7 -798,7 +798,7 @@@ static void __init parse_cmdline_early 
                }
  #endif
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
                /* "acpi=off" disables both ACPI table parsing and interpreter */
                else if (!memcmp(from, "acpi=off", 8)) {
                        disable_acpi();
                else if (!memcmp(from, "noapic", 6))
                        disable_ioapic_setup();
  #endif /* CONFIG_X86_LOCAL_APIC */
 -#endif /* CONFIG_ACPI_BOOT */
 +#endif /* CONFIG_ACPI */
  
  #ifdef CONFIG_X86_LOCAL_APIC
                /* enable local APIC */
@@@ -1575,14 -1579,20 +1579,20 @@@ void __init setup_arch(char **cmdline_p
        if (efi_enabled)
                efi_map_memmap();
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
        /*
         * Parse the ACPI tables for possible boot-time SMP configuration.
         */
        acpi_boot_table_init();
        acpi_boot_init();
- #endif
  
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
+       if (def_to_bigsmp)
+               printk(KERN_WARNING "More than 8 CPUs detected and "
+                       "CONFIG_X86_PC cannot handle it.\nUse "
+                       "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
+ #endif
+ #endif
  #ifdef CONFIG_X86_LOCAL_APIC
        if (smp_found_config)
                get_smp_config();
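
Apart from the CONFIG_ACPI renames and the bigsmp warning, the setup.c hunk reworks limit_regions() to step through the EFI memory map by the firmware-reported descriptor size instead of indexing a fixed array, since EFI may hand out descriptors larger than the structure the kernel declares. A standalone sketch of that stride-based walk, with a simplified descriptor instead of efi_memory_desc_t:

  #include <stdio.h>
  #include <stdint.h>
  #include <string.h>

  struct mem_desc {                       /* simplified, not efi_memory_desc_t */
          uint32_t type;
          uint64_t phys_addr;
          uint64_t num_pages;
  };

  static void walk_memmap(const void *map, const void *map_end, size_t desc_size)
  {
          const unsigned char *p;

          for (p = map; (const void *)p < map_end; p += desc_size) {
                  struct mem_desc md;

                  /* descriptor in the map may be bigger than md, so copy just md */
                  memcpy(&md, p, sizeof(md));
                  printf("type %u at 0x%llx (%llu pages)\n", (unsigned int)md.type,
                         (unsigned long long)md.phys_addr,
                         (unsigned long long)md.num_pages);
          }
  }

  int main(void)
  {
          unsigned char buf[2 * 48];      /* pretend firmware uses 48-byte descriptors */
          struct mem_desc d0 = { 7, 0x100000, 256 };
          struct mem_desc d1 = { 7, 0x200000, 128 };

          memset(buf, 0, sizeof(buf));
          memcpy(buf, &d0, sizeof(d0));
          memcpy(buf + 48, &d1, sizeof(d1));
          walk_memmap(buf, buf + sizeof(buf), 48);
          return 0;
  }
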
diff --combined arch/i386/mach-es7000/es7000plat.c
index f549c0efdb9f76fe8e70c3abc906b2749eaa3f3e,2000bdca2fc2cc3a93a9c9393012bd5db707d91d..dc6660511b075e2b10292c5343a0a2b65a3eddbc
@@@ -51,7 -51,7 +51,7 @@@ struct mip_reg                *host_reg
  int                   mip_port;
  unsigned long         mip_addr, host_addr;
  
 -#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_ACPI_BOOT))
 +#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_ACPI)
  
  /*
   * GSI override for ES7000 platforms.
@@@ -73,14 -73,31 +73,31 @@@ es7000_rename_gsi(int ioapic, int gsi
        return gsi;
  }
  
 -#endif // (CONFIG_X86_IO_APIC) && (CONFIG_ACPI_INTERPRETER || CONFIG_ACPI_BOOT)
 +#endif        /* (CONFIG_X86_IO_APIC) && (CONFIG_ACPI) */
  
+ void __init
+ setup_unisys ()
+ {
+       /*
+        * Determine the generation of the ES7000 currently running.
+        *
+        * es7000_plat = 1 if the machine is a 5xx ES7000 box
+        * es7000_plat = 2 if the machine is a x86_64 ES7000 box
+        *
+        */
+       if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
+               es7000_plat = 2;
+       else
+               es7000_plat = 1;
+       ioapic_renumber_irq = es7000_rename_gsi;
+ }
  /*
   * Parse the OEM Table
   */
  
  int __init
- parse_unisys_oem (char *oemptr, int oem_entries)
+ parse_unisys_oem (char *oemptr)
  {
        int                     i;
        int                     success = 0;
  
        tp += 8;
  
-       for (i=0; i <= oem_entries; i++) {
+       for (i=0; i <= 6; i++) {
                type = *tp++;
                size = *tp++;
                tp -= 2;
                default:
                        break;
                }
-               if (i == 6) break;
                tp += size;
        }
  
        if (success < 2) {
                es7000_plat = 0;
-       } else {
-               printk("\nEnabling ES7000 specific features...\n");
-               /*
-                * Determine the generation of the ES7000 currently running.
-                *
-                * es7000_plat = 0 if the machine is NOT a Unisys ES7000 box
-                * es7000_plat = 1 if the machine is a 5xx ES7000 box
-                * es7000_plat = 2 if the machine is a x86_64 ES7000 box
-                *
-                */
-               if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
-                       es7000_plat = 2;
-               else
-                       es7000_plat = 1;
-               ioapic_renumber_irq = es7000_rename_gsi;
-       }
+       } else
+               setup_unisys();
        return es7000_plat;
  }
  
  int __init
- find_unisys_acpi_oem_table(unsigned long *oem_addr, int *length)
+ find_unisys_acpi_oem_table(unsigned long *oem_addr)
  {
        struct acpi_table_rsdp          *rsdp = NULL;
        unsigned long                   rsdp_phys = 0;
                                acpi_table_print(header, sdt.entry[i].pa);
                                t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
                                addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
-                               *length = header->length;
                                *oem_addr = (unsigned long) addr;
                                return 0;
                        }
                }
        }
-       Dprintk("ES7000: did not find Unisys ACPI OEM table!\n");
        return -1;
  }
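
With the entry-count and length parameters dropped, a caller of these two helpers (for example the MPS/MADT OEM-check code) would now look roughly like the following hedged sketch; the variable names are illustrative and not taken from this patch:

unsigned long oem_addr;

if (!find_unisys_acpi_oem_table(&oem_addr))
	/* parse_unisys_oem() now walks the fixed six OEM entries itself
	 * and calls setup_unisys() when it recognises an ES7000. */
	parse_unisys_oem((char *) oem_addr);
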
  
diff --combined arch/ia64/Kconfig
index e1c9ea03f31fde5cf0d40deca90d56311d1e2034,17b5dbf8c311909faa9a1871da94868e89bbb547..00151a8320d8f1732e147d64698e28e747c45833
@@@ -60,7 -60,6 +60,7 @@@ choic
  
  config IA64_GENERIC
        bool "generic"
 +      select ACPI
        select NUMA
        select ACPI_NUMA
        select VIRTUAL_MEM_MAP
@@@ -341,7 -340,6 +341,7 @@@ config IA64_PALINF
  
  config ACPI_DEALLOCATE_IRQ
        bool
 +      depends on ACPI
        depends on IOSAPIC && EXPERIMENTAL
        default y
  
@@@ -353,10 -351,38 +353,10 @@@ endmen
  
  menu "Power management and ACPI"
  
 -config PM
 -      bool "Power Management support"
 -      depends on !IA64_HP_SIM
 -      default y
 -      help
 -        "Power Management" means that parts of your computer are shut
 -        off or put into a power conserving "sleep" mode if they are not
 -        being used.  There are two competing standards for doing this: APM
 -        and ACPI.  If you want to use either one, say Y here and then also
 -        to the requisite support below.
 -
 -        Power Management is most important for battery powered laptop
 -        computers; if you have a laptop, check out the Linux Laptop home
 -        page on the WWW at <http://www.linux-on-laptops.com/> and the
 -        Battery Powered Linux mini-HOWTO, available from
 -        <http://www.tldp.org/docs.html#howto>.
 -
 -        Note that, even if you say N here, Linux on the x86 architecture
 -        will issue the hlt instruction if nothing is to be done, thereby
 -        sending the processor to sleep and saving power.
 -
 -config ACPI
 -      bool
 -      depends on !IA64_HP_SIM
 -      default y
 -
 -if !IA64_HP_SIM
 +source "kernel/power/Kconfig"
  
  source "drivers/acpi/Kconfig"
  
 -endif
 -
  if PM
  
  source "arch/ia64/kernel/cpufreq/Kconfig"
@@@ -408,6 -434,11 +408,11 @@@ config GENERIC_IRQ_PROB
        bool
        default y
  
+ config GENERIC_PENDING_IRQ
+       bool
+       depends on GENERIC_HARDIRQS && SMP
+       default y
+ 
  source "arch/ia64/hp/sim/Kconfig"
  
  source "arch/ia64/oprofile/Kconfig"
diff --combined arch/x86_64/Kconfig
index 40242c61e90aac90f251e35595f6295b7762d560,8f868b67ef0f1fe0060d3688cd2a37573a830c4f..e63323e03ea9f17f8bec6c0d6aa9d2c94cad0be9
@@@ -24,6 -24,10 +24,10 @@@ config X8
        bool
        default y
  
+ config SEMAPHORE_SLEEPERS
+       bool
+       default y
+ 
  config MMU
        bool
        default y
@@@ -61,6 -65,10 +65,10 @@@ config GENERIC_IOMA
        bool
        default y
  
+ config ARCH_MAY_HAVE_PC_FDC
+       bool
+       default y
+ 
  source "init/Kconfig"
  
  
@@@ -144,6 -152,7 +152,6 @@@ config X86_CPUI
          with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
          /dev/cpu/31/cpuid.
  
 -# disable it for opteron optimized builds because it pulls in ACPI_BOOT
  config X86_HT
        bool
        depends on SMP && !MK8
@@@ -436,6 -445,11 +444,11 @@@ config ISA_DMA_AP
        bool
        default y
  
+ config GENERIC_PENDING_IRQ
+       bool
+       depends on GENERIC_HARDIRQS && SMP
+       default y
+ 
  menu "Power management options"
  
  source kernel/power/Kconfig
@@@ -460,6 -474,7 +473,6 @@@ config PCI_DIREC
  config PCI_MMCONFIG
        bool "Support mmconfig PCI config space access"
        depends on PCI && ACPI
 -      select ACPI_BOOT
  
  config UNORDERED_IO
         bool "Unordered IO mapping access"
index 0296ca6cbfa3fe285a33c95be086ad929ab2add1,c32e198d7b2bfa8c734cad85c249da3b230585c7..1579bdd0adcde450ad0a4991e1b6986895800822
@@@ -4,15 -4,15 +4,15 @@@
  
  extra-y       := head.o head64.o init_task.o vmlinux.lds
  EXTRA_AFLAGS  := -traditional
- obj-y := process.o semaphore.o signal.o entry.o traps.o irq.o \
+ obj-y := process.o signal.o entry.o traps.o irq.o \
                ptrace.o time.o ioport.o ldt.o setup.o i8259.o sys_x86_64.o \
                x8664_ksyms.o i387.o syscall.o vsyscall.o \
-               setup64.o bootflag.o e820.o reboot.o quirks.o
+               setup64.o bootflag.o e820.o reboot.o quirks.o i8237.o
  
  obj-$(CONFIG_X86_MCE)         += mce.o
  obj-$(CONFIG_X86_MCE_INTEL)   += mce_intel.o
  obj-$(CONFIG_MTRR)            += ../../i386/kernel/cpu/mtrr/
 -obj-$(CONFIG_ACPI_BOOT)               += acpi/
 +obj-$(CONFIG_ACPI)            += acpi/
  obj-$(CONFIG_X86_MSR)         += msr.o
  obj-$(CONFIG_MICROCODE)               += microcode.o
  obj-$(CONFIG_X86_CPUID)               += cpuid.o
@@@ -45,3 -45,4 +45,4 @@@ swiotlb-$(CONFIG_SWIOTLB)      += ../..
  microcode-$(subst m,y,$(CONFIG_MICROCODE))  += ../../i386/kernel/microcode.o
  intel_cacheinfo-y             += ../../i386/kernel/cpu/intel_cacheinfo.o
  quirks-y                      += ../../i386/kernel/quirks.o
+ i8237-y                               += ../../i386/kernel/i8237.o
index f031358906f53a5de94564dfb4ee3b1b75b966ab,f062aa03bab7e282d149091b852c3718e5dbb537..b1c144f73149854da5e9bb277e4309e281b60ba3
  #include <asm/smp.h>
  #include <asm/ipi.h>
  
 -#if defined(CONFIG_ACPI_BUS)
 +#if defined(CONFIG_ACPI)
  #include <acpi/acpi_bus.h>
  #endif
  
  /* which logical CPU number maps to which CPU (physical APIC ID) */
- u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
+ u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
  EXPORT_SYMBOL(x86_cpu_to_apicid);
  u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
  
@@@ -47,7 -47,7 +47,7 @@@ void __init clustered_apic_check(void
        u8 cluster_cnt[NUM_APIC_CLUSTERS];
        int num_cpus = 0;
  
 -#if defined(CONFIG_ACPI_BUS)
 +#if defined(CONFIG_ACPI)
        /*
         * Some x86_64 machines use physical APIC mode regardless of how many
         * procs/clusters are present (x86_64 ES7000 is an example).
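
The __read_mostly annotations added in this file and in io_apic.c below move rarely-written lookup tables into a separate data section so they do not share cache lines with frequently written data. A minimal, hedged usage sketch (the array name is hypothetical; on architectures without the section attribute the macro is simply empty):

#include <linux/cache.h>	/* provides __read_mostly */

/* Written once during boot, read in hot paths afterwards. */
static int example_lookup[16] __read_mostly;
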
index 1013a8fe44d7eb54aa6e904b2cadbab21dc11b4d,ba1a744e9bf0d72e0d8a49eaed6b287243a80415..40e0aca088fbce3d1ba03e3139c552d23ad3b241
@@@ -70,7 -70,7 +70,7 @@@ static struct irq_pin_list 
        short apic, pin, next;
  } irq_2_pin[PIN_MAP_SIZE];
  
- int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
+ int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
  #ifdef CONFIG_PCI_MSI
  #define vector_to_irq(vector)         \
        (platform_legacy_irq(vector) ? vector : vector_irq[vector])
  #define vector_to_irq(vector) (vector)
  #endif
  
+ #define __DO_ACTION(R, ACTION, FINAL)                                 \
+                                                                       \
+ {                                                                     \
+       int pin;                                                        \
+       struct irq_pin_list *entry = irq_2_pin + irq;                   \
+                                                                       \
+       for (;;) {                                                      \
+               unsigned int reg;                                       \
+               pin = entry->pin;                                       \
+               if (pin == -1)                                          \
+                       break;                                          \
+               reg = io_apic_read(entry->apic, 0x10 + R + pin*2);      \
+               reg ACTION;                                             \
+               io_apic_modify(entry->apic, reg);                       \
+               if (!entry->next)                                       \
+                       break;                                          \
+               entry = irq_2_pin + entry->next;                        \
+       }                                                               \
+       FINAL;                                                          \
+ }
+ 
+ #ifdef CONFIG_SMP
+ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
+ {
+       unsigned long flags;
+       unsigned int dest;
+       cpumask_t tmp;
+       cpus_and(tmp, mask, cpu_online_map);
+       if (cpus_empty(tmp))
+               tmp = TARGET_CPUS;
+       cpus_and(mask, tmp, CPU_MASK_ALL);
+       dest = cpu_mask_to_apicid(mask);
+       /*
+        * Only the high 8 bits are valid.
+        */
+       dest = SET_APIC_LOGICAL_ID(dest);
+       spin_lock_irqsave(&ioapic_lock, flags);
+       __DO_ACTION(1, = dest, )
+       set_irq_info(irq, mask);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+ }
+ #endif
+ 
  /*
   * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
   * shared ISA-space IRQs, so we have to support them. We are super
@@@ -101,26 -149,6 +149,6 @@@ static void add_pin_to_irq(unsigned in
        entry->pin = pin;
  }
  
- #define __DO_ACTION(R, ACTION, FINAL)                                 \
-                                                                       \
- {                                                                     \
-       int pin;                                                        \
-       struct irq_pin_list *entry = irq_2_pin + irq;                   \
-                                                                       \
-       for (;;) {                                                      \
-               unsigned int reg;                                       \
-               pin = entry->pin;                                       \
-               if (pin == -1)                                          \
-                       break;                                          \
-               reg = io_apic_read(entry->apic, 0x10 + R + pin*2);      \
-               reg ACTION;                                             \
-               io_apic_modify(entry->apic, reg);                       \
-               if (!entry->next)                                       \
-                       break;                                          \
-               entry = irq_2_pin + entry->next;                        \
-       }                                                               \
-       FINAL;                                                          \
- }
  
  #define DO_ACTION(name,R,ACTION, FINAL)                                       \
                                                                        \
@@@ -655,7 -683,7 +683,7 @@@ static inline int IO_APIC_irq_trigger(i
  }
  
  /* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
- u8 irq_vector[NR_IRQ_VECTORS] = { FIRST_DEVICE_VECTOR , 0 };
+ u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 };
  
  int assign_irq_vector(int irq)
  {
@@@ -767,6 -795,7 +795,7 @@@ static void __init setup_IO_APIC_irqs(v
                spin_lock_irqsave(&ioapic_lock, flags);
                io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
                io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
+               set_native_irq_info(irq, TARGET_CPUS);
                spin_unlock_irqrestore(&ioapic_lock, flags);
        }
        }
@@@ -1314,6 -1343,7 +1343,7 @@@ static unsigned int startup_edge_ioapic
   */
  static void ack_edge_ioapic_irq(unsigned int irq)
  {
+       move_irq(irq);
        if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
                                        == (IRQ_PENDING | IRQ_DISABLED))
                mask_IO_APIC_irq(irq);
@@@ -1343,26 -1373,10 +1373,10 @@@ static unsigned int startup_level_ioapi
  
  static void end_level_ioapic_irq (unsigned int irq)
  {
+       move_irq(irq);
        ack_APIC_irq();
  }
  
- static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
- {
-       unsigned long flags;
-       unsigned int dest;
-       dest = cpu_mask_to_apicid(mask);
-       /*
-        * Only the high 8 bits are valid.
-        */
-       dest = SET_APIC_LOGICAL_ID(dest);
-       spin_lock_irqsave(&ioapic_lock, flags);
-       __DO_ACTION(1, = dest, )
-       spin_unlock_irqrestore(&ioapic_lock, flags);
- }
  #ifdef CONFIG_PCI_MSI
  static unsigned int startup_edge_ioapic_vector(unsigned int vector)
  {
@@@ -1375,6 -1389,7 +1389,7 @@@ static void ack_edge_ioapic_vector(unsi
  {
        int irq = vector_to_irq(vector);
  
+       move_native_irq(vector);
        ack_edge_ioapic_irq(irq);
  }
  
@@@ -1389,6 -1404,7 +1404,7 @@@ static void end_level_ioapic_vector (un
  {
        int irq = vector_to_irq(vector);
  
+       move_native_irq(vector);
        end_level_ioapic_irq(irq);
  }
  
@@@ -1406,14 -1422,17 +1422,17 @@@ static void unmask_IO_APIC_vector (unsi
        unmask_IO_APIC_irq(irq);
  }
  
+ #ifdef CONFIG_SMP
  static void set_ioapic_affinity_vector (unsigned int vector,
                                        cpumask_t cpu_mask)
  {
        int irq = vector_to_irq(vector);
  
+       set_native_irq_info(vector, cpu_mask);
        set_ioapic_affinity_irq(irq, cpu_mask);
  }
- #endif
+ #endif // CONFIG_SMP
+ #endif // CONFIG_PCI_MSI
  
  /*
   * Level and edge triggered IO-APIC interrupts need different handling,
   * races.
   */
  
- static struct hw_interrupt_type ioapic_edge_type = {
+ static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
        .typename = "IO-APIC-edge",
        .startup        = startup_edge_ioapic,
        .shutdown       = shutdown_edge_ioapic,
        .disable        = disable_edge_ioapic,
        .ack            = ack_edge_ioapic,
        .end            = end_edge_ioapic,
+ #ifdef CONFIG_SMP
        .set_affinity = set_ioapic_affinity,
+ #endif
  };
  
- static struct hw_interrupt_type ioapic_level_type = {
+ static struct hw_interrupt_type ioapic_level_type __read_mostly = {
        .typename = "IO-APIC-level",
        .startup        = startup_level_ioapic,
        .shutdown       = shutdown_level_ioapic,
        .disable        = disable_level_ioapic,
        .ack            = mask_and_ack_level_ioapic,
        .end            = end_level_ioapic,
+ #ifdef CONFIG_SMP
        .set_affinity = set_ioapic_affinity,
+ #endif
  };
  
  static inline void init_IO_APIC_traps(void)
@@@ -1506,7 -1529,7 +1529,7 @@@ static void ack_lapic_irq (unsigned in
  
  static void end_lapic_irq (unsigned int i) { /* nothing */ }
  
- static struct hw_interrupt_type lapic_irq_type = {
+ static struct hw_interrupt_type lapic_irq_type __read_mostly = {
        .typename = "local-APIC-edge",
        .startup = NULL, /* startup_irq() not used for IRQ0 */
        .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
@@@ -1841,7 -1864,7 +1864,7 @@@ device_initcall(ioapic_init_sysfs)
                            ACPI-based IOAPIC Configuration
     -------------------------------------------------------------------------- */
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
  
  #define IO_APIC_MAX_ID                0xFE
  
@@@ -1918,12 -1941,13 +1941,13 @@@ int io_apic_set_pci_routing (int ioapic
        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
        io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
+       set_native_irq_info(use_pci_vector() ?  entry.vector : irq, TARGET_CPUS);
        spin_unlock_irqrestore(&ioapic_lock, flags);
  
        return 0;
  }
  
 -#endif /*CONFIG_ACPI_BOOT*/
 +#endif /* CONFIG_ACPI */
  
  
  /*
   * we need to reprogram the ioredtbls to cater for the cpus which have come online
   * so mask in all cases should simply be TARGET_CPUS
   */
+ #ifdef CONFIG_SMP
  void __init setup_ioapic_dest(void)
  {
        int pin, ioapic, irq, irq_entry;
  
        }
  }
+ #endif
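
For context: __DO_ACTION(), moved earlier in the file so the new CONFIG_SMP affinity helper can use it, is the walk over irq_2_pin shared by all the redirection-table tweaks, and DO_ACTION() wraps that walk into a named helper. A hedged sketch of how a pre-existing helper such as the mask routine is generated (the actual invocation sits outside this hunk, so treat the constants as illustrative):

/* DO_ACTION(name, R, ACTION, FINAL) expands to roughly:
 *
 *	static void name##_IO_APIC_irq(unsigned int irq)
 *	__DO_ACTION(R, ACTION, FINAL)
 *
 * so an invocation like
 *
 *	DO_ACTION(__mask, 0, |= 0x00010000, io_apic_sync(entry->apic))
 *
 * yields __mask_IO_APIC_irq(): for every (apic, pin) chained off
 * irq_2_pin[irq], read register 0x10 + pin*2, set bit 16 (the mask
 * bit of the redirection entry), write it back, then sync the APIC.
 */
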
index cfcebc8ab7b48088713ece980e520f7b507159e1,b356f8e6adfe974f94cd890cd1ddd1d9cc17dd08..0511d8087910d077741a93ce811a4241ee67e0e2
   * Machine setup..
   */
  
- struct cpuinfo_x86 boot_cpu_data;
+ struct cpuinfo_x86 boot_cpu_data __read_mostly;
  
  unsigned long mmu_cr4_features;
  
  int acpi_disabled;
  EXPORT_SYMBOL(acpi_disabled);
 -#ifdef        CONFIG_ACPI_BOOT
 +#ifdef        CONFIG_ACPI
  extern int __initdata acpi_ht;
  extern acpi_interrupt_flags   acpi_sci_flags;
  int __initdata acpi_force = 0;
@@@ -294,7 -294,7 +294,7 @@@ static __init void parse_cmdline_early 
                        maxcpus = simple_strtoul(from + 8, NULL, 0);
                }
  #endif
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
                /* "acpi=off" disables both ACPI table parsing and interpreter init */
                if (!memcmp(from, "acpi=off", 8))
                        disable_acpi();
@@@ -566,7 -566,7 +566,7 @@@ void __init setup_arch(char **cmdline_p
  
        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
  
        check_ioapic();
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
diff --combined drivers/char/hpet.c
index 97feb7f7024d83e4180ba2477f89f7fa59445b5f,a695f25e4497b647cb20c776f1f2f0a0e6b4d545..5fe8461271fc35812222bd2b2e2f8e138d1ffc02
@@@ -44,7 -44,7 +44,7 @@@
  /*
   * The High Precision Event Timer driver.
   * This driver is closely modelled after the rtc.c driver.
-  * http://www.intel.com/labs/platcomp/hpet/hpetspec.htm
+  * http://www.intel.com/hardwaredesign/hpetspec.htm
   */
  #define       HPET_USER_FREQ  (64)
  #define       HPET_DRIFT      (500)
@@@ -712,7 -712,7 +712,7 @@@ static void hpet_register_interpolator(
        ti->shift = 10;
        ti->addr = &hpetp->hp_hpet->hpet_mc;
        ti->frequency = hpet_time_div(hpets->hp_period);
-       ti->drift = ti->frequency * HPET_DRIFT / 1000000;
+       ti->drift = HPET_DRIFT;
        ti->mask = -1;
  
        hpetp->hp_interpolator = ti;
@@@ -906,15 -906,11 +906,15 @@@ static acpi_status hpet_resources(struc
                if (irqp->number_of_interrupts > 0) {
                        hdp->hd_nirqs = irqp->number_of_interrupts;
  
 -                      for (i = 0; i < hdp->hd_nirqs; i++)
 -                              hdp->hd_irq[i] =
 +                      for (i = 0; i < hdp->hd_nirqs; i++) {
 +                              int rc =
                                    acpi_register_gsi(irqp->interrupts[i],
                                                      irqp->edge_level,
                                                      irqp->active_high_low);
 +                              if (rc < 0)
 +                                      return AE_ERROR;
 +                              hdp->hd_irq[i] = rc;
 +                      }
                }
        }
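
acpi_register_gsi() can fail, so its result is now checked before being stored as an IRQ number. The same pattern applies anywhere a GSI taken from an ACPI table is turned into a Linux IRQ; a hedged sketch, with gsi, edge_level, active_high_low and hdp standing in for the values parsed above:

int irq = acpi_register_gsi(gsi, edge_level, active_high_low);

if (irq < 0)
	return AE_ERROR;	/* registration failed: do not store a bogus IRQ */
hdp->hd_irq[i] = irq;		/* only use the value on success */
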
  
index adbec73b80a65f9e733934c765806eed906bc08c,1abec687865cf5b4862d26f4552abc39e748094d..278f841049968c15867d25c34e2f17772d375829
  # endif
  static inline void add_usec_to_timer(struct timer_list *t, long v)
  {
-       t->sub_expires += nsec_to_arch_cycle(v * 1000);
-       while (t->sub_expires >= arch_cycles_per_jiffy)
+       t->arch_cycle_expires += nsec_to_arch_cycle(v * 1000);
+       while (t->arch_cycle_expires >= arch_cycles_per_jiffy)
        {
                t->expires++;
-               t->sub_expires -= arch_cycles_per_jiffy;
+               t->arch_cycle_expires -= arch_cycles_per_jiffy;
        }
  }
  #endif
@@@ -75,8 -75,7 +75,7 @@@
  #include <asm/io.h>
  #include "ipmi_si_sm.h"
  #include <linux/init.h>
- #define IPMI_SI_VERSION "v33"
+ #include <linux/dmi.h>
  
  /* Measure times between events in the driver. */
  #undef DEBUG_TIMING
@@@ -109,6 -108,21 +108,21 @@@ enum si_type 
      SI_KCS, SI_SMIC, SI_BT
  };
  
+ 
+ struct ipmi_device_id {
+       unsigned char device_id;
+       unsigned char device_revision;
+       unsigned char firmware_revision_1;
+       unsigned char firmware_revision_2;
+       unsigned char ipmi_version;
+       unsigned char additional_device_support;
+       unsigned char manufacturer_id[3];
+       unsigned char product_id[2];
+       unsigned char aux_firmware_revision[4];
+ } __attribute__((packed));
+ 
+ #define ipmi_version_major(v) ((v)->ipmi_version & 0xf)
+ #define ipmi_version_minor(v) ((v)->ipmi_version >> 4)
+ 
  struct smi_info
  {
        ipmi_smi_t             intf;
        void (*irq_cleanup)(struct smi_info *info);
        unsigned int io_size;
  
+       /* Per-OEM handler, called from handle_flags().
+          Returns 1 when handle_flags() needs to be re-run
+          or 0 indicating it set si_state itself.
+       */
+       int (*oem_data_avail_handler)(struct smi_info *smi_info);
        /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
           is set to hold the flags until we are done handling everything
           from the flags. */
  #define RECEIVE_MSG_AVAIL     0x01
  #define EVENT_MSG_BUFFER_FULL 0x02
  #define WDT_PRE_TIMEOUT_INT   0x08
+ #define OEM0_DATA_AVAIL     0x20
+ #define OEM1_DATA_AVAIL     0x40
+ #define OEM2_DATA_AVAIL     0x80
+ #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
+                              OEM1_DATA_AVAIL | \
+                              OEM2_DATA_AVAIL)
        unsigned char       msg_flags;
  
        /* If set to true, this will request events the next time the
           interrupts. */
        int interrupt_disabled;
  
-       unsigned char ipmi_si_dev_rev;
-       unsigned char ipmi_si_fw_rev_major;
-       unsigned char ipmi_si_fw_rev_minor;
-       unsigned char ipmi_version_major;
-       unsigned char ipmi_version_minor;
+       struct ipmi_device_id device_id;
  
        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;
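
The raw bytes of a Get Device ID response are now kept in the packed struct ipmi_device_id, and the IPMI version nibbles are extracted with the two macros above. A small hedged usage sketch (smi is a hypothetical struct smi_info * whose device_id has been filled in):

printk(KERN_INFO "ipmi_si: IPMI version %d.%d\n",
       ipmi_version_major(&smi->device_id),	/* low nibble: 1 for 0x51 */
       ipmi_version_minor(&smi->device_id));	/* high nibble: 5 for 0x51 */
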
@@@ -245,7 -267,7 +267,7 @@@ static enum si_sm_result start_next_msg
                entry = smi_info->xmit_msgs.next;
        }
  
-       if (!entry) {
+       if (! entry) {
                smi_info->curr_msg = NULL;
                rv = SI_SM_IDLE;
        } else {
@@@ -306,7 -328,7 +328,7 @@@ static void start_clear_flags(struct sm
     memory, we will re-enable the interrupt. */
  static inline void disable_si_irq(struct smi_info *smi_info)
  {
-       if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
+       if ((smi_info->irq) && (! smi_info->interrupt_disabled)) {
                disable_irq_nosync(smi_info->irq);
                smi_info->interrupt_disabled = 1;
        }
@@@ -322,6 -344,7 +344,7 @@@ static inline void enable_si_irq(struc
  
  static void handle_flags(struct smi_info *smi_info)
  {
+  retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
                spin_lock(&smi_info->count_lock);
        } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
                /* Messages available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
-               if (!smi_info->curr_msg) {
+               if (! smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
        } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
                /* Events available. */
                smi_info->curr_msg = ipmi_alloc_smi_msg();
-               if (!smi_info->curr_msg) {
+               if (! smi_info->curr_msg) {
                        disable_si_irq(smi_info);
                        smi_info->si_state = SI_NORMAL;
                        return;
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
+       } else if (smi_info->msg_flags & OEM_DATA_AVAIL) {
+               if (smi_info->oem_data_avail_handler)
+                       if (smi_info->oem_data_avail_handler(smi_info))
+                               goto retry;
        } else {
                smi_info->si_state = SI_NORMAL;
        }
@@@ -387,7 -414,7 +414,7 @@@ static void handle_transaction_done(str
  #endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
-               if (!smi_info->curr_msg)
+               if (! smi_info->curr_msg)
                        break;
  
                smi_info->curr_msg->rsp_size
@@@ -761,18 -788,20 +788,20 @@@ static void si_restart_short_timer(stru
  #if defined(CONFIG_HIGH_RES_TIMERS)
        unsigned long flags;
        unsigned long jiffies_now;
+       unsigned long seq;
  
        if (del_timer(&(smi_info->si_timer))) {
                /* If we don't delete the timer, then it will go off
                   immediately, anyway.  So we only process if we
                   actually delete the timer. */
  
-               /* We already have irqsave on, so no need for it
-                    here. */
-               read_lock(&xtime_lock);
-               jiffies_now = jiffies;
-               smi_info->si_timer.expires = jiffies_now;
-               smi_info->si_timer.sub_expires = get_arch_cycles(jiffies_now);
+               do {
+                       seq = read_seqbegin_irqsave(&xtime_lock, flags);
+                       jiffies_now = jiffies;
+                       smi_info->si_timer.expires = jiffies_now;
+                       smi_info->si_timer.arch_cycle_expires
+                               = get_arch_cycles(jiffies_now);
+               } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
  
                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
  
@@@ -826,15 -855,19 +855,19 @@@ static void smi_timeout(unsigned long d
        /* If the state machine asks for a short delay, then shorten
             the timer timeout. */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
+ #if defined(CONFIG_HIGH_RES_TIMERS)
+               unsigned long seq;
+ #endif
                spin_lock_irqsave(&smi_info->count_lock, flags);
                smi_info->short_timeouts++;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
  #if defined(CONFIG_HIGH_RES_TIMERS)
-               read_lock(&xtime_lock);
-                 smi_info->si_timer.expires = jiffies;
-                 smi_info->si_timer.sub_expires
-                         = get_arch_cycles(smi_info->si_timer.expires);
-                 read_unlock(&xtime_lock);
+               do {
+                       seq = read_seqbegin_irqsave(&xtime_lock, flags);
+                       smi_info->si_timer.expires = jiffies;
+                       smi_info->si_timer.arch_cycle_expires
+                               = get_arch_cycles(smi_info->si_timer.expires);
+               } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                add_usec_to_timer(&smi_info->si_timer, SI_SHORT_TIMEOUT_USEC);
  #else
                smi_info->si_timer.expires = jiffies + 1;
                spin_unlock_irqrestore(&smi_info->count_lock, flags);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
  #if defined(CONFIG_HIGH_RES_TIMERS)
-               smi_info->si_timer.sub_expires = 0;
+               smi_info->si_timer.arch_cycle_expires = 0;
  #endif
        }
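
xtime_lock is a seqlock in this kernel, so the two hunks above replace the old read_lock()/read_unlock() pairs with a retry loop: the reader simply re-reads if a writer ran in between. The generic reader pattern, as a hedged sketch with a placeholder sample:

unsigned long seq, flags, now;

do {
	seq = read_seqbegin_irqsave(&xtime_lock, flags);
	now = jiffies;		/* sample the timekeeping state */
	/* ... read anything else xtime_lock protects ... */
} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
/* "now" is consistent: no writer intervened while it was taken. */
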
  
@@@ -986,7 -1019,7 +1019,7 @@@ MODULE_PARM_DESC(slave_addrs, "Set the 
  #define IPMI_MEM_ADDR_SPACE 1
  #define IPMI_IO_ADDR_SPACE  2
  
 -#if defined(CONFIG_ACPI_INTERPRETER) || defined(CONFIG_X86) || defined(CONFIG_PCI)
 +#if defined(CONFIG_ACPI) || defined(CONFIG_X86) || defined(CONFIG_PCI)
  static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr)
  {
        int i;
@@@ -1014,7 -1047,7 +1047,7 @@@ static int std_irq_setup(struct smi_inf
  {
        int rv;
  
-       if (!info->irq)
+       if (! info->irq)
                return 0;
  
        if (info->si_type == SI_BT) {
                                 SA_INTERRUPT,
                                 DEVICE_NAME,
                                 info);
-               if (!rv)
+               if (! rv)
                        /* Enable the interrupt in the BT interface. */
                        info->io.outputb(&info->io, IPMI_BT_INTMASK_REG,
                                         IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
  
  static void std_irq_cleanup(struct smi_info *info)
  {
-       if (!info->irq)
+       if (! info->irq)
                return;
  
        if (info->si_type == SI_BT)
@@@ -1121,7 -1154,7 +1154,7 @@@ static int port_setup(struct smi_info *
        unsigned int *addr = info->io.info;
        int           mapsize;
  
-       if (!addr || (!*addr))
+       if (! addr || (! *addr))
                return -ENODEV;
  
        info->io_cleanup = port_cleanup;
@@@ -1164,15 -1197,15 +1197,15 @@@ static int try_init_port(int intf_num, 
  {
        struct smi_info *info;
  
-       if (!ports[intf_num])
+       if (! ports[intf_num])
                return -ENODEV;
  
-       if (!is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
+       if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE,
                              ports[intf_num]))
                return -ENODEV;
  
        info = kmalloc(sizeof(*info), GFP_KERNEL);
-       if (!info) {
+       if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n");
                return -ENOMEM;
        }
        info->io.info = &(ports[intf_num]);
        info->io.addr = NULL;
        info->io.regspacing = regspacings[intf_num];
-       if (!info->io.regspacing)
+       if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = regsizes[intf_num];
-       if (!info->io.regsize)
+       if (! info->io.regsize)
                info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];
        info->irq = 0;
@@@ -1270,7 -1303,7 +1303,7 @@@ static int mem_setup(struct smi_info *i
        unsigned long *addr = info->io.info;
        int           mapsize;
  
-       if (!addr || (!*addr))
+       if (! addr || (! *addr))
                return -ENODEV;
  
        info->io_cleanup = mem_cleanup;
@@@ -1325,15 -1358,15 +1358,15 @@@ static int try_init_mem(int intf_num, s
  {
        struct smi_info *info;
  
-       if (!addrs[intf_num])
+       if (! addrs[intf_num])
                return -ENODEV;
  
-       if (!is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
+       if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE,
                              addrs[intf_num]))
                return -ENODEV;
  
        info = kmalloc(sizeof(*info), GFP_KERNEL);
-       if (!info) {
+       if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n");
                return -ENOMEM;
        }
        info->io.info = &addrs[intf_num];
        info->io.addr = NULL;
        info->io.regspacing = regspacings[intf_num];
-       if (!info->io.regspacing)
+       if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = regsizes[intf_num];
-       if (!info->io.regsize)
+       if (! info->io.regsize)
                info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];
        info->irq = 0;
  }
  
  
 -#ifdef CONFIG_ACPI_INTERPRETER
 +#ifdef CONFIG_ACPI
  
  #include <linux/acpi.h>
  
@@@ -1404,7 -1437,7 +1437,7 @@@ static int acpi_gpe_irq_setup(struct sm
  {
        acpi_status status;
  
-       if (!info->irq)
+       if (! info->irq)
                return 0;
  
        /* FIXME - is level triggered right? */
  
  static void acpi_gpe_irq_cleanup(struct smi_info *info)
  {
-       if (!info->irq)
+       if (! info->irq)
                return;
  
        acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe);
@@@ -1484,9 -1517,6 +1517,9 @@@ static int try_init_acpi(int intf_num, 
        char             *io_type;
        u8               addr_space;
  
 +      if (acpi_disabled)
 +              return -ENODEV;
 +
        if (acpi_failure)
                return -ENODEV;
  
                addr_space = IPMI_MEM_ADDR_SPACE;
        else
                addr_space = IPMI_IO_ADDR_SPACE;
-       if (!is_new_interface(-1, addr_space, spmi->addr.address))
+       if (! is_new_interface(-1, addr_space, spmi->addr.address))
                return -ENODEV;
  
-       if (!spmi->addr.register_bit_width) {
+       if (! spmi->addr.register_bit_width) {
                acpi_failure = 1;
                return -ENODEV;
        }
        }
  
        info = kmalloc(sizeof(*info), GFP_KERNEL);
-       if (!info) {
+       if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
                return -ENOMEM;
        }
@@@ -1613,22 -1643,15 +1646,15 @@@ typedef struct dmi_ipmi_dat
  static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS];
  static int dmi_data_entries;
  
- typedef struct dmi_header
- {
-       u8      type;
-       u8      length;
-       u16     handle;
- } dmi_header_t;
- static int decode_dmi(dmi_header_t __iomem *dm, int intf_num)
+ static int __init decode_dmi(struct dmi_header *dm, int intf_num)
  {
-       u8              __iomem *data = (u8 __iomem *)dm;
+       u8              *data = (u8 *)dm;
        unsigned long   base_addr;
        u8              reg_spacing;
-       u8              len = readb(&dm->length);
+       u8              len = dm->length;
        dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
  
-       ipmi_data->type = readb(&data[4]);
+       ipmi_data->type = data[4];
  
        memcpy(&base_addr, data+8, sizeof(unsigned long));
        if (len >= 0x11) {
                }
                /* If bit 4 of byte 0x10 is set, then the lsb for the address
                   is odd. */
-               ipmi_data->base_addr = base_addr | ((readb(&data[0x10]) & 0x10) >> 4);
+               ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
  
-               ipmi_data->irq = readb(&data[0x11]);
+               ipmi_data->irq = data[0x11];
  
                /* The top two bits of byte 0x10 hold the register spacing. */
-               reg_spacing = (readb(&data[0x10]) & 0xC0) >> 6;
+               reg_spacing = (data[0x10] & 0xC0) >> 6;
                switch(reg_spacing){
                case 0x00: /* Byte boundaries */
                    ipmi_data->offset = 1;
                ipmi_data->offset = 1;
        }
  
-       ipmi_data->slave_addr = readb(&data[6]);
+       ipmi_data->slave_addr = data[6];
  
        if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) {
                dmi_data_entries++;
        return -1;
  }
  
- static int dmi_table(u32 base, int len, int num)
+ static void __init dmi_find_bmc(void)
  {
-       u8                __iomem *buf;
-       struct dmi_header __iomem *dm;
-       u8                __iomem *data;
-       int               i=1;
-       int               status=-1;
+       struct dmi_device *dev = NULL;
        int               intf_num = 0;
  
-       buf = ioremap(base, len);
-       if(buf==NULL)
-               return -1;
-       data = buf;
-       while(i<num && (data - buf) < len)
-       {
-               dm=(dmi_header_t __iomem *)data;
-               if((data-buf+readb(&dm->length)) >= len)
-                       break;
-               if (readb(&dm->type) == 38) {
-                       if (decode_dmi(dm, intf_num) == 0) {
-                               intf_num++;
-                               if (intf_num >= SI_MAX_DRIVERS)
-                                       break;
-                       }
-               }
-               data+=readb(&dm->length);
-               while((data-buf) < len && (readb(data)||readb(data+1)))
-                       data++;
-               data+=2;
-               i++;
-       }
-       iounmap(buf);
-       return status;
- }
- static inline int dmi_checksum(u8 *buf)
- {
-       u8   sum=0;
-       int  a;
-       for(a=0; a<15; a++)
-               sum+=buf[a];
-       return (sum==0);
- }
- static int dmi_decode(void)
- {
-       u8   buf[15];
-       u32  fp=0xF0000;
- #ifdef CONFIG_SIMNOW
-       return -1;
- #endif
-       while(fp < 0xFFFFF)
-       {
-               isa_memcpy_fromio(buf, fp, 15);
-               if(memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf))
-               {
-                       u16 num=buf[13]<<8|buf[12];
-                       u16 len=buf[7]<<8|buf[6];
-                       u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
+       while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) {
+               if (intf_num >= SI_MAX_DRIVERS)
+                       break;
  
-                       if(dmi_table(base, len, num) == 0)
-                               return 0;
-               }
-               fp+=16;
+               decode_dmi((struct dmi_header *) dev->device_data, intf_num++);
        }
-       return -1;
  }
  
  static int try_init_smbios(int intf_num, struct smi_info **new_info)
  {
-       struct smi_info   *info;
-       dmi_ipmi_data_t   *ipmi_data = dmi_data+intf_num;
-       char              *io_type;
+       struct smi_info *info;
+       dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num;
+       char            *io_type;
  
        if (intf_num >= dmi_data_entries)
                return -ENODEV;
  
-       switch(ipmi_data->type) {
+       switch (ipmi_data->type) {
                case 0x01: /* KCS */
                        si_type[intf_num] = "kcs";
                        break;
        }
  
        info = kmalloc(sizeof(*info), GFP_KERNEL);
-       if (!info) {
+       if (! info) {
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n");
                return -ENOMEM;
        }
  
        regspacings[intf_num] = ipmi_data->offset;
        info->io.regspacing = regspacings[intf_num];
-       if (!info->io.regspacing)
+       if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];
@@@ -1856,14 -1814,14 +1817,14 @@@ static int find_pci_smic(int intf_num, 
  
        pci_smic_checked = 1;
  
-       if ((pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID,
-                                      NULL)))
-               ;
-       else if ((pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL)) &&
-                pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)
-               fe_rmc = 1;
-       else
-               return -ENODEV;
+       pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL);
+       if (! pci_dev) {
+               pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL);
+               if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID))
+                       fe_rmc = 1;
+               else
+                       return -ENODEV;
+       }
  
        error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr);
        if (error)
        }
  
        /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */
-       if (!(base_addr & 0x0001))
+       if (! (base_addr & 0x0001))
        {
                pci_dev_put(pci_dev);
                printk(KERN_ERR
        }
  
        base_addr &= 0xFFFE;
-       if (!fe_rmc)
+       if (! fe_rmc)
                /* Data register starts at base address + 1 in eRMC */
                ++base_addr;
  
-       if (!is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
+       if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) {
                pci_dev_put(pci_dev);
                return -ENODEV;
        }
  
        info = kmalloc(sizeof(*info), GFP_KERNEL);
-       if (!info) {
+       if (! info) {
                pci_dev_put(pci_dev);
                printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n");
                return -ENOMEM;
        ports[intf_num] = base_addr;
        info->io.info = &(ports[intf_num]);
        info->io.regspacing = regspacings[intf_num];
-       if (!info->io.regspacing)
+       if (! info->io.regspacing)
                info->io.regspacing = DEFAULT_REGSPACING;
        info->io.regsize = DEFAULT_REGSPACING;
        info->io.regshift = regshifts[intf_num];
  static int try_init_plug_and_play(int intf_num, struct smi_info **new_info)
  {
  #ifdef CONFIG_PCI
-       if (find_pci_smic(intf_num, new_info)==0)
+       if (find_pci_smic(intf_num, new_info) == 0)
                return 0;
  #endif
        /* Include other methods here. */
@@@ -1946,7 -1904,7 +1907,7 @@@ static int try_get_dev_id(struct smi_in
        int               rv = 0;
  
        resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
-       if (!resp)
+       if (! resp)
                return -ENOMEM;
  
        /* Do a Get Device ID command, since it comes back with some
        }
  
        /* Record info from the get device id, in case we need it. */
-       smi_info->ipmi_si_dev_rev = resp[4] & 0xf;
-       smi_info->ipmi_si_fw_rev_major = resp[5] & 0x7f;
-       smi_info->ipmi_si_fw_rev_minor = resp[6];
-       smi_info->ipmi_version_major = resp[7] & 0xf;
-       smi_info->ipmi_version_minor = resp[7] >> 4;
+       memcpy(&smi_info->device_id, &resp[3],
+              min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id)));
  
   out:
        kfree(resp);
@@@ -2031,7 -1986,7 +1989,7 @@@ static int stat_file_read_proc(char *pa
        struct smi_info *smi = data;
  
        out += sprintf(out, "interrupts_enabled:    %d\n",
-                      smi->irq && !smi->interrupt_disabled);
+                      smi->irq && ! smi->interrupt_disabled);
        out += sprintf(out, "short_timeouts:        %ld\n",
                       smi->short_timeouts);
        out += sprintf(out, "long_timeouts:         %ld\n",
        return (out - ((char *) page));
  }
  
+ /*
+  * oem_data_avail_to_receive_msg_avail
+  * @info - smi_info structure with msg_flags set
+  *
+  * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
+  * Returns 1 indicating need to re-run handle_flags().
+  */
+ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
+ {
+       smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
+                               RECEIVE_MSG_AVAIL);
+       return 1;
+ }
+ 
+ /*
+  * setup_dell_poweredge_oem_data_handler
+  * @info - smi_info.device_id must be populated
+  *
+  * Systems that match, but have firmware version < 1.40 may assert
+  * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
+  * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
+  * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
+  * as RECEIVE_MSG_AVAIL instead.
+  *
+  * As Dell has no plans to release IPMI 1.5 firmware that *ever*
+  * assert the OEM[012] bits, and if it did, the driver would have to
+  * change to handle that properly, we don't actually check for the
+  * firmware version.
+  * Device ID = 0x20                BMC on PowerEdge 8G servers
+  * Device Revision = 0x80
+  * Firmware Revision1 = 0x01       BMC version 1.40
+  * Firmware Revision2 = 0x40       BCD encoded
+  * IPMI Version = 0x51             IPMI 1.5
+  * Manufacturer ID = A2 02 00      Dell IANA
+  *
+  */
+ #define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
+ #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
+ #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
+ #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
+ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
+ {
+       struct ipmi_device_id *id = &smi_info->device_id;
+       const char mfr[3]=DELL_IANA_MFR_ID;
+       if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))
+           && (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID)
+           && (id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV)
+           && (id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION))
+       {
+               smi_info->oem_data_avail_handler =
+                       oem_data_avail_to_receive_msg_avail;
+       }
+ }
+ 
+ /*
+  * setup_oem_data_handler
+  * @info - smi_info.device_id must be filled in already
+  *
+  * Fills in smi_info.oem_data_avail_handler
+  * when we know what function to use there.
+  */
+ static void setup_oem_data_handler(struct smi_info *smi_info)
+ {
+       setup_dell_poweredge_oem_data_handler(smi_info);
+ }
+ 
  /* Returns 0 if initialized, or negative on an error. */
  static int init_one_smi(int intf_num, struct smi_info **smi)
  {
        rv = try_init_mem(intf_num, &new_smi);
        if (rv)
                rv = try_init_port(intf_num, &new_smi);
 -#ifdef CONFIG_ACPI_INTERPRETER
 +#ifdef CONFIG_ACPI
-       if ((rv) && (si_trydefaults)) {
+       if (rv && si_trydefaults)
                rv = try_init_acpi(intf_num, &new_smi);
-       }
  #endif
  #ifdef CONFIG_X86
-       if ((rv) && (si_trydefaults)) {
+       if (rv && si_trydefaults)
                rv = try_init_smbios(intf_num, &new_smi);
-         }
  #endif
-       if ((rv) && (si_trydefaults)) {
+       if (rv && si_trydefaults)
                rv = try_init_plug_and_play(intf_num, &new_smi);
-       }
  
        if (rv)
                return rv;
        new_smi->si_sm = NULL;
        new_smi->handlers = NULL;
  
-       if (!new_smi->irq_setup) {
+       if (! new_smi->irq_setup) {
                new_smi->irq = irqs[intf_num];
                new_smi->irq_setup = std_irq_setup;
                new_smi->irq_cleanup = std_irq_cleanup;
  
        /* Allocate the state machine's data and initialize it. */
        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
-       if (!new_smi->si_sm) {
+       if (! new_smi->si_sm) {
                printk(" Could not allocate state machine memory\n");
                rv = -ENOMEM;
                goto out_err;
        if (rv)
                goto out_err;
  
+       setup_oem_data_handler(new_smi);
        /* Try to claim any interrupts. */
        new_smi->irq_setup(new_smi);
  
  
        rv = ipmi_register_smi(&handlers,
                               new_smi,
-                              new_smi->ipmi_version_major,
-                              new_smi->ipmi_version_minor,
+                              ipmi_version_major(&new_smi->device_id),
+                              ipmi_version_minor(&new_smi->device_id),
                               new_smi->slave_addr,
                               &(new_smi->intf));
        if (rv) {
  
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (!new_smi->timer_stopped) {
+       while (! new_smi->timer_stopped) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
        }
@@@ -2273,7 -2293,7 +2296,7 @@@ static __init int init_ipmi_si(void
        /* Parse out the si_type string into its components. */
        str = si_type_str;
        if (*str != '\0') {
-               for (i=0; (i<SI_MAX_PARMS) && (*str != '\0'); i++) {
+               for (i = 0; (i < SI_MAX_PARMS) && (*str != '\0'); i++) {
                        si_type[i] = str;
                        str = strchr(str, ',');
                        if (str) {
                }
        }
  
-       printk(KERN_INFO "IPMI System Interface driver version "
-              IPMI_SI_VERSION);
-       if (kcs_smi_handlers.version)
-               printk(", KCS version %s", kcs_smi_handlers.version);
-       if (smic_smi_handlers.version)
-               printk(", SMIC version %s", smic_smi_handlers.version);
-       if (bt_smi_handlers.version)
-               printk(", BT version %s", bt_smi_handlers.version);
-       printk("\n");
+       printk(KERN_INFO "IPMI System Interface driver.\n");
  
  #ifdef CONFIG_X86
-       dmi_decode();
+       dmi_find_bmc();
  #endif
  
        rv = init_one_smi(0, &(smi_infos[pos]));
-       if (rv && !ports[0] && si_trydefaults) {
+       if (rv && ! ports[0] && si_trydefaults) {
                /* If we are trying defaults and the initial port is
                     not set, then set it. */
                si_type[0] = "kcs";
        if (rv == 0)
                pos++;
  
-       for (i=1; i < SI_MAX_PARMS; i++) {
+       for (i = 1; i < SI_MAX_PARMS; i++) {
                rv = init_one_smi(i, &(smi_infos[pos]));
                if (rv == 0)
                        pos++;
@@@ -2364,14 -2376,14 +2379,14 @@@ static void __exit cleanup_one_si(struc
  
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
-       while (!to_clean->timer_stopped) {
+       while (! to_clean->timer_stopped) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
        }
  
        /* Interrupts and timeouts are stopped, now make sure the
           interface is in a clean state. */
-       while ((to_clean->curr_msg) || (to_clean->si_state != SI_NORMAL)) {
+       while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                poll(to_clean);
                set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(1);
@@@ -2395,13 -2407,15 +2410,15 @@@ static __exit void cleanup_ipmi_si(void
  {
        int i;
  
-       if (!initialized)
+       if (! initialized)
                return;
  
-       for (i=0; i<SI_MAX_DRIVERS; i++) {
+       for (i = 0; i < SI_MAX_DRIVERS; i++) {
                cleanup_one_si(smi_infos[i]);
        }
  }
  module_exit(cleanup_ipmi_si);
  
  MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
+ MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
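
The oem_data_avail_handler hook added above gives per-vendor code a chance to reinterpret the OEM0/1/2 flag bits before handle_flags() gives up on them. A hedged sketch of how another quirk could plug into the same hook, following the Dell handler's shape (the vendor ID and function names here are hypothetical, not part of this patch):

static int example_oem0_means_msg_avail(struct smi_info *smi_info)
{
	/* Treat OEM0 as "receive message available", then ask
	   handle_flags() to re-run by returning 1, as the Dell
	   PowerEdge handler does. */
	smi_info->msg_flags = (smi_info->msg_flags & ~OEM0_DATA_AVAIL)
				| RECEIVE_MSG_AVAIL;
	return 1;
}

static void setup_example_oem_data_handler(struct smi_info *smi_info)
{
	/* Hypothetical manufacturer ID, matched against the bytes
	   copied from the Get Device ID response. */
	const unsigned char mfr[3] = { 0x12, 0x34, 0x56 };

	if (!memcmp(mfr, smi_info->device_id.manufacturer_id, sizeof(mfr)))
		smi_info->oem_data_avail_handler = example_oem0_means_msg_avail;
}
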
index 6db549c9480c0a8f7a034be65d1127563cbbdf18,675b76a42403fdf8b39f01ab28b65ba2408fa519..416d30debe6c97a12582c7646d3bb6d3eab46ec8
@@@ -73,35 -73,25 +73,35 @@@ static void decode_irq_flags(int flag, 
  }
  
  static void
 -pnpacpi_parse_allocated_irqresource(struct pnp_resource_table * res, int irq)
 +pnpacpi_parse_allocated_irqresource(struct pnp_resource_table * res, u32 gsi,
 +      int edge_level, int active_high_low)
  {
        int i = 0;
 +      int irq;
 +
 +      if (!valid_IRQ(gsi))
 +              return;
 +
        while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) &&
                        i < PNP_MAX_IRQ)
                i++;
 -      if (i < PNP_MAX_IRQ) {
 -              res->irq_resource[i].flags = IORESOURCE_IRQ;  //Also clears _UNSET flag
 -              if (irq == -1) {
 -                      res->irq_resource[i].flags |= IORESOURCE_DISABLED;
 -                      return;
 -              }
 -              res->irq_resource[i].start =(unsigned long) irq;
 -              res->irq_resource[i].end = (unsigned long) irq;
 +      if (i >= PNP_MAX_IRQ)
 +              return;
 +
 +      res->irq_resource[i].flags = IORESOURCE_IRQ;  // Also clears _UNSET flag
 +      irq = acpi_register_gsi(gsi, edge_level, active_high_low);
 +      if (irq < 0) {
 +              res->irq_resource[i].flags |= IORESOURCE_DISABLED;
 +              return;
        }
 +
 +      res->irq_resource[i].start = irq;
 +      res->irq_resource[i].end = irq;
 +      pcibios_penalize_isa_irq(irq, 1);
  }
  
  static void
 -pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table * res, int dma)
 +pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table * res, u32 dma)
  {
        int i = 0;
        while (i < PNP_MAX_DMA &&
                        res->dma_resource[i].flags |= IORESOURCE_DISABLED;
                        return;
                }
 -              res->dma_resource[i].start =(unsigned long) dma;
 -              res->dma_resource[i].end = (unsigned long) dma;
 +              res->dma_resource[i].start = dma;
 +              res->dma_resource[i].end = dma;
        }
  }
  
  static void
  pnpacpi_parse_allocated_ioresource(struct pnp_resource_table * res,
 -      int io, int len)
 +      u32 io, u32 len)
  {
        int i = 0;
        while (!(res->port_resource[i].flags & IORESOURCE_UNSET) &&
                        res->port_resource[i].flags |= IORESOURCE_DISABLED;
                        return;
                }
 -              res->port_resource[i].start = (unsigned long) io;
 -              res->port_resource[i].end = (unsigned long)(io + len - 1);
 +              res->port_resource[i].start = io;
 +              res->port_resource[i].end = io + len - 1;
        }
  }
  
  static void
  pnpacpi_parse_allocated_memresource(struct pnp_resource_table * res,
 -      int mem, int len)
 +      u64 mem, u64 len)
  {
        int i = 0;
        while (!(res->mem_resource[i].flags & IORESOURCE_UNSET) &&
                        res->mem_resource[i].flags |= IORESOURCE_DISABLED;
                        return;
                }
 -              res->mem_resource[i].start = (unsigned long) mem;
 -              res->mem_resource[i].end = (unsigned long)(mem + len - 1);
 +              res->mem_resource[i].start = mem;
 +              res->mem_resource[i].end = mem + len - 1;
        }
  }
  
@@@ -161,28 -151,27 +161,28 @@@ static acpi_status pnpacpi_allocated_re
        void *data)
  {
        struct pnp_resource_table * res_table = (struct pnp_resource_table *)data;
 +      int i;
  
        switch (res->id) {
        case ACPI_RSTYPE_IRQ:
 -              if ((res->data.irq.number_of_interrupts > 0) &&
 -                      valid_IRQ(res->data.irq.interrupts[0])) {
 -                      pnpacpi_parse_allocated_irqresource(res_table, 
 -                              acpi_register_gsi(res->data.irq.interrupts[0],
 -                                      res->data.irq.edge_level,
 -                                      res->data.irq.active_high_low));
 -                      pcibios_penalize_isa_irq(res->data.irq.interrupts[0], 1);
 +              /*
 +               * Per spec, only one interrupt per descriptor is allowed in
 +               * _CRS, but some firmware violates this, so parse them all.
 +               */
 +              for (i = 0; i < res->data.irq.number_of_interrupts; i++) {
 +                      pnpacpi_parse_allocated_irqresource(res_table,
 +                              res->data.irq.interrupts[i],
 +                              res->data.irq.edge_level,
 +                              res->data.irq.active_high_low);
                }
                break;
  
        case ACPI_RSTYPE_EXT_IRQ:
 -              if ((res->data.extended_irq.number_of_interrupts > 0) &&
 -                      valid_IRQ(res->data.extended_irq.interrupts[0])) {
 -                      pnpacpi_parse_allocated_irqresource(res_table, 
 -                              acpi_register_gsi(res->data.extended_irq.interrupts[0],
 -                                      res->data.extended_irq.edge_level,
 -                                      res->data.extended_irq.active_high_low));
 -                      pcibios_penalize_isa_irq(res->data.extended_irq.interrupts[0], 1);
 +              for (i = 0; i < res->data.extended_irq.number_of_interrupts; i++) {
 +                      pnpacpi_parse_allocated_irqresource(res_table,
 +                              res->data.extended_irq.interrupts[i],
 +                              res->data.extended_irq.edge_level,
 +                              res->data.extended_irq.active_high_low);
                }
                break;
        case ACPI_RSTYPE_DMA:
@@@ -255,7 -244,7 +255,7 @@@ static void pnpacpi_parse_dma_option(st
  
        if (p->number_of_channels == 0)
                return;
-       dma = pnpacpi_kmalloc(sizeof(struct pnp_dma), GFP_KERNEL);
+       dma = kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
        if (!dma)
                return;
  
@@@ -311,7 -300,7 +311,7 @@@ static void pnpacpi_parse_irq_option(st
        
        if (p->number_of_interrupts == 0)
                return;
-       irq = pnpacpi_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL);
+       irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
        if (!irq)
                return;
  
@@@ -332,7 -321,7 +332,7 @@@ static void pnpacpi_parse_ext_irq_optio
  
        if (p->number_of_interrupts == 0)
                return;
-       irq = pnpacpi_kmalloc(sizeof(struct pnp_irq), GFP_KERNEL);
+       irq = kcalloc(1, sizeof(struct pnp_irq), GFP_KERNEL);
        if (!irq)
                return;
  
@@@ -353,7 -342,7 +353,7 @@@ pnpacpi_parse_port_option(struct pnp_op
  
        if (io->range_length == 0)
                return;
-       port = pnpacpi_kmalloc(sizeof(struct pnp_port), GFP_KERNEL);
+       port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
        if (!port)
                return;
        port->min = io->min_base_address;
@@@ -374,7 -363,7 +374,7 @@@ pnpacpi_parse_fixed_port_option(struct 
  
        if (io->range_length == 0)
                return;
-       port = pnpacpi_kmalloc(sizeof(struct pnp_port), GFP_KERNEL);
+       port = kcalloc(1, sizeof(struct pnp_port), GFP_KERNEL);
        if (!port)
                return;
        port->min = port->max = io->base_address;
@@@ -393,7 -382,7 +393,7 @@@ pnpacpi_parse_mem24_option(struct pnp_o
  
        if (p->range_length == 0)
                return;
-       mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL);
+       mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
        if (!mem)
                return;
        mem->min = p->min_base_address;
@@@ -416,7 -405,7 +416,7 @@@ pnpacpi_parse_mem32_option(struct pnp_o
  
        if (p->range_length == 0)
                return;
-       mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL);
+       mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
        if (!mem)
                return;
        mem->min = p->min_base_address;
@@@ -439,7 -428,7 +439,7 @@@ pnpacpi_parse_fixed_mem32_option(struc
  
        if (p->range_length == 0)
                return;
-       mem = pnpacpi_kmalloc(sizeof(struct pnp_mem), GFP_KERNEL);
+       mem = kcalloc(1, sizeof(struct pnp_mem), GFP_KERNEL);
        if (!mem)
                return;
        mem->min = mem->max = p->range_base_address;
@@@ -623,7 -612,7 +623,7 @@@ int pnpacpi_build_resource_template(acp
        if (!res_cnt)
                return -EINVAL;
        buffer->length = sizeof(struct acpi_resource) * (res_cnt + 1) + 1;
-       buffer->pointer = pnpacpi_kmalloc(buffer->length - 1, GFP_KERNEL);
+       buffer->pointer = kcalloc(1, buffer->length - 1, GFP_KERNEL);
        if (!buffer->pointer)
                return -ENOMEM;
        pnp_dbg("Res cnt %d", res_cnt);
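
All of the pnpacpi_kmalloc() call sites in this file are converted to kcalloc(1, size, GFP_KERNEL), which hands back memory that is already zeroed, so the old wrapper (kmalloc plus memset) is no longer needed. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/pnp.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate one zero-initialized pnp_dma descriptor.
 * kcalloc(1, size, flags) behaves like kmalloc() followed by
 * memset(ptr, 0, size), so every field starts out cleared. */
static struct pnp_dma *sketch_alloc_pnp_dma(void)
{
	return kcalloc(1, sizeof(struct pnp_dma), GFP_KERNEL);
}

The same shape applies to the pnp_irq, pnp_port and pnp_mem allocations above.
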
diff --combined drivers/serial/Kconfig
index db8f39c30096c50e2b8098dc089db293670bf7d1,e39818a34a0754a824df7bb2e3e46878704e18cd..b745a1b9e835258b947493bbb8f8e6ebdb3829e0
@@@ -80,7 -80,7 +80,7 @@@ config SERIAL_8250_C
  config SERIAL_8250_ACPI
        bool "8250/16550 device discovery via ACPI namespace"
        default y if IA64
 -      depends on ACPI_BUS && SERIAL_8250
 +      depends on ACPI && SERIAL_8250
        ---help---
          If you wish to enable serial port discovery via the ACPI
          namespace, say Y here.  If unsure, say N.
@@@ -308,7 -308,7 +308,7 @@@ config SERIAL_S3C2410_CONSOL
  
  config SERIAL_DZ
        bool "DECstation DZ serial driver"
-       depends on MACH_DECSTATION && MIPS32
+       depends on MACH_DECSTATION && 32BIT
        select SERIAL_CORE
        help
          DZ11-family serial controllers for VAXstations, including the
index b9e9f66d2721836729ccd5d7ab87c4db6b210edd,d84a9c326c2238e70cdc3159d18dd864a481fd01..64a0b8e6afeb27137c70b45ff981023ac2fad532
@@@ -11,6 -11,7 +11,7 @@@ extern int mp_bus_id_to_local [MAX_MP_B
  extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
  extern int mp_bus_id_to_pci_bus [MAX_MP_BUSSES];
  
+ extern unsigned int def_to_bigsmp;
  extern unsigned int boot_cpu_physical_apicid;
  extern int smp_found_config;
  extern void find_smp_config (void);
@@@ -26,14 -27,14 +27,14 @@@ extern unsigned long mp_lapic_addr
  extern int pic_mode;
  extern int using_apic_timer;
  
 -#ifdef CONFIG_ACPI_BOOT
 +#ifdef CONFIG_ACPI
  extern void mp_register_lapic (u8 id, u8 enabled);
  extern void mp_register_lapic_address (u64 address);
  extern void mp_register_ioapic (u8 id, u32 address, u32 gsi_base);
  extern void mp_override_legacy_irq (u8 bus_irq, u8 polarity, u8 trigger, u32 gsi);
  extern void mp_config_acpi_legacy_irqs (void);
  extern int mp_register_gsi (u32 gsi, int edge_level, int active_high_low);
 -#endif /*CONFIG_ACPI_BOOT*/
 +#endif /* CONFIG_ACPI */
  
  #define PHYSID_ARRAY_SIZE     BITS_TO_LONGS(MAX_APICS)
  
diff --combined kernel/power/Kconfig
index b99f61b82685b07b517c29fec4411ec7add0f4fa,c14cd99911818c893294c52afff301b58e729554..396c7873e804076a7dbdf718bcbbcc19bb94f475
@@@ -1,6 -1,5 +1,6 @@@
  config PM
        bool "Power Management support"
 +      depends on !IA64_HP_SIM
        ---help---
          "Power Management" means that parts of your computer are shut
          off or put into a power conserving "sleep" mode if they are not
@@@ -29,7 -28,7 +29,7 @@@ config PM_DEBU
  
  config SOFTWARE_SUSPEND
        bool "Software Suspend"
-       depends on EXPERIMENTAL && PM && SWAP && ((X86 && SMP) || ((FVR || PPC32 || X86) && !SMP))
+       depends on PM && SWAP && (X86 || ((FVR || PPC32) && !SMP))
        ---help---
          Enable the possibility of suspending the machine.
          It doesn't need APM.
@@@ -73,6 -72,18 +73,18 @@@ config PM_STD_PARTITIO
          suspended image to. It will simply pick the first available swap 
          device.
  
+ config SWSUSP_ENCRYPT
+       bool "Encrypt suspend image"
+       depends on SOFTWARE_SUSPEND && CRYPTO=y && (CRYPTO_AES=y || CRYPTO_AES_586=y || CRYPTO_AES_X86_64=y)
+       default ""
+       ---help---
+         To prevent data gathering from swap after resume you can encrypt
+         the suspend image with a temporary key that is deleted on
+         resume.
+
+         Note that the temporary key is stored unencrypted on disk while the
+         system is suspended.
+
  config SUSPEND_SMP
        bool
        depends on HOTPLUG_CPU && X86 && PM