Merge branches 'x86/apic', 'x86/asm', 'x86/cleanups', 'x86/debug', 'x86/kconfig'...
author Ingo Molnar <mingo@elte.hu>
Sat, 14 Mar 2009 15:25:40 +0000 (16:25 +0100)
committer Ingo Molnar <mingo@elte.hu>
Sat, 14 Mar 2009 15:25:40 +0000 (16:25 +0100)
603 files changed:
Documentation/cputopology.txt
Documentation/filesystems/ext2.txt
Documentation/filesystems/ext3.txt
Documentation/hwmon/lm90
Documentation/kernel-parameters.txt
Documentation/x86/boot.txt
Documentation/x86/earlyprintk.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/alpha/include/asm/statfs.h
arch/alpha/include/asm/swab.h
arch/alpha/kernel/irq.c
arch/alpha/mm/init.c
arch/arm/include/asm/a.out.h
arch/arm/include/asm/setup.h
arch/arm/include/asm/swab.h
arch/arm/kernel/irq.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/oprofile/op_model_mpcore.c
arch/avr32/Kconfig
arch/avr32/include/asm/swab.h
arch/blackfin/include/asm/percpu.h
arch/blackfin/include/asm/swab.h
arch/blackfin/kernel/irqchip.c
arch/h8300/include/asm/swab.h
arch/ia64/include/asm/fpu.h
arch/ia64/include/asm/gcc_intrin.h
arch/ia64/include/asm/intrinsics.h
arch/ia64/include/asm/kvm.h
arch/ia64/include/asm/percpu.h
arch/ia64/include/asm/swab.h
arch/ia64/include/asm/topology.h
arch/ia64/include/asm/uv/uv.h [new file with mode: 0644]
arch/ia64/kernel/acpi.c
arch/ia64/kernel/iosapic.c
arch/ia64/kernel/irq.c
arch/ia64/kernel/irq_ia64.c
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/vmlinux.lds.S
arch/ia64/sn/kernel/msi_sn.c
arch/mips/configs/ip27_defconfig
arch/mips/configs/jmr3927_defconfig
arch/mips/configs/rbtx49xx_defconfig
arch/mips/include/asm/hazards.h
arch/mips/include/asm/irq.h
arch/mips/include/asm/prefetch.h
arch/mips/include/asm/sigcontext.h
arch/mips/include/asm/swab.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/irq-gic.c
arch/mips/kernel/linux32.c
arch/mips/kernel/smtc.c
arch/mips/mm/page.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-smtc.c
arch/mips/sgi-ip22/ip22-int.c
arch/mips/sgi-ip22/ip22-time.c
arch/mips/sibyte/bcm1480/smp.c
arch/mips/sibyte/sb1250/smp.c
arch/mn10300/kernel/mn10300-watchdog.c
arch/parisc/include/asm/pdc.h
arch/parisc/include/asm/swab.h
arch/parisc/kernel/irq.c
arch/powerpc/include/asm/bootx.h
arch/powerpc/include/asm/elf.h
arch/powerpc/include/asm/kvm.h
arch/powerpc/include/asm/ps3fb.h
arch/powerpc/include/asm/spu_info.h
arch/powerpc/include/asm/swab.h
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/mpic.c
arch/sparc/kernel/irq_64.c
arch/sparc/kernel/time_64.c
arch/um/kernel/ptrace.c
arch/um/os-Linux/user_syms.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/Makefile
arch/x86/boot/a20.c
arch/x86/boot/boot.h
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/boot/compressed/misc.c
arch/x86/boot/copy.S
arch/x86/boot/header.S
arch/x86/boot/main.c
arch/x86/boot/pm.c
arch/x86/boot/pmjump.S
arch/x86/boot/tools/build.c
arch/x86/boot/voyager.c [deleted file]
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/ia32/ia32_signal.c
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/a.out-core.h
arch/x86/include/asm/acpi.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/apicdef.h
arch/x86/include/asm/apicnum.h [new file with mode: 0644]
arch/x86/include/asm/apm.h [new file with mode: 0644]
arch/x86/include/asm/arch_hooks.h [deleted file]
arch/x86/include/asm/bigsmp/apic.h [deleted file]
arch/x86/include/asm/bigsmp/apicdef.h [deleted file]
arch/x86/include/asm/bigsmp/ipi.h [deleted file]
arch/x86/include/asm/boot.h
arch/x86/include/asm/cacheflush.h
arch/x86/include/asm/calling.h
arch/x86/include/asm/cpu.h
arch/x86/include/asm/cpu_debug.h [new file with mode: 0755]
arch/x86/include/asm/cpumask.h [new file with mode: 0644]
arch/x86/include/asm/current.h
arch/x86/include/asm/desc.h
arch/x86/include/asm/do_timer.h [new file with mode: 0644]
arch/x86/include/asm/elf.h
arch/x86/include/asm/entry_arch.h [new file with mode: 0644]
arch/x86/include/asm/es7000/apic.h [deleted file]
arch/x86/include/asm/es7000/apicdef.h [deleted file]
arch/x86/include/asm/es7000/ipi.h [deleted file]
arch/x86/include/asm/es7000/mpparse.h [deleted file]
arch/x86/include/asm/es7000/wakecpu.h [deleted file]
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/fixmap_32.h [deleted file]
arch/x86/include/asm/fixmap_64.h [deleted file]
arch/x86/include/asm/genapic.h
arch/x86/include/asm/genapic_32.h [deleted file]
arch/x86/include/asm/genapic_64.h [deleted file]
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hardirq_32.h [deleted file]
arch/x86/include/asm/hardirq_64.h [deleted file]
arch/x86/include/asm/highmem.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/i8259.h
arch/x86/include/asm/init.h [new file with mode: 0644]
arch/x86/include/asm/io.h
arch/x86/include/asm/io_32.h
arch/x86/include/asm/io_64.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/ipi.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_regs.h
arch/x86/include/asm/irq_regs_32.h [deleted file]
arch/x86/include/asm/irq_regs_64.h [deleted file]
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/kexec.h
arch/x86/include/asm/linkage.h
arch/x86/include/asm/mach-default/apm.h [deleted file]
arch/x86/include/asm/mach-default/do_timer.h [deleted file]
arch/x86/include/asm/mach-default/entry_arch.h [deleted file]
arch/x86/include/asm/mach-default/mach_apic.h [deleted file]
arch/x86/include/asm/mach-default/mach_apicdef.h [deleted file]
arch/x86/include/asm/mach-default/mach_ipi.h [deleted file]
arch/x86/include/asm/mach-default/mach_mpparse.h [deleted file]
arch/x86/include/asm/mach-default/mach_mpspec.h [deleted file]
arch/x86/include/asm/mach-default/mach_timer.h [deleted file]
arch/x86/include/asm/mach-default/mach_traps.h [deleted file]
arch/x86/include/asm/mach-default/mach_wakecpu.h [deleted file]
arch/x86/include/asm/mach-default/pci-functions.h [deleted file]
arch/x86/include/asm/mach-default/setup_arch.h [deleted file]
arch/x86/include/asm/mach-default/smpboot_hooks.h [deleted file]
arch/x86/include/asm/mach-generic/gpio.h [deleted file]
arch/x86/include/asm/mach-generic/mach_apic.h [deleted file]
arch/x86/include/asm/mach-generic/mach_apicdef.h [deleted file]
arch/x86/include/asm/mach-generic/mach_ipi.h [deleted file]
arch/x86/include/asm/mach-generic/mach_mpparse.h [deleted file]
arch/x86/include/asm/mach-generic/mach_mpspec.h [deleted file]
arch/x86/include/asm/mach-generic/mach_wakecpu.h [deleted file]
arch/x86/include/asm/mach-rdc321x/gpio.h [deleted file]
arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h [deleted file]
arch/x86/include/asm/mach-voyager/do_timer.h [deleted file]
arch/x86/include/asm/mach-voyager/entry_arch.h [deleted file]
arch/x86/include/asm/mach-voyager/setup_arch.h [deleted file]
arch/x86/include/asm/mach_timer.h [new file with mode: 0644]
arch/x86/include/asm/mach_traps.h [new file with mode: 0644]
arch/x86/include/asm/mce.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mmu_context_32.h [deleted file]
arch/x86/include/asm/mmu_context_64.h [deleted file]
arch/x86/include/asm/mmzone_32.h
arch/x86/include/asm/mpspec.h
arch/x86/include/asm/mpspec_def.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/numa_32.h
arch/x86/include/asm/numaq.h
arch/x86/include/asm/numaq/apic.h [deleted file]
arch/x86/include/asm/numaq/apicdef.h [deleted file]
arch/x86/include/asm/numaq/ipi.h [deleted file]
arch/x86/include/asm/numaq/mpparse.h [deleted file]
arch/x86/include/asm/numaq/wakecpu.h [deleted file]
arch/x86/include/asm/page.h
arch/x86/include/asm/page_32.h
arch/x86/include/asm/page_32_types.h [new file with mode: 0644]
arch/x86/include/asm/page_64.h
arch/x86/include/asm/page_64_types.h [new file with mode: 0644]
arch/x86/include/asm/page_types.h [new file with mode: 0644]
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/pat.h
arch/x86/include/asm/pci-functions.h [new file with mode: 0644]
arch/x86/include/asm/pda.h [deleted file]
arch/x86/include/asm/percpu.h
arch/x86/include/asm/pgtable-2level-defs.h [deleted file]
arch/x86/include/asm/pgtable-2level.h
arch/x86/include/asm/pgtable-2level_types.h [new file with mode: 0644]
arch/x86/include/asm/pgtable-3level-defs.h [deleted file]
arch/x86/include/asm/pgtable-3level.h
arch/x86/include/asm/pgtable-3level_types.h [new file with mode: 0644]
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_32.h
arch/x86/include/asm/pgtable_32_types.h [new file with mode: 0644]
arch/x86/include/asm/pgtable_64.h
arch/x86/include/asm/pgtable_64_types.h [new file with mode: 0644]
arch/x86/include/asm/pgtable_types.h [new file with mode: 0644]
arch/x86/include/asm/prctl.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/proto.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/rdc321x_defs.h [new file with mode: 0644]
arch/x86/include/asm/segment.h
arch/x86/include/asm/setup.h
arch/x86/include/asm/setup_arch.h [new file with mode: 0644]
arch/x86/include/asm/smp.h
arch/x86/include/asm/smpboot_hooks.h [new file with mode: 0644]
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/stackprotector.h [new file with mode: 0644]
arch/x86/include/asm/summit/apic.h [deleted file]
arch/x86/include/asm/summit/apicdef.h [deleted file]
arch/x86/include/asm/summit/ipi.h [deleted file]
arch/x86/include/asm/summit/mpparse.h [deleted file]
arch/x86/include/asm/syscalls.h
arch/x86/include/asm/system.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/timer.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/topology.h
arch/x86/include/asm/trampoline.h
arch/x86/include/asm/traps.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/uv/uv.h [new file with mode: 0644]
arch/x86/include/asm/uv/uv_bau.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/asm/vic.h [deleted file]
arch/x86/include/asm/voyager.h [deleted file]
arch/x86/include/asm/xen/events.h
arch/x86/include/asm/xen/hypervisor.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/realmode/wakeup.S
arch/x86/kernel/acpi/sleep.c
arch/x86/kernel/acpi/wakeup_32.S
arch/x86/kernel/acpi/wakeup_64.S
arch/x86/kernel/alternative.c
arch/x86/kernel/apic.c [deleted file]
arch/x86/kernel/apic/Makefile [new file with mode: 0644]
arch/x86/kernel/apic/apic.c [new file with mode: 0644]
arch/x86/kernel/apic/apic_flat_64.c [new file with mode: 0644]
arch/x86/kernel/apic/bigsmp_32.c [new file with mode: 0644]
arch/x86/kernel/apic/es7000_32.c [new file with mode: 0644]
arch/x86/kernel/apic/io_apic.c [new file with mode: 0644]
arch/x86/kernel/apic/ipi.c [new file with mode: 0644]
arch/x86/kernel/apic/nmi.c [new file with mode: 0644]
arch/x86/kernel/apic/numaq_32.c [new file with mode: 0644]
arch/x86/kernel/apic/probe_32.c [new file with mode: 0644]
arch/x86/kernel/apic/probe_64.c [new file with mode: 0644]
arch/x86/kernel/apic/summit_32.c [new file with mode: 0644]
arch/x86/kernel/apic/x2apic_cluster.c [new file with mode: 0644]
arch/x86/kernel/apic/x2apic_phys.c [new file with mode: 0644]
arch/x86/kernel/apic/x2apic_uv_x.c [new file with mode: 0644]
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/check.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/addon_cpuid_features.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/centaur_64.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cpu.h
arch/x86/kernel/cpu/cpu_debug.c [new file with mode: 0755]
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/e_powersaver.c
arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/Makefile
arch/x86/kernel/cpu/mcheck/mce_32.c
arch/x86/kernel/cpu/mcheck/mce_64.c
arch/x86/kernel/cpu/mcheck/mce_amd_64.c
arch/x86/kernel/cpu/mcheck/mce_intel_64.c
arch/x86/kernel/cpu/mcheck/p4.c
arch/x86/kernel/cpu/mcheck/threshold.c [new file with mode: 0644]
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/cpu/transmeta.c
arch/x86/kernel/cpu/umc.c
arch/x86/kernel/crash.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/e820.c
arch/x86/kernel/early_printk.c
arch/x86/kernel/efi.c
arch/x86/kernel/efi_64.c
arch/x86/kernel/efi_stub_32.S
arch/x86/kernel/efi_stub_64.S
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/es7000_32.c [deleted file]
arch/x86/kernel/genapic_64.c [deleted file]
arch/x86/kernel/genapic_flat_64.c [deleted file]
arch/x86/kernel/genx2apic_cluster.c [deleted file]
arch/x86/kernel/genx2apic_phys.c [deleted file]
arch/x86/kernel/genx2apic_uv_x.c [deleted file]
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/i8259.c
arch/x86/kernel/io_apic.c [deleted file]
arch/x86/kernel/ioport.c
arch/x86/kernel/ipi.c [deleted file]
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/irqinit_32.c
arch/x86/kernel/irqinit_64.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/mca_32.c
arch/x86/kernel/microcode_intel.c
arch/x86/kernel/mmconf-fam10h_64.c
arch/x86/kernel/module_32.c
arch/x86/kernel/module_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/msr.c
arch/x86/kernel/nmi.c [deleted file]
arch/x86/kernel/numaq_32.c [deleted file]
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/probe_roms_32.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/relocate_kernel_32.S
arch/x86/kernel/relocate_kernel_64.S
arch/x86/kernel/setup.c
arch/x86/kernel/setup_percpu.c
arch/x86/kernel/signal.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/smpcommon.c [deleted file]
arch/x86/kernel/stacktrace.c
arch/x86/kernel/summit_32.c [deleted file]
arch/x86/kernel/syscall_table_32.S
arch/x86/kernel/time_32.c
arch/x86/kernel/tlb_32.c [deleted file]
arch/x86/kernel/tlb_64.c [deleted file]
arch/x86/kernel/tlb_uv.c
arch/x86/kernel/trampoline_32.S
arch/x86/kernel/trampoline_64.S
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kernel/uv_time.c [new file with mode: 0644]
arch/x86/kernel/visws_quirks.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vmi_32.c
arch/x86/kernel/vmiclock_32.c
arch/x86/kernel/vmlinux_32.lds.S
arch/x86/kernel/vmlinux_64.lds.S
arch/x86/kernel/vsmp_64.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/lguest/Kconfig
arch/x86/lguest/boot.c
arch/x86/lib/getuser.S
arch/x86/lib/memcpy_64.S
arch/x86/mach-default/Makefile [deleted file]
arch/x86/mach-default/setup.c [deleted file]
arch/x86/mach-generic/Makefile [deleted file]
arch/x86/mach-generic/bigsmp.c [deleted file]
arch/x86/mach-generic/default.c [deleted file]
arch/x86/mach-generic/es7000.c [deleted file]
arch/x86/mach-generic/numaq.c [deleted file]
arch/x86/mach-generic/probe.c [deleted file]
arch/x86/mach-generic/summit.c [deleted file]
arch/x86/mach-rdc321x/Makefile [deleted file]
arch/x86/mach-rdc321x/gpio.c [deleted file]
arch/x86/mach-rdc321x/platform.c [deleted file]
arch/x86/mach-voyager/Makefile [deleted file]
arch/x86/mach-voyager/setup.c [deleted file]
arch/x86/mach-voyager/voyager_basic.c [deleted file]
arch/x86/mach-voyager/voyager_cat.c [deleted file]
arch/x86/mach-voyager/voyager_smp.c [deleted file]
arch/x86/mach-voyager/voyager_thread.c [deleted file]
arch/x86/math-emu/get_address.c
arch/x86/mm/Makefile
arch/x86/mm/extable.c
arch/x86/mm/fault.c
arch/x86/mm/highmem_32.c
arch/x86/mm/init.c [new file with mode: 0644]
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/iomap_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/memtest.c
arch/x86/mm/mmap.c
arch/x86/mm/numa_32.c
arch/x86/mm/numa_64.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/pgtable.c
arch/x86/mm/pgtable_32.c
arch/x86/mm/srat_64.c
arch/x86/mm/tlb.c [new file with mode: 0644]
arch/x86/pci/common.c
arch/x86/pci/fixup.c
arch/x86/pci/numaq_32.c
arch/x86/pci/pcbios.c
arch/x86/power/hibernate_asm_32.S
arch/x86/power/hibernate_asm_64.S
arch/x86/vdso/Makefile
arch/x86/vdso/vma.c
arch/x86/xen/Kconfig
arch/x86/xen/Makefile
arch/x86/xen/enlighten.c
arch/x86/xen/irq.c
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h
arch/x86/xen/multicalls.c
arch/x86/xen/multicalls.h
arch/x86/xen/smp.c
arch/x86/xen/suspend.c
arch/x86/xen/xen-asm.S [new file with mode: 0644]
arch/x86/xen/xen-asm.h [new file with mode: 0644]
arch/x86/xen/xen-asm_32.S
arch/x86/xen/xen-asm_64.S
arch/x86/xen/xen-head.S
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/swab.h
block/blktrace.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/osl.c
drivers/acpi/processor_perflib.c
drivers/acpi/tables.c
drivers/base/cpu.c
drivers/base/topology.c
drivers/clocksource/acpi_pm.c
drivers/clocksource/cyclone.c
drivers/eisa/Kconfig
drivers/firmware/dcdbas.c
drivers/firmware/iscsi_ibft.c
drivers/gpu/drm/drm_proc.c
drivers/hwmon/Kconfig
drivers/hwmon/abituguru3.c
drivers/hwmon/f75375s.c
drivers/hwmon/it87.c
drivers/hwmon/lm90.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/nes/nes_verbs.h
drivers/input/keyboard/Kconfig
drivers/input/mouse/Kconfig
drivers/lguest/Kconfig
drivers/mfd/wm8350-core.c
drivers/misc/Kconfig
drivers/misc/sgi-gru/grufile.c
drivers/misc/sgi-xp/xp.h
drivers/misc/sgi-xp/xpc_main.c
drivers/mmc/host/s3cmci.c
drivers/mtd/nand/Kconfig
drivers/net/ne3210.c
drivers/net/sfc/efx.c
drivers/net/sfc/falcon.c
drivers/net/sunhme.c
drivers/net/wireless/arlan-main.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/buffer_sync.h
drivers/oprofile/oprof.c
drivers/pci/dmar.c
drivers/pci/hotplug/Kconfig
drivers/pci/intr_remapping.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/quirks.c
drivers/platform/x86/acer-wmi.c
drivers/power/ds2760_battery.c
drivers/sbus/char/bbc_i2c.c
drivers/sbus/char/jsflash.c
drivers/w1/masters/w1-gpio.c
drivers/watchdog/rdc321x_wdt.c
drivers/xen/events.c
drivers/xen/manage.c
fs/fs-writeback.c
fs/inode.c
fs/pipe.c
fs/squashfs/block.c
fs/super.c
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_buf.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_log_recover.c
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/asm-frv/swab.h
include/asm-generic/percpu.h
include/asm-generic/sections.h
include/asm-generic/vmlinux.lds.h
include/asm-m32r/swab.h
include/asm-mn10300/swab.h
include/linux/acpi.h
include/linux/bootmem.h
include/linux/coda_psdev.h
include/linux/decompress/bunzip2.h [new file with mode: 0644]
include/linux/decompress/generic.h [new file with mode: 0644]
include/linux/decompress/inflate.h [new file with mode: 0644]
include/linux/decompress/mm.h [new file with mode: 0644]
include/linux/decompress/unlzma.h [new file with mode: 0644]
include/linux/elfcore.h
include/linux/in6.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/irqnr.h
include/linux/kprobes.h
include/linux/magic.h
include/linux/mm_types.h
include/linux/mmiotrace.h
include/linux/nubus.h
include/linux/percpu.h
include/linux/reiserfs_fs.h
include/linux/sched.h
include/linux/smp.h
include/linux/socket.h
include/linux/stackprotector.h [new file with mode: 0644]
include/linux/topology.h
include/linux/types.h
include/linux/vmalloc.h
init/Kconfig
init/do_mounts_rd.c
init/initramfs.c
init/main.c
kernel/exit.c
kernel/fork.c
kernel/irq/chip.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/irq/numa_migrate.c
kernel/irq/proc.c
kernel/kexec.c
kernel/module.c
kernel/panic.c
kernel/sched.c
kernel/sched_rt.c
kernel/softirq.c
kernel/stop_machine.c
lib/Kconfig
lib/Makefile
lib/bitmap.c
lib/decompress.c [new file with mode: 0644]
lib/decompress_bunzip2.c [new file with mode: 0644]
lib/decompress_inflate.c [new file with mode: 0644]
lib/decompress_unlzma.c [new file with mode: 0644]
lib/zlib_inflate/inflate.h
lib/zlib_inflate/inftrees.h
mm/Makefile
mm/allocpercpu.c
mm/bootmem.c
mm/filemap.c
mm/percpu.c [new file with mode: 0644]
mm/vmalloc.c
mm/vmscan.c
net/ipv4/af_inet.c
net/ipv4/route.c
scripts/Makefile.lib
scripts/bin_size [new file with mode: 0644]
scripts/gcc-x86_32-has-stack-protector.sh [new file with mode: 0644]
scripts/gcc-x86_64-has-stack-protector.sh
scripts/gen_initramfs_list.sh
scripts/headers_check.pl
scripts/mod/modpost.c
scripts/package/Makefile
scripts/package/mkspec
scripts/unifdef.c
sound/drivers/Kconfig
usr/Kconfig
usr/Makefile
usr/initramfs_data.S
usr/initramfs_data.bz2.S [new file with mode: 0644]
usr/initramfs_data.gz.S [new file with mode: 0644]
usr/initramfs_data.lzma.S [new file with mode: 0644]

index 45932ec21cee7cc2fadc11c96b831d435e4d9728..b41f3e58aefa8581eda3fa35286791b4ae0117c7 100644 (file)
@@ -18,11 +18,11 @@ For an architecture to support this feature, it must define some of
 these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
-#define topology_thread_siblings(cpu)
-#define topology_core_siblings(cpu)
+#define topology_thread_cpumask(cpu)
+#define topology_core_cpumask(cpu)
 
 The type of **_id is int.
-The type of siblings is cpumask_t.
+The type of siblings is (const) struct cpumask *.
 
 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
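
With the cpumask rework above, the siblings accessors hand back a pointer
instead of a struct copy, so callers iterate the mask in place. A minimal
consumer sketch (illustrative only; it assumes the kernel's standard
for_each_cpu() iterator and a valid cpu number):

    const struct cpumask *mask = topology_core_cpumask(cpu);
    unsigned int sibling;

    /* walk every core sibling of 'cpu' without copying the whole mask */
    for_each_cpu(sibling, mask)
        printk(KERN_INFO "CPU%u is a core sibling of CPU%u\n", sibling, cpu);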
index 4333e836c4951459d9145213938bbca78644d63d..23448551cabed2765eedc123dda88883e7cbc862 100644 (file)
@@ -373,10 +373,10 @@ Filesystem Resizing       http://ext2resize.sourceforge.net/
 Compression (*)                http://e2compr.sourceforge.net/
 
 Implementations for:
-Windows 95/98/NT/2000  http://uranus.it.swin.edu.au/~jn/linux/Explore2fs.htm
-Windows 95 (*)         http://www.yipton.demon.co.uk/content.html#FSDEXT2
+Windows 95/98/NT/2000  http://www.chrysocome.net/explore2fs
+Windows 95 (*)         http://www.yipton.net/content.html#FSDEXT2
 DOS client (*)         ftp://metalab.unc.edu/pub/Linux/system/filesystems/ext2/
-OS/2                   http://perso.wanadoo.fr/matthieu.willm/ext2-os2/
-RISC OS client         ftp://ftp.barnet.ac.uk/pub/acorn/armlinux/iscafs/
+OS/2 (*)               ftp://metalab.unc.edu/pub/Linux/system/filesystems/ext2/
+RISC OS client         http://www.esw-heim.tu-clausthal.de/~marco/smorbrod/IscaFS/
 
-(*) no longer actively developed/supported (as of Apr 2001)
+(*) no longer actively developed/supported (as of Mar 2009)
index 9dd2a3bb2acc87b85d473a2080284910f04f8585..e5f3833a6ef8f53c3b22adf48d57b84e2a24ba0c 100644 (file)
@@ -198,5 +198,5 @@ kernel source:      <file:fs/ext3/>
 programs:      http://e2fsprogs.sourceforge.net/
                http://ext2resize.sourceforge.net
 
-useful links:  http://www-106.ibm.com/developerworks/linux/library/l-fs7/
-               http://www-106.ibm.com/developerworks/linux/library/l-fs8/
+useful links:  http://www.ibm.com/developerworks/library/l-fs7.html
+               http://www.ibm.com/developerworks/library/l-fs8.html
index 0e8411710238ff3c0a58c1f373031f3f7447ce5c..93d8e3d5515018e490629da68423c03ac9565e02 100644 (file)
@@ -42,6 +42,11 @@ Supported chips:
     Addresses scanned: I2C 0x4e
     Datasheet: Publicly available at the Maxim website
                http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+  * Maxim MAX6648
+    Prefix: 'max6646'
+    Addresses scanned: I2C 0x4c
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
   * Maxim MAX6649
     Prefix: 'max6646'
     Addresses scanned: I2C 0x4c
@@ -74,6 +79,11 @@ Supported chips:
                            0x4c, 0x4d and 0x4e
     Datasheet: Publicly available at the Maxim website
                http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3370
+  * Maxim MAX6692
+    Prefix: 'max6646'
+    Addresses scanned: I2C 0x4c
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3500
 
 
 Author: Jean Delvare <khali@linux-fr.org>
index 54f21a5c262b02ef6cb4718f72b8357693197cf3..28de395fa096a4077b1b521ca23cab9f05b66502 100644 (file)
@@ -1310,8 +1310,13 @@ and is between 256 and 4096 characters. It is defined in the file
 
        memtest=        [KNL,X86] Enable memtest
                        Format: <integer>
-                       range: 0,4 : pattern number
                        default : 0 <disable>
+                       Specifies the number of memtest passes to be
+                       performed. Each pass selects another test
+                       pattern from a given set of patterns. Memtest
+                       fills the memory with this pattern, validates
+                       memory contents and reserves bad memory
+                       regions that are detected.
 
        meye.*=         [HW] Set MotionEye Camera parameters
                        See Documentation/video4linux/meye.txt.
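
The pass behaviour described above amounts to one fill-then-verify sweep per
pattern; booting with e.g. "memtest=4" runs four such sweeps. A simplified
sketch of the idea (not the kernel's actual arch/x86/mm/memtest.c code; the
function name is illustrative):

    static void memtest_one_pass(unsigned long *start, unsigned long *end,
                                 unsigned long pattern)
    {
        unsigned long *p;

        /* fill the candidate range with the test pattern ... */
        for (p = start; p < end; p++)
            *p = pattern;

        /* ... then read it back and flag any word that did not stick */
        for (p = start; p < end; p++)
            if (*p != pattern)
                /* the real code would also reserve the bad region here */
                printk(KERN_ERR "memtest: bad memory at %p\n", p);
    }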
index 7b4596ac41208f49d83b3ed09b8ebb7774c7e010..e0203662f9e9f8a1a907060437f3fcdf907e2667 100644 (file)
@@ -158,7 +158,7 @@ Offset      Proto   Name            Meaning
 0202/4 2.00+   header          Magic signature "HdrS"
 0206/2 2.00+   version         Boot protocol version supported
 0208/4 2.00+   realmode_swtch  Boot loader hook (see below)
-020C/2 2.00+   start_sys       The load-low segment (0x1000) (obsolete)
+020C/2 2.00+   start_sys_seg   The load-low segment (0x1000) (obsolete)
 020E/2 2.00+   kernel_version  Pointer to kernel version string
 0210/1 2.00+   type_of_loader  Boot loader identifier
 0211/1 2.00+   loadflags       Boot protocol option flags
@@ -170,10 +170,11 @@ Offset    Proto   Name            Meaning
 0224/2 2.01+   heap_end_ptr    Free memory after setup end
 0226/2 N/A     pad1            Unused
 0228/4 2.02+   cmd_line_ptr    32-bit pointer to the kernel command line
-022C/4 2.03+   initrd_addr_max Highest legal initrd address
+022C/4 2.03+   ramdisk_max     Highest legal initrd address
 0230/4 2.05+   kernel_alignment Physical addr alignment required for kernel
 0234/1 2.05+   relocatable_kernel Whether kernel is relocatable or not
-0235/3 N/A     pad2            Unused
+0235/1 N/A     pad2            Unused
+0236/2 N/A     pad3            Unused
 0238/4 2.06+   cmdline_size    Maximum size of the kernel command line
 023C/4 2.07+   hardware_subarch Hardware subarchitecture
 0240/8 2.07+   hardware_subarch_data Subarchitecture-specific data
@@ -299,14 +300,14 @@ Protocol: 2.00+
   e.g. 0x0204 for version 2.04, and 0x0a11 for a hypothetical version
   10.17.
 
-Field name:    readmode_swtch
+Field name:    realmode_swtch
 Type:          modify (optional)
 Offset/size:   0x208/4
 Protocol:      2.00+
 
   Boot loader hook (see ADVANCED BOOT LOADER HOOKS below.)
 
-Field name:    start_sys
+Field name:    start_sys_seg
 Type:          read
 Offset/size:   0x20c/2
 Protocol:      2.00+
@@ -468,7 +469,7 @@ Protocol:   2.02+
   zero, the kernel will assume that your boot loader does not support
   the 2.02+ protocol.
 
-Field name:    initrd_addr_max
+Field name:    ramdisk_max
 Type:          read
 Offset/size:   0x22c/4
 Protocol:      2.03+
@@ -542,7 +543,10 @@ Protocol:  2.08+
 
   The payload may be compressed. The format of both the compressed and
   uncompressed data should be determined using the standard magic
-  numbers. Currently only gzip compressed ELF is used.
+  numbers.  The currently supported compression formats are gzip
+  (magic numbers 1F 8B or 1F 9E), bzip2 (magic number 42 5A) and LZMA
+  (magic number 5D 00).  The uncompressed payload is currently always ELF
+  (magic number 7F 45 4C 46).
   
 Field name:    payload_length
 Type:          read
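
The magic numbers listed above are enough to dispatch on the payload format.
A boot loader could probe them roughly like this (a sketch based only on the
numbers given in the text, not the decompressor's actual code):

    /* classify a kernel payload from its leading bytes */
    static const char *payload_format(const unsigned char *p)
    {
        if (p[0] == 0x1f && (p[1] == 0x8b || p[1] == 0x9e))
            return "gzip";
        if (p[0] == 0x42 && p[1] == 0x5a)
            return "bzip2";
        if (p[0] == 0x5d && p[1] == 0x00)
            return "lzma";
        if (p[0] == 0x7f && p[1] == 'E' && p[2] == 'L' && p[3] == 'F')
            return "uncompressed ELF";
        return "unknown";
    }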
diff --git a/Documentation/x86/earlyprintk.txt b/Documentation/x86/earlyprintk.txt
new file mode 100644 (file)
index 0000000..607b1a0
--- /dev/null
@@ -0,0 +1,101 @@
+
+Mini-HOWTO for using the earlyprintk=dbgp boot option with a
+USB2 Debug port key and a debug cable, on x86 systems.
+
+You need two computers, the 'USB debug key' special gadget
+and two USB cables, connected like this:
+
+  [host/target] <-------> [USB debug key] <-------> [client/console]
+
+1. There are three specific hardware requirements:
+
+ a.) Host/target system needs to have USB debug port capability.
+
+ You can check this capability by looking at a 'Debug port' bit in
+ the lspci -vvv output:
+
+ # lspci -vvv
+ ...
+ 00:1d.7 USB Controller: Intel Corporation 82801H (ICH8 Family) USB2 EHCI Controller #1 (rev 03) (prog-if 20 [EHCI])
+         Subsystem: Lenovo ThinkPad T61
+         Control: I/O- Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR+ FastB2B- DisINTx-
+         Status: Cap+ 66MHz- UDF- FastB2B+ ParErr- DEVSEL=medium >TAbort- <TAbort- <MAbort- >SERR- <PERR- INTx-
+         Latency: 0
+         Interrupt: pin D routed to IRQ 19
+         Region 0: Memory at fe227000 (32-bit, non-prefetchable) [size=1K]
+         Capabilities: [50] Power Management version 2
+                 Flags: PMEClk- DSI- D1- D2- AuxCurrent=375mA PME(D0+,D1-,D2-,D3hot+,D3cold+)
+                 Status: D0 PME-Enable- DSel=0 DScale=0 PME+
+         Capabilities: [58] Debug port: BAR=1 offset=00a0
+                            ^^^^^^^^^^^ <==================== [ HERE ]
+        Kernel driver in use: ehci_hcd
+         Kernel modules: ehci-hcd
+ ...
+
+( If your system does not list a debug port capability then you probably
+  won't be able to use the USB debug key. )
+
+ b.) You also need a Netchip USB debug cable/key:
+
+        http://www.plxtech.com/products/NET2000/NET20DC/default.asp
+
+     This is a small blue plastic connector with two USB connections,
+     it draws power from its USB connections.
+
+ c.) Thirdly, you need a second client/console system with a regular USB port.
+
+2. Software requirements:
+
+ a.) On the host/target system:
+
+    You need to enable the following kernel config option:
+
+      CONFIG_EARLY_PRINTK_DBGP=y
+
+    And you need to add the boot command line: "earlyprintk=dbgp".
+    (If you are using Grub, append it to the 'kernel' line in
+     /etc/grub.conf)
+
+    NOTE: normally the earlyprintk console gets turned off once the
+    regular console is alive - use "earlyprintk=dbgp,keep" to keep
+    this channel open beyond early bootup. This can be useful for
+    debugging crashes under Xorg, etc.
+
+ b.) On the client/console system:
+
+    You should enable the following kernel config option:
+
+      CONFIG_USB_SERIAL_DEBUG=y
+
+    On the next bootup with the modified kernel you should
+    get a /dev/ttyUSBx device(s).
+
+    Now this channel of kernel messages is ready to be used: start
+    your favorite terminal emulator (minicom, etc.) and set
+    it up to use /dev/ttyUSB0 - or use a raw 'cat /dev/ttyUSBx' to
+    see the raw output.
+
+ c.) On Nvidia Southbridge based systems: the kernel will try to probe
+     and find out which port has a debug device connected.
+
+3. Testing that it works fine:
+
+   You can test the output by using earlyprintk=dbgp,keep and provoking
+   kernel messages on the host/target system. You can provoke a harmless
+   kernel message by for example doing:
+
+     echo h > /proc/sysrq-trigger
+
+   On the host/target system you should see this help line in "dmesg" output:
+
+     SysRq : HELP : loglevel(0-9) reBoot Crashdump terminate-all-tasks(E) memory-full-oom-kill(F) kill-all-tasks(I) saK show-backtrace-all-active-cpus(L) show-memory-usage(M) nice-all-RT-tasks(N) powerOff show-registers(P) show-all-timers(Q) unRaw Sync show-task-states(T) Unmount show-blocked-tasks(W) dump-ftrace-buffer(Z)
+
+   On the client/console system do:
+
+       cat /dev/ttyUSB0
+
+   And you should see the help line above displayed shortly after you've
+   provoked it on the host system.
+
+If it does not work then please ask about it on the linux-kernel@vger.kernel.org
+mailing list or contact the x86 maintainers.
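
As an aside, any raw reader of the USB serial device works in place of a
terminal emulator; the 'cat /dev/ttyUSB0' above is equivalent to this tiny
POSIX program (assuming the device really does appear as /dev/ttyUSB0):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[256];
        ssize_t n;
        int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

        if (fd < 0) {
            perror("open /dev/ttyUSB0");
            return 1;
        }
        /* dump everything the debug channel sends us to stdout */
        while ((n = read(fd, buf, sizeof(buf))) > 0)
            fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
    }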
index 1c2ca1dc66f2006765dba54bb05edf8a5b57d258..61aeb5aae244b68d7b8726369423c3b2f7820e48 100644 (file)
@@ -1469,8 +1469,6 @@ L:        linux-acpi@vger.kernel.org
 S:     Supported
 
 DOCUMENTATION (/Documentation directory)
-P:     Michael Kerrisk
-M:     mtk.manpages@gmail.com
 P:     Randy Dunlap
 M:     rdunlap@xenotime.net
 L:     linux-doc@vger.kernel.org
@@ -2879,7 +2877,7 @@ P:        Michael Kerrisk
 M:     mtk.manpages@gmail.com
 W:     http://www.kernel.org/doc/man-pages
 L:     linux-man@vger.kernel.org
-S:     Supported
+S:     Maintained
 
 MARVELL LIBERTAS WIRELESS DRIVER
 P:     Dan Williams
index d04ee0ad1dccce7ea2503637ffbe6d8753074ffe..46c04c546ee2d4676693289f54270391fb15ad97 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
@@ -533,8 +533,9 @@ KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
 endif
 
 # Force gcc to behave correct even for buggy distributions
-# Arch Makefiles may override this setting
+ifndef CONFIG_CC_STACKPROTECTOR
 KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+endif
 
 ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS  += -fno-omit-frame-pointer -fno-optimize-sibling-calls
@@ -904,12 +905,18 @@ localver = $(subst $(space),, $(string) \
 # and if the SCM is known, a tag from the SCM is appended.
 # The appended tag is determined by the SCM used.
 #
-# Currently, only git is supported.
-# Other SCMs can edit scripts/setlocalversion and add the appropriate
-# checks as needed.
+# .scmversion is used when generating rpm packages so we do not lose
+# the version information from the SCM when we do the build of the kernel
+# from the copied source
 ifdef CONFIG_LOCALVERSION_AUTO
-       _localver-auto = $(shell $(CONFIG_SHELL) \
-                         $(srctree)/scripts/setlocalversion $(srctree))
+
+ifeq ($(wildcard .scmversion),)
+        _localver-auto = $(shell $(CONFIG_SHELL) \
+                         $(srctree)/scripts/setlocalversion $(srctree))
+else
+        _localver-auto = $(shell cat .scmversion 2> /dev/null)
+endif
+
        localver-auto  = $(LOCALVERSION)$(_localver-auto)
 endif
 
@@ -1537,7 +1544,7 @@ quiet_cmd_depmod = DEPMOD  $(KERNELRELEASE)
       cmd_depmod = \
        if [ -r System.map -a -x $(DEPMOD) ]; then                              \
                $(DEPMOD) -ae -F System.map                                     \
-               $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) -r)   \
+               $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) )     \
                $(KERNELRELEASE);                                               \
        fi
 
index de35cd438a1023a57f3c5dfb4a49230d8ff69aee..ccd2e186bfd8244e5892b181bcb43906cfdd4b08 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ALPHA_STATFS_H
 #define _ALPHA_STATFS_H
 
+#include <linux/types.h>
+
 /* Alpha is the only 64-bit platform with 32-bit statfs. And doesn't
    even seem to implement statfs64 */
 #define __statfs_word __u32
index 68e7089e02d5724315e6bc4a5f27b40feadd3419..4d682b16c7c448b2e07d9c64abfaf5ed9417904b 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ALPHA_SWAB_H
 #define _ALPHA_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/compiler.h>
 
index 703731accda6d515e6879afe276e68818eaddd4b..7bc7489223f3e03b77aec838838202e8aed3f3b4 100644 (file)
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
                cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
        last_cpu = cpu;
 
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
        irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
        return 0;
 }
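
This hunk is representative of a change repeated across architectures in this
merge: irq_desc[].affinity is no longer a plain cpumask_t that can be assigned
by value, so writers go through cpumask_copy(). In sketch form, taken from the
pattern in the hunks themselves:

    /* before: struct assignment copies all NR_CPUS bits by value */
    irq_desc[irq].affinity = cpumask_of_cpu(cpu);

    /* after: affinity may point at off-stack storage, so copy explicitly */
    cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));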
index 5d7a16eab312619f94cc48515715ae81b298b296..af71d38c8e412295bdd85e7f323cf9391e517ec5 100644 (file)
@@ -189,9 +189,21 @@ callback_init(void * kernel_end)
 
        if (alpha_using_srm) {
                static struct vm_struct console_remap_vm;
-               unsigned long vaddr = VMALLOC_START;
+               unsigned long nr_pages = 0;
+               unsigned long vaddr;
                unsigned long i, j;
 
+               /* calculate needed size */
+               for (i = 0; i < crb->map_entries; ++i)
+                       nr_pages += crb->map[i].count;
+
+               /* register the vm area */
+               console_remap_vm.flags = VM_ALLOC;
+               console_remap_vm.size = nr_pages << PAGE_SHIFT;
+               vm_area_register_early(&console_remap_vm, PAGE_SIZE);
+
+               vaddr = (unsigned long)console_remap_vm.addr;
+
                /* Set up the third level PTEs and update the virtual
                   addresses of the CRB entries.  */
                for (i = 0; i < crb->map_entries; ++i) {
@@ -213,12 +225,6 @@ callback_init(void * kernel_end)
                                vaddr += PAGE_SIZE;
                        }
                }
-
-               /* Let vmalloc know that we've allocated some space.  */
-               console_remap_vm.flags = VM_ALLOC;
-               console_remap_vm.addr = (void *) VMALLOC_START;
-               console_remap_vm.size = vaddr - VMALLOC_START;
-               vmlist = &console_remap_vm;
        }
 
        callback_init_done = 1;
index 79489fdcc8b812c4ba81e88dd4bd3692622efac5..083894b2e3bcb6cb7ef455dc1b71ed7a6392db77 100644 (file)
@@ -2,7 +2,7 @@
 #define __ARM_A_OUT_H__
 
 #include <linux/personality.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 struct exec
 {
index f2cd18a0932b786d0283d371ab9172f91db40fe1..ee1304f22f944c3104d28d44c1b0eeb05957eb22 100644 (file)
@@ -14,7 +14,7 @@
 #ifndef __ASMARM_SETUP_H
 #define __ASMARM_SETUP_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define COMMAND_LINE_SIZE 1024
 
index 27a689be085640b2755872e647e689fefd0fc065..ca2bf2f6d6ea93c29d312525b89aceb9d30134f3 100644 (file)
@@ -16,7 +16,7 @@
 #define __ASM_ARM_SWAB_H
 
 #include <linux/compiler.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 #  define __SWAB_64_THRU_32__
index 363db186cb93334791588db5b2158e6f295c5c54..45eacb5a2ecd80fb7a30dc56d6f3386f1c842f19 100644 (file)
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
        .lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
                irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-       bad_irq_desc.affinity = CPU_MASK_ALL;
+       cpumask_setall(bad_irq_desc.affinity);
        bad_irq_desc.cpu = smp_processor_id();
 #endif
        init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
                struct irq_desc *desc = irq_desc + i;
 
                if (desc->cpu == cpu) {
-                       unsigned int newcpu = any_online_cpu(desc->affinity);
-
-                       if (newcpu == NR_CPUS) {
+                       unsigned int newcpu = cpumask_any_and(desc->affinity,
+                                                             cpu_online_mask);
+                       if (newcpu >= nr_cpu_ids) {
                                if (printk_ratelimit())
                                        printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
                                               i, cpu);
 
-                               cpus_setall(desc->affinity);
-                               newcpu = any_online_cpu(desc->affinity);
+                               cpumask_setall(desc->affinity);
+                               newcpu = cpumask_any_and(desc->affinity,
+                                                        cpu_online_mask);
                        }
 
                        route_irq(desc, i, newcpu);
index 00216071eaf72d2bf87ddccbfe4f6a95e4ce2d4f..1602373e539cf72adc4176635f4364b4c6376c96 100644 (file)
@@ -64,7 +64,9 @@ SECTIONS
                __initramfs_end = .;
 #endif
                . = ALIGN(4096);
+               __per_cpu_load = .;
                __per_cpu_start = .;
+                       *(.data.percpu.page_aligned)
                        *(.data.percpu)
                        *(.data.percpu.shared_aligned)
                __per_cpu_end = .;
index 6d6bd5899240862771935c47f32007fb452aec48..853d42bb8682c83b517bf656ae922d4ce5cd3b63 100644 (file)
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
        const struct cpumask *mask = cpumask_of(cpu);
 
        spin_lock_irq(&desc->lock);
-       desc->affinity = *mask;
+       cpumask_copy(desc->affinity, mask);
        desc->chip->set_affinity(irq, mask);
        spin_unlock_irq(&desc->lock);
 }
index b189680d18b0493d7f9d12f265442563bec2f11d..05fe3053dcaec7f725059d78e7a6988788c557ff 100644 (file)
@@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
        def_bool y
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
        def_bool n
 
 config ARCH_HAVE_MEMORY_PRESENT
index a14aa5b46d98c39d32f6c1eed8b8dca37f6897ee..14cc737bbca6e909dbd7ec44d36703731f8ca519 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef __ASM_AVR32_SWAB_H
 #define __ASM_AVR32_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 #define __SWAB_64_THRU_32__
index 797c0c1650695291e2c99f0e8a632c529674b1cf..c94c7bc88c71865312352e7e32e2fd6ea0f4ae50 100644 (file)
@@ -3,14 +3,4 @@
 
 #include <asm-generic/percpu.h>
 
-#ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE 8192
-#else
-#define PERCPU_MODULE_RESERVE 0
-#endif
-
-#define PERCPU_ENOUGH_ROOM \
-       (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
-        PERCPU_MODULE_RESERVE)
-
 #endif /* __ARCH_BLACKFIN_PERCPU__ */
index 69a051b612bd2dfaaa107eccd838804f05c749bb..6403ad2932ebd0efe539f1b16a87325d7aa015a3 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _BLACKFIN_SWAB_H
 #define _BLACKFIN_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
index 7fd12656484666ee4c29784a1d2d1e0e4e5fee04..1ab5b532ec724c97e02dc1f71282aa9918d9a83f 100644 (file)
@@ -70,6 +70,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
        int i = *(loff_t *) v, j;
index c108f39b8bc499efa884a64490c21cfc33206415..39abbf52807dd9de130686f9c472c168f01516ae 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _H8300_SWAB_H
 #define _H8300_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 #  define __SWAB_64_THRU_32__
index 3859558ff0a464c906549bcbc1c17d6d18aa937c..0c26157cffa55fe0ea1c348fd0a542b11e6c46c3 100644 (file)
@@ -6,8 +6,6 @@
  *     David Mosberger-Tang <davidm@hpl.hp.com>
  */
 
-#include <asm/types.h>
-
 /* floating point status register: */
 #define FPSR_TRAP_VD   (1 << 0)        /* invalid op trap disabled */
 #define FPSR_TRAP_DD   (1 << 1)        /* denormal trap disabled */
index 0f5b559217580cf5a19b83df0f35c138d22b5308..c2c5fd8fcac4ecccc4010951545688b41df7a792 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2002,2003 Suresh Siddha <suresh.b.siddha@intel.com>
  */
 
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 /* define this macro to get some asm stmts included in 'c' files */
index a3e44a5ed497dc6c84d1a6fc701c9884c2d0e40c..c47830e26cb7dd310af374b9b8ccd60194bef129 100644 (file)
@@ -10,6 +10,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/types.h>
 /* include compiler specific intrinsics */
 #include <asm/ia64regs.h>
 #ifdef __INTEL_COMPILER
index bfa86b6af7cd0774d652c99d800e822284f74c23..2b0a38e847059fd18d995a01eff7ac178adda084 100644 (file)
@@ -21,8 +21,7 @@
  *
  */
 
-#include <asm/types.h>
-
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* Select x86 specific features in <linux/kvm.h> */
index 77f30b664b4eade81ca0985be35d97825fe0a3a8..30cf46534dd23f1aa386d0168cbcc0022cbecf6d 100644 (file)
@@ -27,12 +27,12 @@ extern void *per_cpu_init(void);
 
 #else /* ! SMP */
 
-#define PER_CPU_ATTRIBUTES     __attribute__((__section__(".data.percpu")))
-
 #define per_cpu_init()                         (__phys_per_cpu_start)
 
 #endif /* SMP */
 
+#define PER_CPU_BASE_SECTION ".data.percpu"
+
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
  * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
index 6aa58b699eea1662f7b6796913fad904d34d36d6..c89a8cb5d8a592f7c89ac30e198642a91d4d5bcd 100644 (file)
@@ -6,7 +6,7 @@
  *     David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/intrinsics.h>
 #include <linux/compiler.h>
 
index 32f3af1641c52515dcc36cf591235acddb69f618..3193f4417e16c8ea919016f05aa904215f6eeae9 100644 (file)
@@ -84,7 +84,7 @@ void build_cpu_to_node_map(void);
        .child                  = NULL,                 \
        .groups                 = NULL,                 \
        .min_interval           = 8,                    \
-       .max_interval           = 8*(min(num_online_cpus(), 32)), \
+       .max_interval           = 8*(min(num_online_cpus(), 32U)), \
        .busy_factor            = 64,                   \
        .imbalance_pct          = 125,                  \
        .cache_nice_tries       = 2,                    \
diff --git a/arch/ia64/include/asm/uv/uv.h b/arch/ia64/include/asm/uv/uv.h
new file mode 100644 (file)
index 0000000..61b5bdf
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _ASM_IA64_UV_UV_H
+#define _ASM_IA64_UV_UV_H
+
+#include <asm/system.h>
+#include <asm/sn/simulator.h>
+
+static inline int is_uv_system(void)
+{
+       /* temporary support for running on hardware simulator */
+       return IS_MEDUSA() || ia64_platform_is("uv");
+}
+
+#endif /* _ASM_IA64_UV_UV_H */
index d541671caf4a8d7e655ce5b9e811b25c8dd4777b..bdef2ce38c8b45c3227b7d70673db1bce4deb46f 100644 (file)
@@ -199,6 +199,10 @@ char *__init __acpi_map_table(unsigned long phys_addr, unsigned long size)
        return __va(phys_addr);
 }
 
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+}
+
 /* --------------------------------------------------------------------------
                             Boot-time Table Parsing
    -------------------------------------------------------------------------- */
index e13125058bedb03eb56d2ab291d7c3e3628c9c25..166e0d839fa04d753fa3709bfe62544eee32e9ec 100644 (file)
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
        if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
                /* Clear affinity */
-               cpus_setall(idesc->affinity);
+               cpumask_setall(idesc->affinity);
 #endif
                /* Clear the interrupt information */
                iosapic_intr_info[irq].dest = 0;
index a58f64ca9f0e9931476ffd4f8460f95ec4200fad..226233a6fa19a2d3d8eb9f9ab5684dfda50b2c9d 100644 (file)
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
        if (irq < NR_IRQS) {
-               cpumask_copy(&irq_desc[irq].affinity,
+               cpumask_copy(irq_desc[irq].affinity,
                             cpumask_of(cpu_logical_id(hwid)));
                irq_redir[irq] = (char) (redir & 0xff);
        }
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
                if (desc->status == IRQ_PER_CPU)
                        continue;
 
-               if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+               if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
                    >= nr_cpu_ids) {
                        /*
                         * Save it for phase 2 processing
index 28d3d483db9203e32ce3e642130c966194660403..927ad027820c5476ac9403477251e06107054696 100644 (file)
@@ -493,11 +493,13 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
        saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
        ia64_srlz_d();
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               struct irq_desc *desc = irq_to_desc(vector);
+
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                } else if (unlikely(IS_RESCHEDULE(vector)))
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                else {
                        int irq = local_vector_to_irq(vector);
 
@@ -551,11 +553,13 @@ void ia64_process_pending_intr(void)
          * Perform normal interrupt style processing
          */
        while (vector != IA64_SPURIOUS_INT_VECTOR) {
+               struct irq_desc *desc = irq_to_desc(vector);
+
                if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
                        smp_local_flush_tlb();
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                } else if (unlikely(IS_RESCHEDULE(vector)))
-                       kstat_this_cpu.irqs[vector]++;
+                       kstat_incr_irqs_this_cpu(vector, desc);
                else {
                        struct pt_regs *old_regs = set_irq_regs(NULL);
                        int irq = local_vector_to_irq(vector);
index 890339339035b74814721c799a024fa6d184438a..dcb6b7c51ea7eb8dc3c8514bf24ef38ee18a6fe2 100644 (file)
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
        msg.data = data;
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
        msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
        dmar_msi_write(irq, &msg);
-       irq_desc[irq].affinity = *mask;
+       cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
index 10a7d47e8510b47fc4ce18ef4a385718d269bb1e..3765efc5f96333d3a40df579c075f5e6e95a77eb 100644 (file)
@@ -213,16 +213,9 @@ SECTIONS
         { *(.data.cacheline_aligned) }
 
   /* Per-cpu data: */
-  percpu : { } :percpu
   . = ALIGN(PERCPU_PAGE_SIZE);
-  __phys_per_cpu_start = .;
-  .data.percpu PERCPU_ADDR : AT(__phys_per_cpu_start - LOAD_OFFSET)
-       {
-               __per_cpu_start = .;
-               *(.data.percpu)
-               *(.data.percpu.shared_aligned)
-               __per_cpu_end = .;
-       }
+  PERCPU_VADDR(PERCPU_ADDR, :percpu)
+  __phys_per_cpu_start = __per_cpu_load;
   . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
                                                 * into percpu page size
                                                 */
index ca553b0429ce34cfb00398701d32ab73c942eeaf..81e428943d7374d01e0aef8530e39c7ecd892b30 100644 (file)
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
        msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
        write_msi_msg(irq, &msg);
-       irq_desc[irq].affinity = *cpu_mask;
+       cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 
index f2baea3039bb8bfe6f95cc9828ff5dd0686b9510..0208723adf28e287d98d59bcc9afe2876b2cb184 100644 (file)
@@ -512,7 +512,7 @@ CONFIG_MD_LINEAR=m
 CONFIG_MD_RAID0=y
 CONFIG_MD_RAID1=y
 CONFIG_MD_RAID10=m
-CONFIG_MD_RAID456=m
+CONFIG_MD_RAID456=y
 CONFIG_MD_RAID5_RESHAPE=y
 CONFIG_MD_MULTIPATH=m
 CONFIG_MD_FAULTY=m
index 9d5bd2a0af3d6af5b7d8ca1bad39a0fe5c0fea25..5380f1f582d90e421eed8ec919e0dd37604f0bc5 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc9
-# Fri Jul 11 23:01:36 2008
+# Linux kernel version: 2.6.29-rc7
+# Wed Mar  4 23:07:16 2009
 #
 CONFIG_MIPS=y
 
@@ -18,8 +18,10 @@ CONFIG_MIPS=y
 # CONFIG_LEMOTE_FULONG is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_MACH_EMMA is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
@@ -39,7 +41,11 @@ CONFIG_MIPS=y
 # CONFIG_SNI_RM is not set
 CONFIG_MACH_TX39XX=y
 # CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+CONFIG_MACH_TXX9=y
 CONFIG_TOSHIBA_JMR3927=y
 CONFIG_SOC_TX3927=y
 # CONFIG_TOSHIBA_FPCIB0 is not set
@@ -54,12 +60,14 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
 CONFIG_CEVT_TXX9=y
 CONFIG_GPIO_TXX9=y
 CONFIG_DMA_NONCOHERENT=y
 CONFIG_DMA_NEED_PCI_MAP_STATE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
 # CONFIG_HOTPLUG_CPU is not set
 # CONFIG_NO_IOPORT is not set
 CONFIG_GENERIC_GPIO=y
@@ -87,6 +95,7 @@ CONFIG_CPU_TX39XX=y
 # CONFIG_CPU_TX49XX is not set
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -94,6 +103,7 @@ CONFIG_CPU_TX39XX=y
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
 CONFIG_SYS_HAS_CPU_TX39XX=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
@@ -117,14 +127,12 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
-# CONFIG_TICK_ONESHOT is not set
+CONFIG_UNEVICTABLE_LRU=y
 # CONFIG_NO_HZ is not set
 # CONFIG_HIGH_RES_TIMERS is not set
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
@@ -159,6 +167,15 @@ CONFIG_SYSVIPC_SYSCTL=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
 # CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=14
 # CONFIG_CGROUPS is not set
@@ -171,7 +188,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 # CONFIG_HOTPLUG is not set
@@ -188,26 +204,23 @@ CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 # CONFIG_MODULES is not set
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -221,7 +234,7 @@ CONFIG_IOSCHED_CFQ=y
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+# CONFIG_FREEZER is not set
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -231,12 +244,15 @@ CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_STUB is not set
 CONFIG_MMU=y
 
 #
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 CONFIG_TRAD_SIGNALS=y
 
@@ -245,15 +261,12 @@ CONFIG_TRAD_SIGNALS=y
 #
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
 # Networking options
 #
+CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
@@ -293,6 +306,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_IPX is not set
 # CONFIG_ATALK is not set
 # CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -302,14 +316,9 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_PHONET is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 
 #
@@ -323,7 +332,89 @@ CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+# CONFIG_MTD_BLKDEVS is not set
+# CONFIG_MTD_BLOCK is not set
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
 # CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_CPQ_DA is not set
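[Note on the hunk above: this TX39xx board defconfig goes from no MTD support at all to a CFI/JEDEC NOR flash setup with CONFIG_MTD_CMDLINE_PARTS=y, so flash partitions can be passed on the kernel command line instead of being hard-coded in board code. A hedged illustration of the mtdparts= syntax parsed by drivers/mtd/cmdlinepart.c — the device name and partition sizes below are hypothetical, not taken from this board:

	mtdparts=physmap-flash.0:256k(boot)ro,64k(env),-(rootfs)

Each entry is <size>(<name>) with an optional ro suffix, and "-" assigns whatever space remains.]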
@@ -336,6 +427,7 @@ CONFIG_BLK_DEV=y
 # CONFIG_BLK_DEV_RAM is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 # CONFIG_MISC_DEVICES is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
@@ -361,7 +453,6 @@ CONFIG_HAVE_IDE=y
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_EQUALIZER is not set
@@ -383,6 +474,9 @@ CONFIG_PHYLIB=y
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
 # CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
@@ -392,6 +486,7 @@ CONFIG_NET_ETHERNET=y
 # CONFIG_SUNGEM is not set
 # CONFIG_CASSINI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_SMC91X is not set
 # CONFIG_DM9000 is not set
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
@@ -399,6 +494,9 @@ CONFIG_NET_ETHERNET=y
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
 # CONFIG_AMD8111_ETH is not set
@@ -406,7 +504,6 @@ CONFIG_NET_PCI=y
 # CONFIG_B44 is not set
 # CONFIG_FORCEDETH is not set
 CONFIG_TC35815=y
-# CONFIG_EEPRO100 is not set
 # CONFIG_E100 is not set
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
@@ -415,9 +512,11 @@ CONFIG_TC35815=y
 # CONFIG_R6040 is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
 # CONFIG_VIA_RHINE is not set
+# CONFIG_ATL2 is not set
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_TR is not set
@@ -428,6 +527,10 @@ CONFIG_TC35815=y
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
 # CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_PPP is not set
@@ -440,27 +543,7 @@ CONFIG_TC35815=y
 #
 # Input device support
 #
-CONFIG_INPUT=y
-# CONFIG_INPUT_FF_MEMLESS is not set
-# CONFIG_INPUT_POLLDEV is not set
-
-#
-# Userland interfaces
-#
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_EVDEV is not set
-# CONFIG_INPUT_EVBUG is not set
-
-#
-# Input Device Drivers
-#
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_INPUT_JOYSTICK is not set
-# CONFIG_INPUT_TABLET is not set
-# CONFIG_INPUT_TOUCHSCREEN is not set
-# CONFIG_INPUT_MISC is not set
+# CONFIG_INPUT is not set
 
 #
 # Hardware I/O ports
@@ -517,16 +600,22 @@ CONFIG_LEGACY_PTY_COUNT=256
 CONFIG_DEVPORT=y
 # CONFIG_I2C is not set
 # CONFIG_SPI is not set
-CONFIG_HAVE_GPIO_LIB=y
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
 
 #
-# GPIO Support
+# Memory mapped GPIO expanders:
 #
 
 #
 # I2C GPIO expanders:
 #
 
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+
 #
 # SPI GPIO expanders:
 #
@@ -542,6 +631,7 @@ CONFIG_WATCHDOG=y
 # Watchdog Device Drivers
 #
 # CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
 CONFIG_TXX9_WDT=y
 
 #
@@ -549,18 +639,21 @@ CONFIG_TXX9_WDT=y
 #
 # CONFIG_PCIPCWATCHDOG is not set
 # CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -591,16 +684,26 @@ CONFIG_SSB_POSSIBLE=y
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
-# CONFIG_HID_SUPPORT is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
-# CONFIG_NEW_LEDS is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_GPIO=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
 CONFIG_RTC_LIB=y
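[Note on the hunk above: it enables the LED class with GPIO-driven LEDs and the heartbeat trigger. A minimal sketch of how a board of this era would describe such an LED through the leds-gpio platform data — the LED name and GPIO number are hypothetical, not from this board's code:

#include <linux/kernel.h>
#include <linux/leds.h>

static struct gpio_led board_leds[] = {
	{
		.name		 = "board:led0",  /* hypothetical LED name */
		.gpio		 = 0,		  /* hypothetical GPIO line */
		.default_trigger = "heartbeat",	  /* CONFIG_LEDS_TRIGGER_HEARTBEAT=y */
	},
};

static struct gpio_led_platform_data board_led_data = {
	.num_leds = ARRAY_SIZE(board_leds),
	.leds	  = board_leds,
};

The structure is then attached as the platform_data of a "leds-gpio" platform device; with CONFIG_LEDS_TRIGGER_HEARTBEAT=y the "heartbeat" default trigger named above is available at boot.]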
@@ -626,27 +729,34 @@ CONFIG_RTC_INTF_DEV=y
 # Platform RTC drivers
 #
 # CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
 # CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 CONFIG_RTC_DRV_DS1742=y
 # CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 CONFIG_DNOTIFY=y
@@ -676,28 +786,17 @@ CONFIG_INOTIFY_USER=y
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 # CONFIG_TMPFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 # CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
+# CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 # CONFIG_NFS_V3 is not set
-# CONFIG_NFSD is not set
 CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
 CONFIG_LOCKD=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
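[Note on the hunk above: the miscellaneous filesystems are dropped but CONFIG_ROOT_NFS=y is kept, so the board is still expected to mount its root filesystem over NFS. For reference, the canonical command line from Documentation/filesystems/nfsroot.txt looks like this — server address and export path are placeholders:

	root=/dev/nfs nfsroot=192.168.1.1:/export/root ip=dhcp
]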
@@ -726,7 +825,16 @@ CONFIG_FRAME_WARN=1024
 # CONFIG_DEBUG_FS is not set
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+#
+# Tracers
+#
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
 CONFIG_CMDLINE=""
 
 #
@@ -734,15 +842,18 @@ CONFIG_CMDLINE=""
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_CRYPTO is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
index 83d5c58662c88ba0eb7f4962840aaba743dcf9eb..1efe977497dd5c9d0b83235154e772c92baefd74 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.26-rc9
-# Fri Jul 11 23:03:21 2008
+# Linux kernel version: 2.6.29-rc7
+# Wed Mar  4 23:08:06 2009
 #
 CONFIG_MIPS=y
 
@@ -18,8 +18,10 @@ CONFIG_MIPS=y
 # CONFIG_LEMOTE_FULONG is not set
 # CONFIG_MIPS_MALTA is not set
 # CONFIG_MIPS_SIM is not set
-# CONFIG_MARKEINS is not set
+# CONFIG_MACH_EMMA is not set
 # CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
 # CONFIG_PNX8550_JBS is not set
 # CONFIG_PNX8550_STB810 is not set
 # CONFIG_PMC_MSP is not set
@@ -39,20 +41,28 @@ CONFIG_MIPS=y
 # CONFIG_SNI_RM is not set
 # CONFIG_MACH_TX39XX is not set
 CONFIG_MACH_TX49XX=y
+# CONFIG_MIKROTIK_RB532 is not set
 # CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+CONFIG_MACH_TXX9=y
 CONFIG_TOSHIBA_RBTX4927=y
 CONFIG_TOSHIBA_RBTX4938=y
+CONFIG_TOSHIBA_RBTX4939=y
 CONFIG_SOC_TX4927=y
 CONFIG_SOC_TX4938=y
+CONFIG_SOC_TX4939=y
+CONFIG_TXX9_7SEGLED=y
 # CONFIG_TOSHIBA_FPCIB0 is not set
 CONFIG_PICMG_PCI_BACKPLANE_DEFAULT=y
 
 #
 # Multiplex Pin Select
 #
-CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61=y
+# CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61 is not set
 # CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND is not set
 # CONFIG_TOSHIBA_RBTX4938_MPLEX_ATA is not set
+CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
 CONFIG_PCI_TX4927=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
@@ -64,14 +74,18 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CMOS_UPDATE=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
 CONFIG_CEVT_R4K=y
 CONFIG_CEVT_TXX9=y
+CONFIG_CSRC_R4K_LIB=y
 CONFIG_CSRC_R4K=y
 CONFIG_GPIO_TXX9=y
 CONFIG_DMA_NONCOHERENT=y
 CONFIG_DMA_NEED_PCI_MAP_STATE=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
 # CONFIG_HOTPLUG_CPU is not set
 # CONFIG_NO_IOPORT is not set
 CONFIG_GENERIC_GPIO=y
@@ -100,6 +114,7 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
 CONFIG_CPU_TX49XX=y
 # CONFIG_CPU_R5000 is not set
 # CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
 # CONFIG_CPU_R6000 is not set
 # CONFIG_CPU_NEVADA is not set
 # CONFIG_CPU_R8000 is not set
@@ -107,6 +122,7 @@ CONFIG_CPU_TX49XX=y
 # CONFIG_CPU_RM7000 is not set
 # CONFIG_CPU_RM9000 is not set
 # CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
 CONFIG_SYS_HAS_CPU_TX49XX=y
 CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
 CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
@@ -134,13 +150,12 @@ CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
 CONFIG_TICK_ONESHOT=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -176,6 +191,15 @@ CONFIG_SYSVIPC_SYSCTL=y
 # CONFIG_BSD_PROCESS_ACCT is not set
 # CONFIG_TASKSTATS is not set
 # CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_CLASSIC_RCU=y
+# CONFIG_TREE_RCU is not set
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
@@ -190,7 +214,6 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_SYSCTL=y
 CONFIG_EMBEDDED=y
 CONFIG_SYSCTL_SYSCALL=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
 # CONFIG_HOTPLUG is not set
@@ -207,30 +230,26 @@ CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
 CONFIG_SLAB=y
 # CONFIG_SLUB is not set
 # CONFIG_SLOB is not set
 # CONFIG_PROFILING is not set
-# CONFIG_MARKERS is not set
 CONFIG_HAVE_OPROFILE=y
-# CONFIG_HAVE_KPROBES is not set
-# CONFIG_HAVE_KRETPROBES is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
 # CONFIG_MODULE_FORCE_LOAD is not set
-# CONFIG_MODULE_UNLOAD is not set
+CONFIG_MODULE_UNLOAD=y
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
 # CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
 
 #
 # IO Schedulers
@@ -244,7 +263,8 @@ CONFIG_DEFAULT_AS=y
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="anticipatory"
-CONFIG_CLASSIC_RCU=y
+# CONFIG_PROBE_INITRD_HEADER is not set
+# CONFIG_FREEZER is not set
 
 #
 # Bus options (PCI, PCMCIA, EISA, ISA, TC)
@@ -254,12 +274,15 @@ CONFIG_PCI=y
 CONFIG_PCI_DOMAINS=y
 # CONFIG_ARCH_SUPPORTS_MSI is not set
 # CONFIG_PCI_LEGACY is not set
+# CONFIG_PCI_STUB is not set
 CONFIG_MMU=y
 
 #
 # Executable file formats
 #
 CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_HAVE_AOUT is not set
 # CONFIG_BINFMT_MISC is not set
 CONFIG_TRAD_SIGNALS=y
 
@@ -268,15 +291,12 @@ CONFIG_TRAD_SIGNALS=y
 #
 CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_PM is not set
-
-#
-# Networking
-#
 CONFIG_NET=y
 
 #
 # Networking options
 #
+CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 # CONFIG_PACKET_MMAP is not set
 CONFIG_UNIX=y
@@ -318,6 +338,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_IPX is not set
 # CONFIG_ATALK is not set
 # CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -327,14 +348,9 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
-
-#
-# Wireless
-#
-# CONFIG_CFG80211 is not set
-# CONFIG_WIRELESS_EXT is not set
-# CONFIG_MAC80211 is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_PHONET is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_WIMAX is not set
 # CONFIG_RFKILL is not set
 
 #
@@ -348,7 +364,90 @@ CONFIG_STANDALONE=y
 CONFIG_PREVENT_FIRMWARE_BUILD=y
 # CONFIG_SYS_HYPERVISOR is not set
 # CONFIG_CONNECTOR is not set
-# CONFIG_MTD is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_TESTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+# CONFIG_MTD_BLKDEVS is not set
+# CONFIG_MTD_BLOCK is not set
+# CONFIG_MTD_BLOCK_RO is not set
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
 # CONFIG_PARPORT is not set
 CONFIG_BLK_DEV=y
 # CONFIG_BLK_CPQ_DA is not set
@@ -365,9 +464,60 @@ CONFIG_BLK_DEV_RAM_SIZE=8192
 # CONFIG_BLK_DEV_XIP is not set
 # CONFIG_CDROM_PKTCDVD is not set
 # CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_HD is not set
 # CONFIG_MISC_DEVICES is not set
 CONFIG_HAVE_IDE=y
-# CONFIG_IDE is not set
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_TIMINGS=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+# CONFIG_BLK_DEV_IDECD is not set
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_PLATFORM is not set
+CONFIG_BLK_DEV_IDEDMA_SFF=y
+
+#
+# PCI IDE chipsets support
+#
+# CONFIG_BLK_DEV_GENERIC is not set
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+# CONFIG_BLK_DEV_PIIX is not set
+# CONFIG_BLK_DEV_IT8172 is not set
+# CONFIG_BLK_DEV_IT8213 is not set
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+# CONFIG_BLK_DEV_TC86C001 is not set
+CONFIG_BLK_DEV_IDE_TX4938=y
+CONFIG_BLK_DEV_IDE_TX4939=y
+CONFIG_BLK_DEV_IDEDMA=y
 
 #
 # SCSI device support
@@ -390,7 +540,6 @@ CONFIG_HAVE_IDE=y
 # CONFIG_IEEE1394 is not set
 # CONFIG_I2O is not set
 CONFIG_NETDEVICES=y
-# CONFIG_NETDEVICES_MULTIQUEUE is not set
 # CONFIG_DUMMY is not set
 # CONFIG_BONDING is not set
 # CONFIG_EQUALIZER is not set
@@ -412,15 +561,19 @@ CONFIG_PHYLIB=y
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
 # CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
-# CONFIG_MII is not set
+CONFIG_MII=y
 # CONFIG_AX88796 is not set
 # CONFIG_HAPPYMEAL is not set
 # CONFIG_SUNGEM is not set
 # CONFIG_CASSINI is not set
 # CONFIG_NET_VENDOR_3COM is not set
+CONFIG_SMC91X=y
 # CONFIG_DM9000 is not set
 # CONFIG_NET_TULIP is not set
 # CONFIG_HP100 is not set
@@ -429,6 +582,9 @@ CONFIG_NE2000=y
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
 # CONFIG_AMD8111_ETH is not set
@@ -436,7 +592,6 @@ CONFIG_NET_PCI=y
 # CONFIG_B44 is not set
 # CONFIG_FORCEDETH is not set
 CONFIG_TC35815=y
-# CONFIG_EEPRO100 is not set
 # CONFIG_E100 is not set
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
@@ -445,9 +600,11 @@ CONFIG_TC35815=y
 # CONFIG_R6040 is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
 # CONFIG_VIA_RHINE is not set
+# CONFIG_ATL2 is not set
 # CONFIG_NETDEV_1000 is not set
 # CONFIG_NETDEV_10000 is not set
 # CONFIG_TR is not set
@@ -458,6 +615,10 @@ CONFIG_TC35815=y
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
 # CONFIG_IWLWIFI_LEDS is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
 # CONFIG_WAN is not set
 # CONFIG_FDDI is not set
 # CONFIG_PPP is not set
@@ -502,6 +663,7 @@ CONFIG_SERIAL_TXX9_CONSOLE=y
 CONFIG_SERIAL_TXX9_STDSERIAL=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
 # CONFIG_IPMI_HANDLER is not set
@@ -517,26 +679,34 @@ CONFIG_SPI_MASTER=y
 #
 # SPI Master Controller Drivers
 #
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_GPIO is not set
 CONFIG_SPI_TXX9=y
 
 #
 # SPI Protocol Masters
 #
-CONFIG_EEPROM_AT25=y
 # CONFIG_SPI_TLE62X0 is not set
-CONFIG_HAVE_GPIO_LIB=y
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
 
 #
-# GPIO Support
+# Memory mapped GPIO expanders:
 #
 
 #
 # I2C GPIO expanders:
 #
 
+#
+# PCI GPIO expanders:
+#
+# CONFIG_GPIO_BT8XX is not set
+
 #
 # SPI GPIO expanders:
 #
+# CONFIG_GPIO_MAX7301 is not set
 # CONFIG_GPIO_MCP23S08 is not set
 # CONFIG_W1 is not set
 # CONFIG_POWER_SUPPLY is not set
@@ -550,6 +720,7 @@ CONFIG_WATCHDOG=y
 # Watchdog Device Drivers
 #
 # CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_ALIM7101_WDT is not set
 CONFIG_TXX9_WDT=m
 
 #
@@ -557,18 +728,21 @@ CONFIG_TXX9_WDT=m
 #
 # CONFIG_PCIPCWATCHDOG is not set
 # CONFIG_WDTPCI is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
 # Multifunction device drivers
 #
+# CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -599,15 +773,27 @@ CONFIG_SSB_POSSIBLE=y
 # Display device support
 #
 # CONFIG_DISPLAY_SUPPORT is not set
-
-#
-# Sound
-#
 # CONFIG_SOUND is not set
 # CONFIG_USB_SUPPORT is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
-# CONFIG_NEW_LEDS is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+CONFIG_LEDS_GPIO=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
 CONFIG_RTC_LIB=y
@@ -628,35 +814,47 @@ CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 #
 # SPI RTC drivers
 #
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
 # CONFIG_RTC_DRV_MAX6902 is not set
 # CONFIG_RTC_DRV_R9701 is not set
 CONFIG_RTC_DRV_RS5C348=y
+# CONFIG_RTC_DRV_DS3234 is not set
 
 #
 # Platform RTC drivers
 #
 # CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
 # CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 CONFIG_RTC_DRV_DS1742=y
 # CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
 # on-CPU RTC drivers
 #
+CONFIG_RTC_DRV_TX4939=y
+# CONFIG_DMADEVICES is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
 
 #
 # File systems
 #
 # CONFIG_EXT2_FS is not set
 # CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_DNOTIFY is not set
@@ -687,30 +885,19 @@ CONFIG_GENERIC_ACL=y
 CONFIG_PROC_FS=y
 # CONFIG_PROC_KCORE is not set
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 # CONFIG_HUGETLB_PAGE is not set
 # CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
-# CONFIG_HFSPLUS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_VXFS_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_HPFS_FS is not set
-# CONFIG_QNX4FS_FS is not set
-# CONFIG_ROMFS_FS is not set
-# CONFIG_SYSV_FS is not set
-# CONFIG_UFS_FS is not set
+# CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
-# CONFIG_NFSD is not set
 CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_NFS_COMMON=y
@@ -740,7 +927,16 @@ CONFIG_FRAME_WARN=1024
 CONFIG_DEBUG_FS=y
 # CONFIG_HEADERS_CHECK is not set
 # CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_SYSCTL_SYSCALL_CHECK=y
+
+#
+# Tracers
+#
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
 CONFIG_CMDLINE=""
 
 #
@@ -748,15 +944,18 @@ CONFIG_CMDLINE=""
 #
 # CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
 # CONFIG_CRYPTO is not set
 
 #
 # Library routines
 #
 CONFIG_BITREVERSE=y
-# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+CONFIG_GENERIC_FIND_LAST_BIT=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
 # CONFIG_CRC_ITU_T is not set
 CONFIG_CRC32=y
 # CONFIG_CRC7 is not set
index 43baed16a1093aea91ec92b872976ba0cd605317..134e1fc8f4d6d7a5eb5ae3b2d361284b23ab5e95 100644 (file)
@@ -138,7 +138,8 @@ do {                                                                        \
                __instruction_hazard();                                 \
 } while (0)
 
-#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_CAVIUM_OCTEON)
+#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
+      defined(CONFIG_CPU_R5500)
 
 /*
  * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
index abc62aa744ace852888367ffce294fea809095f0..3214ade02d105988937576c8425f444b08783b0e 100644 (file)
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)                                         \
 do {                                                                   \
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {      \
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
        smtc_forward_irq(irq);                                          \
        irq_exit();                                                     \
        return;                                                         \
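[Note on the hunk above, apparently arch/mips/include/asm/irq.h: this is the first of many cpumask conversions in this merge. irq_desc[].affinity is now a struct cpumask pointer rather than an embedded cpumask_t, so open-coded tests, assignments and scans become accessor calls. A condensed sketch of the recurring substitutions, assuming the 2.6.29 genirq declarations; the function itself is hypothetical:

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static void affinity_conversion_sketch(unsigned int irq,
				       const struct cpumask *mask)
{
	/* was: irq_desc[irq].affinity = *mask; */
	cpumask_copy(irq_desc[irq].affinity, mask);

	/* was: if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) */
	if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity))
		return;

	/* was: target = first_cpu(irq_desc[irq].affinity); */
	(void)cpumask_first(irq_desc[irq].affinity);
}

The same pattern recurs in the GIC, SMTC, parisc, powerpc and sparc hunks below.]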
index 17850834ccb0960ea8fe46c910c48e64cb61171a..a56594f360ee247e4bf560b1274984622dfc5857 100644 (file)
@@ -26,7 +26,7 @@
  * Pref_WriteBackInvalidate is a nop and Pref_PrepareForStore is broken in
  * current versions due to erratum G105.
  *
- * VR7701 only implements the Load prefetch.
+ * VR5500 (including VR5701 and VR7701) only implement the Load prefetch.
  *
  * Finally MIPS32 and MIPS64 implement all of the following hints.
  */
index 9ce0607d7a4e9391ba9d4c592f312b1e999f8b31..9e89cf99d4e432e29a0f18aef60c3fd7a1d0b764 100644 (file)
@@ -9,6 +9,7 @@
 #ifndef _ASM_SIGCONTEXT_H
 #define _ASM_SIGCONTEXT_H
 
+#include <linux/types.h>
 #include <asm/sgidefs.h>
 
 #if _MIPS_SIM == _MIPS_SIM_ABI32
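[Note on the hunk above, apparently arch/mips/include/asm/sigcontext.h: this and several later hunks switch headers that are exported to userspace from <asm/types.h> to <linux/types.h>, the header the exported-ABI convention expects for the __u32/__u64 fixed-width types. A minimal sketch of the convention — the struct is hypothetical:

#include <linux/types.h>	/* __u32, __u64: safe for kernel and exported-header builds */

struct example_abi_record {	/* hypothetical */
	__u32	flags;
	__u64	addr;
};
]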
index 88f1f7d555cb89c6fd415814cf16683c2e05eba3..99993c0d6c12bb3ea50996991c8072ed254d7155 100644 (file)
@@ -9,7 +9,7 @@
 #define _ASM_SWAB_H
 
 #include <linux/compiler.h>
-#include <asm/types.h>
+#include <linux/types.h>
 
 #define __SWAB_64_THRU_32__
 
index a7162a4484cff791612d00d028280aa0132c67e4..1bdbcad3bb7414d2a394150e02bfcbbb0eb9db27 100644 (file)
@@ -149,6 +149,7 @@ void __init check_wait(void)
        case CPU_R4650:
        case CPU_R4700:
        case CPU_R5000:
+       case CPU_R5500:
        case CPU_NEVADA:
        case CPU_4KC:
        case CPU_4KEC:
index 494a49a317e9b78fb137714423a64ef0661a411f..87deb8f6c45885e5c356df3ac4ddf1e58a6ac81c 100644 (file)
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
                set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
        }
-       irq_desc[irq].affinity = *cpumask;
+       cpumask_copy(irq_desc[irq].affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
 
 }
index 2f8452b404c71be4a22bdf508dcedb54320d45c4..1a86f84fa9470f31030c53fdc9ad45c792d08425 100644 (file)
@@ -235,7 +235,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
 #else
 
 SYSCALL_DEFINE6(32_ipc, u32, call, int, first, int, second, int, third,
-       u32, ptr, u32 fifth)
+       u32, ptr, u32, fifth)
 {
        return -ENOSYS;
 }
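[Note on the hunk above: the one-character fix restores the comma between type and name in a SYSCALL_DEFINE6() invocation. The SYSCALL_DEFINEn() macros from <linux/syscalls.h> take alternating type, name pairs and reassemble them into a real parameter list, so a missing comma breaks the expansion. A minimal sketch with a hypothetical two-argument syscall:

#include <linux/syscalls.h>

SYSCALL_DEFINE2(example_add, int, a, int, b)	/* type, name pairs, all comma-separated */
{
	return a + b;
}
]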
index b6cca01ff82b309e803105c1230957e943c87df8..5f5af7d4c890791929291b35c2f0371484d34657 100644 (file)
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
         * and efficiency, we just pick the easiest one to find.
         */
 
-       target = first_cpu(irq_desc[irq].affinity);
+       target = cpumask_first(irq_desc[irq].affinity);
 
        /*
         * We depend on the platform code to have correctly processed
@@ -921,11 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
        struct clock_event_device *cd;
        void *arg_copy = pipi->arg;
        int type_copy = pipi->type;
+       int irq = MIPS_CPU_IRQ_BASE + 1;
+
        smtc_ipi_nq(&freeIPIq, pipi);
        switch (type_copy) {
        case SMTC_CLOCK_TICK:
                irq_enter();
-               kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+               kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);
                irq_exit();
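[Note on the hunk above, apparently arch/mips/kernel/smtc.c: it shows the other substitution repeated through the MIPS, mn10300 and sparc hunks below. Open-coded kstat_this_cpu.irqs[irq]++ statistics become kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)), which also works when CONFIG_SPARSE_IRQ allocates descriptors dynamically. A sketch, assuming the 2.6.29 <linux/kernel_stat.h> helper:

#include <linux/irq.h>
#include <linux/kernel_stat.h>

static void count_irq_sketch(unsigned int irq)
{
	/* was: kstat_this_cpu.irqs[irq]++; */
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
]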
index 1417c6494858cb383d3c3a254f28ab2f619485ad..48060c635acdc91458fe2528c599c395ae410b6b 100644 (file)
@@ -172,8 +172,9 @@ static void __cpuinit set_prefetch_parameters(void)
                 */
                cache_line_size = cpu_dcache_line_size();
                switch (current_cpu_type()) {
+               case CPU_R5500:
                case CPU_TX49XX:
-                       /* TX49 supports only Pref_Load */
+                       /* These processors only support the Pref_Load. */
                        pref_bias_copy_load = 256;
                        break;
 
index 42942038d0fd4daa4a3a7a4be7fcabe43d014fa0..f335cf6cdd78ed1c60da268fc63d5979fb9800a8 100644 (file)
@@ -318,6 +318,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case CPU_BCM4710:
        case CPU_LOONGSON2:
        case CPU_CAVIUM_OCTEON:
+       case CPU_R5500:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
                tlbw(p);
index aabd7274507b875e2726c246240d9e465a27bc9b..5ba31888fefbbecacd123e7033ef8ddcb6c30944 100644 (file)
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-       cpumask_t tmask = *affinity;
+       cpumask_t tmask;
        int cpu = 0;
        void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
         * be made to forward to an offline "CPU".
         */
 
+       cpumask_copy(&tmask, affinity);
        for_each_cpu(cpu, affinity) {
                if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
                        cpu_clear(cpu, tmask);
        }
-       irq_desc[irq].affinity = tmask;
+       cpumask_copy(irq_desc[irq].affinity, &tmask);
 
        if (cpus_empty(tmask))
                /*
index f8b18af141a19c5e82ee6c4c543f533dec0bea9a..0ecd5fe9486ee183e750c014f6d2cfb3283bff9a 100644 (file)
@@ -155,7 +155,7 @@ static void indy_buserror_irq(void)
        int irq = SGI_BUSERR_IRQ;
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        ip22_be_interrupt(irq);
        irq_exit();
 }
index 3dcb27ec0c530b7aa4238bc4eb2cd568cd9fc794..c8f7d2328b2425daf81a06cd07670be594e7affa 100644 (file)
@@ -122,7 +122,7 @@ void indy_8254timer_irq(void)
        char c;
 
        irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        printk(KERN_ALERT "Oops, got 8254 interrupt.\n");
        ArcRead(0, &c, 1, &cnt);
        ArcEnterInteractiveMode();
index dddfda8e829461e8297ab44cfad6501e34641ca3..314691648c97b5fe4ff0122ed1f46f3239809d29 100644 (file)
@@ -178,9 +178,10 @@ struct plat_smp_ops bcm1480_smp_ops = {
 void bcm1480_mailbox_interrupt(void)
 {
        int cpu = smp_processor_id();
+       int irq = K_BCM1480_INT_MBOX_0_0;
        unsigned int action;
 
-       kstat_this_cpu.irqs[K_BCM1480_INT_MBOX_0_0]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (__raw_readq(mailbox_0_regs[cpu]) >> 48) & 0xffff;
 
index 5950a288a7daab17939664417fadacd68ca28d7b..cad14003b84f2efc4a0fb746df799597af7e6f90 100644 (file)
@@ -166,9 +166,10 @@ struct plat_smp_ops sb_smp_ops = {
 void sb1250_mailbox_interrupt(void)
 {
        int cpu = smp_processor_id();
+       int irq = K_INT_MBOX_0;
        unsigned int action;
 
-       kstat_this_cpu.irqs[K_INT_MBOX_0]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        /* Load the mailbox register to figure out what we're supposed to do */
        action = (____raw_readq(mailbox_regs[cpu]) >> 48) & 0xffff;
 
index 10811e981d20fd397792151bc3c7693471368da4..2e370d88a87a22d7302f6dd386bf85a7dbe2ae97 100644 (file)
@@ -130,6 +130,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
         * the stack NMI-atomically, it's safe to use smp_processor_id().
         */
        int sum, cpu = smp_processor_id();
+       int irq = NMIIRQ;
        u8 wdt, tmp;
 
        wdt = WDCTR & ~WDCTR_WDCNE;
@@ -138,7 +139,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        NMICR = NMICR_WDIF;
 
        nmi_count(cpu)++;
-       kstat_this_cpu.irqs[NMIIRQ]++;
+       kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
        sum = irq_stat[cpu].__irq_count;
 
        if (last_irq_sums[cpu] == sum) {
index c584b00c6074af9419ac31badf1e432f38262815..430f1aeea0b896334a2ec6f1affbc7e44694d01f 100644 (file)
 #define NUM_PDC_RESULT 32
 
 #if !defined(__ASSEMBLY__)
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 
+#ifdef __KERNEL__
+
 extern int pdc_type;
 
 /* Values for pdc_type */
index 3ff16c5a335856572bb7c48263a395fff7c9438d..e78403b129ef927361032c8c92c5e3c82e830941 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _PARISC_SWAB_H
 #define _PARISC_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 #define __SWAB_64_THRU_32__
index ac2c822928c75e3ade5d9e6d278c08804703c3d8..49482806863fa522d71e6e37d90a5c470e1e4023 100644 (file)
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
        if (CHECK_IRQ_PER_CPU(irq)) {
                /* Bad linux design decision.  The mask has already
                 * been set; we must reset it */
-               irq_desc[irq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[irq].affinity);
                return -EINVAL;
        }
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
        if (cpu_check_affinity(irq, dest))
                return;
 
-       irq_desc[irq].affinity = *dest;
+       cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-       irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
        return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
        irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-       dest = irq_desc[irq].affinity;
+       cpumask_copy(&dest, irq_desc[irq].affinity);
        if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
            !cpu_isset(smp_processor_id(), dest)) {
                int cpu = first_cpu(dest);
index 57b82e3f89cea36a1b332db5974e2efa047969c6..60a3c9ef3017818897ac689d43c3db2b35e7eac9 100644 (file)
@@ -9,7 +9,7 @@
 #ifndef __ASM_BOOTX_H__
 #define __ASM_BOOTX_H__
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifdef macintosh
 #include <Types.h>
index cd46f023ec6d298dd4571249881ed54a12649eb0..b5600ce6055ea7d324fdff62686d385d6160a272 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/string.h>
 #endif
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/ptrace.h>
 #include <asm/cputable.h>
 #include <asm/auxvec.h>
index f993e4198d5ca0d3554fc68ee4c64c92c25243ca..4e0cf65f7f5a567d7644c464f6e5f3d0cc78fd25 100644 (file)
@@ -20,7 +20,7 @@
 #ifndef __LINUX_KVM_POWERPC_H
 #define __LINUX_KVM_POWERPC_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 struct kvm_regs {
        __u64 pc;
index 3f121fe4010d02144b30a9eefe6840e4f0cbbc78..e7233a849680ce4595f8b9afc345593b9431d6ce 100644 (file)
@@ -19,6 +19,7 @@
 #ifndef _ASM_POWERPC_PS3FB_H_
 #define _ASM_POWERPC_PS3FB_H_
 
+#include <linux/types.h>
 #include <linux/ioctl.h>
 
 /* ioctl */
index 3545efbf9891094c79b5c491b01cbeb55c833990..1286c823f0d881f58137cf98eb3b9b3cde2d606e 100644 (file)
 #ifndef _SPU_INFO_H
 #define _SPU_INFO_H
 
+#include <linux/types.h>
+
 #ifdef __KERNEL__
 #include <asm/spu.h>
-#include <linux/types.h>
 #else
 struct mfc_cq_sr {
        __u64 mfc_cq_data0_RW;
index ef824ae4b79c47565cb0227c4df668e933baa30f..c581e3ef73ed0ab38b70f3da7f2e58f9d67b5898 100644 (file)
@@ -8,7 +8,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 #ifdef __GNUC__
index 23b8b5e36f98b9afb5cf970e20b4a77ab5785627..ad1e5ac721d86f557bac20079b407c8929459273 100644 (file)
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;
 
-               cpus_and(mask, irq_desc[irq].affinity, map);
+               cpumask_and(&mask, irq_desc[irq].affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
index 161b9b9691f0b3591c25d546202205ec79ab4a4a..67f07f453385f5ef99357052c6780537a945b495 100644 (file)
@@ -181,13 +181,7 @@ SECTIONS
                __initramfs_end = .;
        }
 #endif
-       . = ALIGN(PAGE_SIZE);
-       .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
-               __per_cpu_start = .;
-               *(.data.percpu)
-               *(.data.percpu.shared_aligned)
-               __per_cpu_end = .;
-       }
+       PERCPU(PAGE_SIZE)
 
        . = ALIGN(8);
        .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
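[Note on the hunk above, apparently arch/powerpc/kernel/vmlinux.lds.S: the open-coded .data.percpu output section is replaced by the shared PERCPU() macro from include/asm-generic/vmlinux.lds.h. As a simplified sketch — the exact input-section list and symbol placement vary between kernel versions — PERCPU(PAGE_SIZE) expands to roughly the block being deleted:

#define PERCPU(align)							\
	. = ALIGN(align);						\
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
		__per_cpu_start = .;					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		__per_cpu_end = .;					\
	}

Centralizing the definition lets the percpu rework in this merge adjust every architecture's percpu section in one place.]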
index 84e058f1e1cc805516157d9a2c503168149e2084..80b513449f4c44d3c98915efe233c01caa382f4d 100644 (file)
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
        int server;
        /* For the moment only implement delivery to all cpus or one cpu */
-       cpumask_t cpumask = irq_desc[virq].affinity;
+       cpumask_t cpumask;
        cpumask_t tmp = CPU_MASK_NONE;
 
+       cpumask_copy(&cpumask, irq_desc[virq].affinity);
        if (!distribute_irqs)
                return default_server;
 
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
                       virq, cpu);
 
                /* Reset affinity to all cpus */
-               irq_desc[virq].affinity = CPU_MASK_ALL;
+               cpumask_setall(irq_desc[virq].affinity);
                desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
                spin_unlock_irqrestore(&desc->lock, flags);
index a35297dbac28d572d061c555aacfa3103cf465d2..532e205303a29ea530ab4e3e180cfd4e459ceaa4 100644 (file)
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
index e289376198eb2be4e722b5cbf1a31d440c580631..233bd87a963744f62641c5e50e1ce2bdf49b2b14 100644 (file)
@@ -252,9 +252,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-       cpumask_t mask = irq_desc[virt_irq].affinity;
+       cpumask_t mask;
        int cpuid;
 
+       cpumask_copy(&mask, irq_desc[virt_irq].affinity);
        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
@@ -323,17 +324,25 @@ static void sun4u_set_affinity(unsigned int virt_irq,
        sun4u_irq_enable(virt_irq);
 }
 
+/* Don't do anything.  The desc->status check for IRQ_DISABLED in
+ * handler_irq() will skip the handler call and that will leave the
+ * interrupt in the sent state.  The next ->enable() call will hit the
+ * ICLR register to reset the state machine.
+ *
+ * This scheme is necessary, instead of clearing the Valid bit in the
+ * IMAP register, to handle the case of IMAP registers being shared by
+ * multiple INOs (and thus ICLR registers).  Since we use a different
+ * virtual IRQ for each shared IMAP instance, the generic code thinks
+ * there is only one user so it prematurely calls ->disable() on
+ * free_irq().
+ *
+ * We have to provide an explicit ->disable() method instead of using
+ * NULL to get the default.  The reason is that if the generic code
+ * sees that, it also hooks up a default ->shutdown method which
+ * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
+ */
 static void sun4u_irq_disable(unsigned int virt_irq)
 {
-       struct irq_handler_data *data = get_irq_chip_data(virt_irq);
-
-       if (likely(data)) {
-               unsigned long imap = data->imap;
-               unsigned long tmp = upa_readq(imap);
-
-               tmp &= ~IMAP_VALID;
-               upa_writeq(tmp, imap);
-       }
 }
 
 static void sun4u_irq_eoi(unsigned int virt_irq)
@@ -746,7 +755,8 @@ void handler_irq(int irq, struct pt_regs *regs)
 
                desc = irq_desc + virt_irq;
 
-               desc->handle_irq(virt_irq, desc);
+               if (!(desc->status & IRQ_DISABLED))
+                       desc->handle_irq(virt_irq, desc);
 
                bucket_pa = next_pa;
        }
@@ -796,7 +806,7 @@ void fixup_irqs(void)
                    !(irq_desc[irq].status & IRQ_PER_CPU)) {
                        if (irq_desc[irq].chip->set_affinity)
                                irq_desc[irq].chip->set_affinity(irq,
-                                       &irq_desc[irq].affinity);
+                                       irq_desc[irq].affinity);
                }
                spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
        }
index 2db3c2229b95ca39870fef29c01677ff39dc7c2d..db310aa00183cb0b92243cc234f9fcaedc03deeb 100644 (file)
@@ -729,7 +729,7 @@ void timer_interrupt(int irq, struct pt_regs *regs)
 
        irq_enter();
 
-       kstat_this_cpu.irqs[0]++;
+       kstat_incr_irqs_this_cpu(0, irq_to_desc(0));
 
        if (unlikely(!evt->event_handler)) {
                printk(KERN_WARNING
index 15e8b7c4de1383b57a01587b1634171f2a0f26a5..8e3d69e4fcb5137bde2e2aa62d3d16587d4028b1 100644 (file)
@@ -64,6 +64,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                ret = poke_user(child, addr, data);
                break;
 
+       case PTRACE_SYSEMU:
+       case PTRACE_SYSEMU_SINGLESTEP:
+               ret = -EIO;
+               break;
+
        /* continue and stop at next (return from) syscall */
        case PTRACE_SYSCALL:
        /* restart after signal. */
index 74f49bb9b125ed8728de851780b489895f2f4ac2..89b48a116a89b0807f77929c752366eee6ecb4fc 100644 (file)
@@ -14,7 +14,6 @@
 #undef memset
 
 extern size_t strlen(const char *);
-extern void *memcpy(void *, const void *, size_t);
 extern void *memmove(void *, const void *, size_t);
 extern void *memset(void *, int, size_t);
 extern int printf(const char *, ...);
@@ -24,7 +23,11 @@ extern int printf(const char *, ...);
 EXPORT_SYMBOL(strstr);
 #endif
 
+#ifndef __x86_64__
+extern void *memcpy(void *, const void *, size_t);
 EXPORT_SYMBOL(memcpy);
+#endif
+
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(printf);
index bc2fbadff9f90fe681d9c30ae621d1ec94e348cf..34bc3a89228b58cb31d2aa18c70c1965bb35ace6 100644 (file)
@@ -5,7 +5,7 @@ mainmenu "Linux Kernel Configuration for x86"
 config 64BIT
        bool "64-bit kernel" if ARCH = "x86"
        default ARCH = "x86_64"
-       help
+       ---help---
          Say yes to build a 64-bit kernel - formerly known as x86_64
          Say no to build a 32-bit kernel - formerly known as i386
 
@@ -34,12 +34,15 @@ config X86
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
-       select HAVE_ARCH_KGDB if !X86_VOYAGER
+       select HAVE_KVM
+       select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
        select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
        select USER_STACKTRACE_SUPPORT
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_LZMA
 
 config ARCH_DEFCONFIG
        string
@@ -133,18 +136,19 @@ config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
 
 config HAVE_SETUP_PER_CPU_AREA
-       def_bool X86_64_SMP || (X86_SMP && !X86_VOYAGER)
+       def_bool y
+
+config HAVE_DYNAMIC_PER_CPU_AREA
+       def_bool y
 
 config HAVE_CPUMASK_OF_CPU_MAP
        def_bool X86_64_SMP
 
 config ARCH_HIBERNATION_POSSIBLE
        def_bool y
-       depends on !SMP || !X86_VOYAGER
 
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
-       depends on !X86_VOYAGER
 
 config ZONE_DMA32
        bool
@@ -165,6 +169,9 @@ config GENERIC_HARDIRQS
        bool
        default y
 
+config GENERIC_HARDIRQS_NO__DO_IRQ
+       def_bool y
+
 config GENERIC_IRQ_PROBE
        bool
        default y
@@ -174,11 +181,6 @@ config GENERIC_PENDING_IRQ
        depends on GENERIC_HARDIRQS && SMP
        default y
 
-config X86_SMP
-       bool
-       depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64)
-       default y
-
 config USE_GENERIC_SMP_HELPERS
        def_bool y
        depends on SMP
@@ -194,19 +196,17 @@ config X86_64_SMP
 config X86_HT
        bool
        depends on SMP
-       depends on (X86_32 && !X86_VOYAGER) || X86_64
-       default y
-
-config X86_BIOS_REBOOT
-       bool
-       depends on !X86_VOYAGER
        default y
 
 config X86_TRAMPOLINE
        bool
-       depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
+       depends on SMP || (64BIT && ACPI_SLEEP)
        default y
 
+config X86_32_LAZY_GS
+       def_bool y
+       depends on X86_32 && !CC_STACKPROTECTOR
+
 config KTIME_SCALAR
        def_bool X86_32
 source "init/Kconfig"
@@ -244,14 +244,24 @@ config SMP
 
          If you don't know what to do here, say N.
 
-config X86_HAS_BOOT_CPU_ID
-       def_bool y
-       depends on X86_VOYAGER
+config X86_X2APIC
+       bool "Support x2apic"
+       depends on X86_LOCAL_APIC && X86_64
+       ---help---
+         This enables x2apic support on CPUs that have this feature.
+
+         This allows 32-bit apic IDs (so it can support very large systems),
+         and accesses the local apic via MSRs not via mmio.
+
+         ( On certain CPU models you may need to enable INTR_REMAP too,
+           to get functional x2apic mode. )
+
+         If you don't know what to do here, say N.
 
 config SPARSE_IRQ
        bool "Support sparse irq numbering"
        depends on PCI_MSI || HT_IRQ
-       help
+       ---help---
          This enables support for sparse irqs. This is useful for distro
          kernels that want to define a high CONFIG_NR_CPUS value but still
          want to have low kernel memory footprint on smaller machines.
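[Note on the X86_X2APIC help text above: x2apic mode reaches the local APIC through MSRs rather than MMIO. Concretely, the register at MMIO offset reg moves to MSR 0x800 + (reg >> 4). A minimal sketch of a read in that scheme — my illustration, not code from this merge — assuming the standard rdmsr() macro from <asm/msr.h>:

#include <linux/types.h>
#include <asm/msr.h>

#define X2APIC_MSR_BASE	0x800	/* MMIO offset reg maps to MSR 0x800 + (reg >> 4) */

static u32 x2apic_read_reg(u32 mmio_offset)
{
	u32 lo, hi;

	/* e.g. the APIC ID register, MMIO offset 0x20, becomes MSR 0x802 */
	rdmsr(X2APIC_MSR_BASE + (mmio_offset >> 4), lo, hi);
	return lo;	/* x2apic IDs are a full 32 bits, unlike the 8-bit xAPIC ID */
}
]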
@@ -265,114 +275,140 @@ config NUMA_MIGRATE_IRQ_DESC
        bool "Move irq desc when changing irq smp_affinity"
        depends on SPARSE_IRQ && NUMA
        default n
-       help
+       ---help---
          This enables moving the irq_desc to the cpu/node on which the irq
          will be handled.
 
          If you don't know what to do here, say N.
 
-config X86_FIND_SMP_CONFIG
-       def_bool y
-       depends on X86_MPPARSE || X86_VOYAGER
-
 config X86_MPPARSE
        bool "Enable MPS table" if ACPI
        default y
        depends on X86_LOCAL_APIC
-       help
+       ---help---
          For old SMP systems that do not have proper ACPI support.  On newer
          systems (esp. with 64-bit CPUs) with ACPI support, the MADT and DSDT
          will override it.
 
-choice
-       prompt "Subarchitecture Type"
-       default X86_PC
+config X86_BIGSMP
+       bool "Support for big SMP systems with more than 8 CPUs"
+       depends on X86_32 && SMP
+       ---help---
+         This option is needed for systems that have more than 8 CPUs.
 
-config X86_PC
-       bool "PC-compatible"
-       help
-         Choose this option if your computer is a standard PC or compatible.
+if X86_32
+config X86_EXTENDED_PLATFORM
+       bool "Support for extended (non-PC) x86 platforms"
+       default y
+       ---help---
+         If you disable this option then the kernel will only support
+         standard PC platforms. (which covers the vast majority of
+         systems out there.)
+
+         If you enable this option then you'll be able to select support
+         for the following (non-PC) 32 bit x86 platforms:
+               AMD Elan
+               NUMAQ (IBM/Sequent)
+               RDC R-321x SoC
+               SGI 320/540 (Visual Workstation)
+               Summit/EXA (IBM x440)
+               Unisys ES7000 IA32 series
+
+         If you have one of these systems, or if you want to build a
+         generic distribution kernel, say Y here - otherwise say N.
+endif
+
+if X86_64
+config X86_EXTENDED_PLATFORM
+       bool "Support for extended (non-PC) x86 platforms"
+       default y
+       ---help---
+         If you disable this option then the kernel will only support
+         standard PC platforms. (which covers the vast majority of
+         systems out there.)
+
+         If you enable this option then you'll be able to select support
+         for the following (non-PC) 64 bit x86 platforms:
+               ScaleMP vSMP
+               SGI Ultraviolet
+
+         If you have one of these systems, or if you want to build a
+         generic distribution kernel, say Y here - otherwise say N.
+endif
+# This is an alphabetically sorted list of 64 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
+
+config X86_VSMP
+       bool "ScaleMP vSMP"
+       select PARAVIRT
+       depends on X86_64 && PCI
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
+         Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
+         supposed to run on these EM64T-based machines.  Only choose this option
+         if you have one of these machines.
+
+config X86_UV
+       bool "SGI Ultraviolet"
+       depends on X86_64
+       depends on X86_EXTENDED_PLATFORM
+       select X86_X2APIC
+       ---help---
+         This option is needed in order to support SGI Ultraviolet systems.
+         If you don't have one of these, you should say N here.
+
+# Following is an alphabetically sorted list of 32 bit extended platforms
+# Please maintain the alphabetic order if and when there are additions
 
 config X86_ELAN
        bool "AMD Elan"
        depends on X86_32
-       help
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
          Select this for an AMD Elan processor.
 
          Do not use this option for K6/Athlon/Opteron processors!
 
          If unsure, choose "PC-compatible" instead.
 
-config X86_VOYAGER
-       bool "Voyager (NCR)"
-       depends on X86_32 && (SMP || BROKEN) && !PCI
-       help
-         Voyager is an MCA-based 32-way capable SMP architecture proprietary
-         to NCR Corp.  Machine classes 345x/35xx/4100/51xx are Voyager-based.
-
-         *** WARNING ***
-
-         If you do not specifically know you have a Voyager based machine,
-         say N here, otherwise the kernel you build will not be bootable.
-
-config X86_GENERICARCH
-       bool "Generic architecture"
+config X86_RDC321X
+       bool "RDC R-321x SoC"
        depends on X86_32
-       help
-          This option compiles in the NUMAQ, Summit, bigsmp, ES7000, default
+       depends on X86_EXTENDED_PLATFORM
+       select M486
+       select X86_REBOOTFIXUPS
+       ---help---
+         This option is needed for RDC R-321x system-on-chip, also known
+         as R-8610-(G).
+         If you don't have one of these chips, you should say N here.
+
+config X86_32_NON_STANDARD
+       bool "Support non-standard 32-bit SMP architectures"
+       depends on X86_32 && SMP
+       depends on X86_EXTENDED_PLATFORM
+       ---help---
+         This option compiles in the NUMAQ, Summit, bigsmp, ES7000 and default
          subarchitectures.  It is intended for a generic binary kernel;
          if you select them all, the kernel will probe them one by one and
          will fall back to the default.
 
-if X86_GENERICARCH
+# Alphabetically sorted list of Non standard 32 bit platforms
 
 config X86_NUMAQ
        bool "NUMAQ (IBM/Sequent)"
-       depends on SMP && X86_32 && PCI && X86_MPPARSE
+       depends on X86_32_NON_STANDARD
        select NUMA
-       help
+       select X86_MPPARSE
+       ---help---
          This option is used for getting Linux to run on a NUMAQ (IBM/Sequent)
          NUMA multiquad box. This changes the way that processors are
          bootstrapped, and uses Clustered Logical APIC addressing mode instead
          of Flat Logical.  You will need a new lynxer.elf file to flash your
          firmware with - send email to <Martin.Bligh@us.ibm.com>.
 
-config X86_SUMMIT
-       bool "Summit/EXA (IBM x440)"
-       depends on X86_32 && SMP
-       help
-         This option is needed for IBM systems that use the Summit/EXA chipset.
-         In particular, it is needed for the x440.
-
-config X86_ES7000
-       bool "Support for Unisys ES7000 IA32 series"
-       depends on X86_32 && SMP
-       help
-         Support for Unisys ES7000 systems.  Say 'Y' here if this kernel is
-         supposed to run on an IA32-based Unisys ES7000 system.
-
-config X86_BIGSMP
-       bool "Support for big SMP systems with more than 8 CPUs"
-       depends on X86_32 && SMP
-       help
-         This option is needed for the systems that have more than 8 CPUs
-         and if the system is not of any sub-arch type above.
-
-endif
-
-config X86_VSMP
-       bool "Support for ScaleMP vSMP"
-       select PARAVIRT
-       depends on X86_64 && PCI
-       help
-         Support for ScaleMP vSMP systems.  Say 'Y' here if this kernel is
-         supposed to run on these EM64T-based machines.  Only choose this option
-         if you have one of these machines.
-
-endchoice
-
 config X86_VISWS
        bool "SGI 320/540 (Visual Workstation)"
-       depends on X86_32 && PCI && !X86_VOYAGER && X86_MPPARSE && PCI_GODIRECT
-       help
+       depends on X86_32 && PCI && X86_MPPARSE && PCI_GODIRECT
+       depends on X86_32_NON_STANDARD
+       ---help---
          The SGI Visual Workstation series is an IA32-based workstation
          based on SGI systems chips with some legacy PC hardware attached.
 
@@ -381,21 +417,25 @@ config X86_VISWS
          A kernel compiled for the Visual Workstation will run on general
          PCs as well. See <file:Documentation/sgi-visws.txt> for details.
 
-config X86_RDC321X
-       bool "RDC R-321x SoC"
-       depends on X86_32
-       select M486
-       select X86_REBOOTFIXUPS
-       help
-         This option is needed for RDC R-321x system-on-chip, also known
-         as R-8610-(G).
-         If you don't have one of these chips, you should say N here.
+config X86_SUMMIT
+       bool "Summit/EXA (IBM x440)"
+       depends on X86_32_NON_STANDARD
+       ---help---
+         This option is needed for IBM systems that use the Summit/EXA chipset.
+         In particular, it is needed for the x440.
+
+config X86_ES7000
+       bool "Unisys ES7000 IA32 series"
+       depends on X86_32_NON_STANDARD && X86_BIGSMP
+       ---help---
+         Support for Unisys ES7000 systems.  Say 'Y' here if this kernel is
+         supposed to run on an IA32-based Unisys ES7000 system.
 
 config SCHED_OMIT_FRAME_POINTER
        def_bool y
        prompt "Single-depth WCHAN output"
        depends on X86
-       help
+       ---help---
          Calculate simpler /proc/<PID>/wchan values. If this option
          is disabled then wchan values will recurse back to the
          caller function. This provides more accurate wchan values,
@@ -405,7 +445,7 @@ config SCHED_OMIT_FRAME_POINTER
 
 menuconfig PARAVIRT_GUEST
        bool "Paravirtualized guest support"
-       help
+       ---help---
          Say Y here to get to see options related to running Linux under
          various hypervisors.  This option alone does not add any kernel code.
 
@@ -419,8 +459,7 @@ config VMI
        bool "VMI Guest support"
        select PARAVIRT
        depends on X86_32
-       depends on !X86_VOYAGER
-       help
+       ---help---
          VMI provides a paravirtualized interface to the VMware ESX server
          (it could be used by other hypervisors in theory too, but is not
          at the moment), by linking the kernel to a GPL-ed ROM module
@@ -430,8 +469,7 @@ config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
        select PARAVIRT_CLOCK
-       depends on !X86_VOYAGER
-       help
+       ---help---
          Turning on this option will allow you to run a paravirtualized clock
          when running over the KVM hypervisor. Instead of relying on a PIT
          (or probably other) emulation by the underlying device model, the host
@@ -441,17 +479,15 @@ config KVM_CLOCK
 config KVM_GUEST
        bool "KVM Guest support"
        select PARAVIRT
-       depends on !X86_VOYAGER
-       help
-        This option enables various optimizations for running under the KVM
-        hypervisor.
+       ---help---
+         This option enables various optimizations for running under the KVM
+         hypervisor.
 
 source "arch/x86/lguest/Kconfig"
 
 config PARAVIRT
        bool "Enable paravirtualization code"
-       depends on !X86_VOYAGER
-       help
+       ---help---
          This changes the kernel so it can modify itself when it is run
          under a hypervisor, potentially improving performance significantly
          over full virtualization.  However, when run without a hypervisor
@@ -464,51 +500,51 @@ config PARAVIRT_CLOCK
 endif
 
 config PARAVIRT_DEBUG
-       bool "paravirt-ops debugging"
-       depends on PARAVIRT && DEBUG_KERNEL
-       help
-         Enable to debug paravirt_ops internals.  Specifically, BUG if
-        a paravirt_op is missing when it is called.
+       bool "paravirt-ops debugging"
+       depends on PARAVIRT && DEBUG_KERNEL
+       ---help---
+         Enable to debug paravirt_ops internals.  Specifically, BUG if
+         a paravirt_op is missing when it is called.
 
 config MEMTEST
        bool "Memtest"
-       help
+       ---help---
          This option adds a kernel parameter 'memtest', which allows memtest
          to be set.
-               memtest=0, mean disabled; -- default
-               memtest=1, mean do 1 test pattern;
-               ...
-               memtest=4, mean do 4 test patterns.
+               memtest=0, means disabled; -- default
+               memtest=1, means do 1 test pattern;
+               ...
+               memtest=4, means do 4 test patterns.
          If you are unsure how to answer this question, answer N.
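
          For illustration, using the values listed above, a kernel
          command line requesting four test patterns would contain:

                memtest=4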
 
 config X86_SUMMIT_NUMA
        def_bool y
-       depends on X86_32 && NUMA && X86_GENERICARCH
+       depends on X86_32 && NUMA && X86_32_NON_STANDARD
 
 config X86_CYCLONE_TIMER
        def_bool y
-       depends on X86_GENERICARCH
+       depends on X86_32_NON_STANDARD
 
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
        def_bool X86_64
        prompt "HPET Timer Support" if X86_32
-       help
-         Use the IA-PC HPET (High Precision Event Timer) to manage
-         time in preference to the PIT and RTC, if a HPET is
-         present.
-         HPET is the next generation timer replacing legacy 8254s.
-         The HPET provides a stable time base on SMP
-         systems, unlike the TSC, but it is more expensive to access,
-         as it is off-chip.  You can find the HPET spec at
-         <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
+       ---help---
+         Use the IA-PC HPET (High Precision Event Timer) to manage
+         time in preference to the PIT and RTC, if a HPET is
+         present.
+         HPET is the next generation timer replacing legacy 8254s.
+         The HPET provides a stable time base on SMP
+         systems, unlike the TSC, but it is more expensive to access,
+         as it is off-chip.  You can find the HPET spec at
+         <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
 
-         You can safely choose Y here.  However, HPET will only be
-         activated if the platform and the BIOS support this feature.
-         Otherwise the 8254 will be used for timing services.
+         You can safely choose Y here.  However, HPET will only be
+         activated if the platform and the BIOS support this feature.
+         Otherwise the 8254 will be used for timing services.
 
-         Choose N to continue using the legacy 8254 timer.
+         Choose N to continue using the legacy 8254 timer.
 
 config HPET_EMULATE_RTC
        def_bool y
@@ -519,7 +555,7 @@ config HPET_EMULATE_RTC
 config DMI
        default y
        bool "Enable DMI scanning" if EMBEDDED
-       help
+       ---help---
          Enables scanning of DMI to identify machine quirks. Say Y
          here unless you have verified that your setup is not
          affected by entries in the DMI blacklist. Required by PNP
@@ -531,7 +567,7 @@ config GART_IOMMU
        select SWIOTLB
        select AGP
        depends on X86_64 && PCI
-       help
+       ---help---
          Support for full DMA access of devices with 32bit memory access only
          on systems with more than 3GB. This is usually needed for USB,
          sound, many IDE/SATA chipsets and some other devices.
@@ -546,7 +582,7 @@ config CALGARY_IOMMU
        bool "IBM Calgary IOMMU support"
        select SWIOTLB
        depends on X86_64 && PCI && EXPERIMENTAL
-       help
+       ---help---
          Support for hardware IOMMUs in IBM's xSeries x366 and x460
          systems. Needed to run systems with more than 3GB of memory
          properly with 32-bit PCI devices that do not support DAC
@@ -564,7 +600,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
        def_bool y
        prompt "Should Calgary be enabled by default?"
        depends on CALGARY_IOMMU
-       help
+       ---help---
          Should Calgary be enabled by default? If you choose 'y', Calgary
          will be used (if it exists). If you choose 'n', Calgary will not be
          used even if it exists. If you choose 'n' and would like to use
@@ -576,7 +612,7 @@ config AMD_IOMMU
        select SWIOTLB
        select PCI_MSI
        depends on X86_64 && PCI && ACPI
-       help
+       ---help---
          With this option you can enable support for AMD IOMMU hardware in
          your system. An IOMMU is a hardware component which provides
          remapping of DMA memory accesses from devices. With an AMD IOMMU you
@@ -591,7 +627,7 @@ config AMD_IOMMU_STATS
        bool "Export AMD IOMMU statistics to debugfs"
        depends on AMD_IOMMU
        select DEBUG_FS
-       help
+       ---help---
          This option enables code in the AMD IOMMU driver to collect various
          statistics about what's happening in the driver and exports that
          information to userspace via debugfs.
@@ -600,7 +636,7 @@ config AMD_IOMMU_STATS
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
        def_bool y if X86_64
-       help
+       ---help---
          Support for software bounce buffers used on x86-64 systems
          which don't have a hardware IOMMU (e.g. the current generation
          of Intel's x86-64 CPUs). Using this, PCI devices which can only
@@ -618,7 +654,7 @@ config MAXSMP
        depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
        select CPUMASK_OFFSTACK
        default n
-       help
+       ---help---
          Configure the maximum number of CPUs and NUMA nodes for this architecture.
          If unsure, say N.
 
@@ -629,7 +665,7 @@ config NR_CPUS
        default "4096" if MAXSMP
        default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
        default "8" if SMP
-       help
+       ---help---
          This allows you to specify the maximum number of CPUs which this
          kernel will support.  The maximum supported value is 512 and the
          minimum value which makes sense is 2.
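
          As a sketch (the option name is as defined above; the value is
          only an example), a .config fragment for a 64-way system would read:

                CONFIG_NR_CPUS=64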
@@ -640,7 +676,7 @@ config NR_CPUS
 config SCHED_SMT
        bool "SMT (Hyperthreading) scheduler support"
        depends on X86_HT
-       help
+       ---help---
          SMT scheduler support improves the CPU scheduler's decision making
          when dealing with Intel Pentium 4 chips with HyperThreading at a
          cost of slightly increased overhead in some places. If unsure say
@@ -650,7 +686,7 @@ config SCHED_MC
        def_bool y
        prompt "Multi-core scheduler support"
        depends on X86_HT
-       help
+       ---help---
          Multi-core scheduler support improves the CPU scheduler's decision
          making when dealing with multi-core CPU chips at a cost of slightly
          increased overhead in some places. If unsure say N here.
@@ -659,8 +695,8 @@ source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
        bool "Local APIC support on uniprocessors"
-       depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH)
-       help
+       depends on X86_32 && !SMP && !X86_32_NON_STANDARD
+       ---help---
          A local APIC (Advanced Programmable Interrupt Controller) is an
          integrated interrupt controller in the CPU. If you have a single-CPU
          system which has a processor with a local APIC, you can say Y here to
@@ -673,7 +709,7 @@ config X86_UP_APIC
 config X86_UP_IOAPIC
        bool "IO-APIC support on uniprocessors"
        depends on X86_UP_APIC
-       help
+       ---help---
          An IO-APIC (I/O Advanced Programmable Interrupt Controller) is an
          SMP-capable replacement for PC-style interrupt controllers. Most
          SMP systems and many recent uniprocessor systems have one.
@@ -684,11 +720,11 @@ config X86_UP_IOAPIC
 
 config X86_LOCAL_APIC
        def_bool y
-       depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
 
 config X86_IO_APIC
        def_bool y
-       depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
+       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
 
 config X86_VISWS_APIC
        def_bool y
@@ -698,7 +734,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
        bool "Reroute for broken boot IRQs"
        default n
        depends on X86_IO_APIC
-       help
+       ---help---
          This option enables a workaround that fixes a source of
          spurious interrupts. This is recommended when threaded
          interrupt handling is used on systems where the generation of
@@ -720,7 +756,6 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
 
 config X86_MCE
        bool "Machine Check Exception"
-       depends on !X86_VOYAGER
        ---help---
          Machine Check Exception support allows the processor to notify the
          kernel if it detects a problem (e.g. overheating, component failure).
@@ -739,7 +774,7 @@ config X86_MCE_INTEL
        def_bool y
        prompt "Intel MCE features"
        depends on X86_64 && X86_MCE && X86_LOCAL_APIC
-       help
+       ---help---
           Additional support for Intel-specific MCE features such as
           the thermal monitor.
 
@@ -747,14 +782,19 @@ config X86_MCE_AMD
        def_bool y
        prompt "AMD MCE features"
        depends on X86_64 && X86_MCE && X86_LOCAL_APIC
-       help
+       ---help---
           Additional support for AMD-specific MCE features such as
           the DRAM Error Threshold.
 
+config X86_MCE_THRESHOLD
+       depends on X86_MCE_AMD || X86_MCE_INTEL
+       bool
+       default y
+
 config X86_MCE_NONFATAL
        tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
        depends on X86_32 && X86_MCE
-       help
+       ---help---
          Enabling this feature starts a timer that triggers every 5 seconds which
          will look at the machine check registers to see if anything happened.
          Non-fatal problems automatically get corrected (but still logged).
@@ -767,7 +807,7 @@ config X86_MCE_NONFATAL
 config X86_MCE_P4THERMAL
        bool "check for P4 thermal throttling interrupt."
        depends on X86_32 && X86_MCE && (X86_UP_APIC || SMP)
-       help
+       ---help---
          Enabling this feature will cause a message to be printed when the P4
          enters thermal throttling.
 
@@ -775,11 +815,11 @@ config VM86
        bool "Enable VM86 support" if EMBEDDED
        default y
        depends on X86_32
-       help
-          This option is required by programs like DOSEMU to run 16-bit legacy
+       ---help---
+         This option is required by programs like DOSEMU to run 16-bit legacy
          code on X86 processors. It also may be needed by software like
-          XFree86 to initialize some video cards via BIOS. Disabling this
-          option saves about 6k.
+         XFree86 to initialize some video cards via BIOS. Disabling this
+         option saves about 6k.
 
 config TOSHIBA
        tristate "Toshiba Laptop support"
@@ -853,33 +893,33 @@ config MICROCODE
          module will be called microcode.
 
 config MICROCODE_INTEL
-       bool "Intel microcode patch loading support"
-       depends on MICROCODE
-       default MICROCODE
-       select FW_LOADER
-       --help---
-         This options enables microcode patch loading support for Intel
-         processors.
-
-         For latest news and information on obtaining all the required
-         Intel ingredients for this driver, check:
-         <http://www.urbanmyth.org/microcode/>.
+       bool "Intel microcode patch loading support"
+       depends on MICROCODE
+       default MICROCODE
+       select FW_LOADER
+       ---help---
+         This option enables microcode patch loading support for Intel
+         processors.
+
+         For latest news and information on obtaining all the required
+         Intel ingredients for this driver, check:
+         <http://www.urbanmyth.org/microcode/>.
 
 config MICROCODE_AMD
-       bool "AMD microcode patch loading support"
-       depends on MICROCODE
-       select FW_LOADER
-       --help---
-         If you select this option, microcode patch loading support for AMD
-        processors will be enabled.
+       bool "AMD microcode patch loading support"
+       depends on MICROCODE
+       select FW_LOADER
+       ---help---
+         If you select this option, microcode patch loading support for AMD
+         processors will be enabled.
 
-   config MICROCODE_OLD_INTERFACE
+config MICROCODE_OLD_INTERFACE
        def_bool y
        depends on MICROCODE
 
 config X86_MSR
        tristate "/dev/cpu/*/msr - Model-specific register support"
-       help
+       ---help---
          This device gives privileged processes access to the x86
          Model-Specific Registers (MSRs).  It is a character device with
          major 202 and minors 0 to 31 for /dev/cpu/0/msr to /dev/cpu/31/msr.
@@ -888,12 +928,18 @@ config X86_MSR
 
 config X86_CPUID
        tristate "/dev/cpu/*/cpuid - CPU information support"
-       help
+       ---help---
          This device gives processes access to the x86 CPUID instruction to
          be executed on a specific processor.  It is a character device
          with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
          /dev/cpu/31/cpuid.
 
+config X86_CPU_DEBUG
+       tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
+       ---help---
+         If you select this option, various x86 CPU information will be
+         provided through debugfs.
+
 choice
        prompt "High Memory Support"
        default HIGHMEM4G if !X86_NUMAQ
@@ -940,7 +986,7 @@ config NOHIGHMEM
 config HIGHMEM4G
        bool "4GB"
        depends on !X86_NUMAQ
-       help
+       ---help---
          Select this if you have a 32-bit processor and between 1 and 4
          gigabytes of physical RAM.
 
@@ -948,7 +994,7 @@ config HIGHMEM64G
        bool "64GB"
        depends on !M386 && !M486
        select X86_PAE
-       help
+       ---help---
          Select this if you have a 32-bit processor and more than 4
          gigabytes of physical RAM.
 
@@ -959,7 +1005,7 @@ choice
        prompt "Memory split" if EMBEDDED
        default VMSPLIT_3G
        depends on X86_32
-       help
+       ---help---
          Select the desired split between kernel and user memory.
 
          If the address range available to the kernel is less than the
@@ -1005,20 +1051,20 @@ config HIGHMEM
 config X86_PAE
        bool "PAE (Physical Address Extension) Support"
        depends on X86_32 && !HIGHMEM4G
-       help
+       ---help---
          PAE is required for NX support, and furthermore enables
          larger swapspace support for non-overcommit purposes. It
          has the cost of more pagetable lookup overhead, and also
          consumes more pagetable space per process.
 
 config ARCH_PHYS_ADDR_T_64BIT
-       def_bool X86_64 || X86_PAE
+       def_bool X86_64 || X86_PAE
 
 config DIRECT_GBPAGES
        bool "Enable 1GB pages for kernel pagetables" if EMBEDDED
        default y
        depends on X86_64
-       help
+       ---help---
          Allow the kernel linear mapping to use 1GB pages on CPUs that
          support it. This can improve the kernel's performance a tiny bit by
          reducing TLB pressure. If in doubt, say "Y".
@@ -1028,9 +1074,8 @@ config NUMA
        bool "Numa Memory Allocation and Scheduler Support"
        depends on SMP
        depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
-       default n if X86_PC
        default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
-       help
+       ---help---
          Enable NUMA (Non Uniform Memory Access) support.
 
          The kernel will try to allocate memory used by a CPU on the
@@ -1053,19 +1098,19 @@ config K8_NUMA
        def_bool y
        prompt "Old style AMD Opteron NUMA detection"
        depends on X86_64 && NUMA && PCI
-       help
-        Enable K8 NUMA node topology detection.  You should say Y here if
-        you have a multi processor AMD K8 system. This uses an old
-        method to read the NUMA configuration directly from the builtin
-        Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
-        instead, which also takes priority if both are compiled in.
+       ---help---
+         Enable K8 NUMA node topology detection.  You should say Y here if
+         you have a multiprocessor AMD K8 system. This uses an old
+         method to read the NUMA configuration directly from the built-in
+         Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
+         instead, which also takes priority if both are compiled in.
 
 config X86_64_ACPI_NUMA
        def_bool y
        prompt "ACPI NUMA detection"
        depends on X86_64 && NUMA && ACPI && PCI
        select ACPI_NUMA
-       help
+       ---help---
          Enable ACPI SRAT based node topology detection.
 
 # Some NUMA nodes have memory ranges that span
@@ -1080,24 +1125,24 @@ config NODES_SPAN_OTHER_NODES
 config NUMA_EMU
        bool "NUMA emulation"
        depends on X86_64 && NUMA
-       help
+       ---help---
          Enable NUMA emulation. A flat machine will be split
          into virtual nodes when booted with "numa=fake=N", where N is the
          number of nodes. This is only useful for debugging.
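
          For example, using the syntax quoted above, a flat machine can
          be split into eight fake nodes by booting with:

                numa=fake=8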
 
 config NODES_SHIFT
        int "Maximum NUMA Nodes (as a power of 2)" if !MAXSMP
-       range 1 9   if X86_64
+       range 1 9
        default "9" if MAXSMP
        default "6" if X86_64
        default "4" if X86_NUMAQ
        default "3"
        depends on NEED_MULTIPLE_NODES
-       help
+       ---help---
          Specify the maximum number of NUMA Nodes available on the target
          system.  Increases memory reserved to accommodate various tables.
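
          Since the value is a power-of-two exponent (2^6 = 64), a
          hypothetical .config fragment for a machine with up to 64 NUMA
          nodes would be:

                CONFIG_NODES_SHIFT=6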
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
        def_bool y
        depends on X86_32 && NUMA
 
@@ -1131,7 +1176,7 @@ config ARCH_SPARSEMEM_DEFAULT
 
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
-       depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC) || X86_GENERICARCH
+       depends on X86_64 || NUMA || (EXPERIMENTAL && X86_32) || X86_32_NON_STANDARD
        select SPARSEMEM_STATIC if X86_32
        select SPARSEMEM_VMEMMAP_ENABLE if X86_64
 
@@ -1148,61 +1193,61 @@ source "mm/Kconfig"
 config HIGHPTE
        bool "Allocate 3rd-level pagetables from highmem"
        depends on X86_32 && (HIGHMEM4G || HIGHMEM64G)
-       help
+       ---help---
          The VM uses one page table entry for each page of physical memory.
          For systems with a lot of RAM, this can be wasteful of precious
          low memory.  Setting this option will put user-space page table
          entries in high memory.
 
 config X86_CHECK_BIOS_CORRUPTION
-        bool "Check for low memory corruption"
-       help
-        Periodically check for memory corruption in low memory, which
-        is suspected to be caused by BIOS.  Even when enabled in the
-        configuration, it is disabled at runtime.  Enable it by
-        setting "memory_corruption_check=1" on the kernel command
-        line.  By default it scans the low 64k of memory every 60
-        seconds; see the memory_corruption_check_size and
-        memory_corruption_check_period parameters in
-        Documentation/kernel-parameters.txt to adjust this.
-
-        When enabled with the default parameters, this option has
-        almost no overhead, as it reserves a relatively small amount
-        of memory and scans it infrequently.  It both detects corruption
-        and prevents it from affecting the running system.
-
-        It is, however, intended as a diagnostic tool; if repeatable
-        BIOS-originated corruption always affects the same memory,
-        you can use memmap= to prevent the kernel from using that
-        memory.
+       bool "Check for low memory corruption"
+       ---help---
+         Periodically check for memory corruption in low memory, which
+         is suspected to be caused by BIOS.  Even when enabled in the
+         configuration, it is disabled at runtime.  Enable it by
+         setting "memory_corruption_check=1" on the kernel command
+         line.  By default it scans the low 64k of memory every 60
+         seconds; see the memory_corruption_check_size and
+         memory_corruption_check_period parameters in
+         Documentation/kernel-parameters.txt to adjust this.
+
+         When enabled with the default parameters, this option has
+         almost no overhead, as it reserves a relatively small amount
+         of memory and scans it infrequently.  It both detects corruption
+         and prevents it from affecting the running system.
+
+         It is, however, intended as a diagnostic tool; if repeatable
+         BIOS-originated corruption always affects the same memory,
+         you can use memmap= to prevent the kernel from using that
+         memory.
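
          As the help text above notes, the check stays disabled at runtime
          until requested; the documented way to enable it is the boot
          parameter:

                memory_corruption_check=1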
 
 config X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK
-        bool "Set the default setting of memory_corruption_check"
+       bool "Set the default setting of memory_corruption_check"
        depends on X86_CHECK_BIOS_CORRUPTION
        default y
-       help
-        Set whether the default state of memory_corruption_check is
-        on or off.
+       ---help---
+         Set whether the default state of memory_corruption_check is
+         on or off.
 
 config X86_RESERVE_LOW_64K
-        bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
+       bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
        default y
-       help
-        Reserve the first 64K of physical RAM on BIOSes that are known
-        to potentially corrupt that memory range. A numbers of BIOSes are
-        known to utilize this area during suspend/resume, so it must not
-        be used by the kernel.
+       ---help---
+         Reserve the first 64K of physical RAM on BIOSes that are known
+         to potentially corrupt that memory range. A number of BIOSes are
+         known to utilize this area during suspend/resume, so it must not
+         be used by the kernel.
 
-        Set this to N if you are absolutely sure that you trust the BIOS
-        to get all its memory reservations and usages right.
+         Set this to N if you are absolutely sure that you trust the BIOS
+         to get all its memory reservations and usages right.
 
-        If you have doubts about the BIOS (e.g. suspend/resume does not
-        work or there's kernel crashes after certain hardware hotplug
-        events) and it's not AMI or Phoenix, then you might want to enable
-        X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
-        corruption patterns.
+         If you have doubts about the BIOS (e.g. suspend/resume does not
+         work or there are kernel crashes after certain hardware hotplug
+         events) and it's not AMI or Phoenix, then you might want to enable
+         X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
+         corruption patterns.
 
-        Say Y if unsure.
+         Say Y if unsure.
 
 config MATH_EMULATION
        bool
@@ -1268,7 +1313,7 @@ config MTRR_SANITIZER
        def_bool y
        prompt "MTRR cleanup support"
        depends on MTRR
-       help
+       ---help---
          Convert MTRR layout from continuous to discrete, so X drivers can
          add writeback entries.
 
@@ -1283,7 +1328,7 @@ config MTRR_SANITIZER_ENABLE_DEFAULT
        range 0 1
        default "0"
        depends on MTRR_SANITIZER
-       help
+       ---help---
          Set the default value of the MTRR cleanup enable flag.
 
 config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
@@ -1291,7 +1336,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT
        range 0 7
        default "1"
        depends on MTRR_SANITIZER
-       help
+       ---help---
          Default number of spare MTRR entries reserved by the cleanup; it
          can be changed via mtrr_spare_reg_nr=N on the kernel command line.
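
          For example, using the syntax given above, reserving two spare
          MTRR registers for the cleanup would be requested with:

                mtrr_spare_reg_nr=2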
 
@@ -1299,7 +1344,7 @@ config X86_PAT
        bool
        prompt "x86 PAT support"
        depends on MTRR
-       help
+       ---help---
          Use PAT attributes to setup page level cache control.
 
          PATs are the modern equivalents of MTRRs and are much more
@@ -1314,20 +1359,20 @@ config EFI
        bool "EFI runtime service support"
        depends on ACPI
        ---help---
-       This enables the kernel to use EFI runtime services that are
-       available (such as the EFI variable services).
+         This enables the kernel to use EFI runtime services that are
+         available (such as the EFI variable services).
 
-       This option is only useful on systems that have EFI firmware.
-       In addition, you should use the latest ELILO loader available
-       at <http://elilo.sourceforge.net> in order to take advantage
-       of EFI runtime services. However, even with this option, the
-       resultant kernel should continue to boot on existing non-EFI
-       platforms.
+         This option is only useful on systems that have EFI firmware.
+         In addition, you should use the latest ELILO loader available
+         at <http://elilo.sourceforge.net> in order to take advantage
+         of EFI runtime services. However, even with this option, the
+         resultant kernel should continue to boot on existing non-EFI
+         platforms.
 
 config SECCOMP
        def_bool y
        prompt "Enable seccomp to safely compute untrusted bytecode"
-       help
+       ---help---
          This kernel feature is useful for number crunching applications
          that may need to compute untrusted bytecode during their
          execution. By using pipes or other transports made available to
@@ -1340,13 +1385,16 @@ config SECCOMP
 
          If unsure, say Y. Only embedded should say N here.
 
+config CC_STACKPROTECTOR_ALL
+       bool
+
 config CC_STACKPROTECTOR
        bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
-       depends on X86_64 && EXPERIMENTAL && BROKEN
-       help
-         This option turns on the -fstack-protector GCC feature. This
-         feature puts, at the beginning of critical functions, a canary
-         value on the stack just before the return address, and validates
+       select CC_STACKPROTECTOR_ALL
+       ---help---
+         This option turns on the -fstack-protector GCC feature. This
+         feature puts, at the beginning of functions, a canary value on
+         the stack just before the return address, and validates
          the value just before actually returning.  Stack based buffer
          overflows (that need to overwrite this return address) now also
          overwrite the canary, which gets detected and the attack is then
@@ -1354,22 +1402,14 @@ config CC_STACKPROTECTOR
 
          This feature requires gcc version 4.2 or above, or a distribution
          gcc with the feature backported. Older versions are automatically
-         detected and for those versions, this configuration option is ignored.
-
-config CC_STACKPROTECTOR_ALL
-       bool "Use stack-protector for all functions"
-       depends on CC_STACKPROTECTOR
-       help
-         Normally, GCC only inserts the canary value protection for
-         functions that use large-ish on-stack buffers. By enabling
-         this option, GCC will be asked to do this for ALL functions.
+         detected and for those versions, this configuration option is
+         ignored (and a warning is printed during bootup).
 
 source kernel/Kconfig.hz
 
 config KEXEC
        bool "kexec system call"
-       depends on X86_BIOS_REBOOT
-       help
+       ---help---
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
          but it is independent of the system firmware.   And like a reboot
@@ -1386,7 +1426,7 @@ config KEXEC
 config CRASH_DUMP
        bool "kernel crash dumps"
        depends on X86_64 || (X86_32 && HIGHMEM)
-       help
+       ---help---
          Generate crash dump after being started by kexec.
          This should be normally only set in special crash dump kernels
          which are loaded in the main kernel with kexec-tools into
@@ -1400,8 +1440,8 @@ config CRASH_DUMP
 config KEXEC_JUMP
        bool "kexec jump (EXPERIMENTAL)"
        depends on EXPERIMENTAL
-       depends on KEXEC && HIBERNATION && X86_32
-       help
+       depends on KEXEC && HIBERNATION
+       ---help---
          Jump between original kernel and kexeced kernel and invoke
          code in physical address mode via KEXEC
 
@@ -1410,7 +1450,7 @@ config PHYSICAL_START
        default "0x1000000" if X86_NUMAQ
        default "0x200000" if X86_64
        default "0x100000"
-       help
+       ---help---
          This gives the physical address where the kernel is loaded.
 
          If the kernel is not relocatable (CONFIG_RELOCATABLE=n) then
@@ -1451,7 +1491,7 @@ config PHYSICAL_START
 config RELOCATABLE
        bool "Build a relocatable kernel (EXPERIMENTAL)"
        depends on EXPERIMENTAL
-       help
+       ---help---
          This builds a kernel image that retains relocation information
          so it can be loaded someplace besides the default 1MB.
          The relocations tend to make the kernel binary about 10% larger,
@@ -1471,7 +1511,7 @@ config PHYSICAL_ALIGN
        default "0x100000" if X86_32
        default "0x200000" if X86_64
        range 0x2000 0x400000
-       help
+       ---help---
          This value puts alignment restrictions on the physical address
          where the kernel is loaded and run from. The kernel is compiled
          for an address which meets the above alignment restriction.
@@ -1492,7 +1532,7 @@ config PHYSICAL_ALIGN
 
 config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
-       depends on SMP && HOTPLUG && !X86_VOYAGER
+       depends on SMP && HOTPLUG
        ---help---
          Say Y here to allow turning CPUs off and on. CPUs can be
          controlled through /sys/devices/system/cpu.
@@ -1504,7 +1544,7 @@ config COMPAT_VDSO
        def_bool y
        prompt "Compat VDSO support"
        depends on X86_32 || IA32_EMULATION
-       help
+       ---help---
          Map the 32-bit VDSO to the predictable old-style address too.
          Say N here if you are running a sufficiently recent glibc
@@ -1516,7 +1556,7 @@ config COMPAT_VDSO
 config CMDLINE_BOOL
        bool "Built-in kernel command line"
        default n
-       help
+       ---help---
          Allow for specifying boot arguments to the kernel at
          build time.  On some systems (e.g. embedded ones), it is
          necessary or convenient to provide some or all of the
@@ -1534,7 +1574,7 @@ config CMDLINE
        string "Built-in kernel command string"
        depends on CMDLINE_BOOL
        default ""
-       help
+       ---help---
          Enter arguments here that should be compiled into the kernel
          image and used at boot time.  If the boot loader provides a
          command line at boot time, it is appended to this string to
@@ -1551,7 +1591,7 @@ config CMDLINE_OVERRIDE
        bool "Built-in command line overrides boot loader arguments"
        default n
        depends on CMDLINE_BOOL
-       help
+       ---help---
          Set this option to 'Y' to have the kernel ignore the boot loader
          command line, and use ONLY the built-in command line.
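
          Putting the three options above together, a sketch of a .config
          fragment (the console and root arguments are placeholders, not
          recommendations):

                CONFIG_CMDLINE_BOOL=y
                CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/sda1"
                CONFIG_CMDLINE_OVERRIDE=y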
 
@@ -1573,7 +1613,6 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
        depends on NUMA
 
 menu "Power management and ACPI options"
-       depends on !X86_VOYAGER
 
 config ARCH_HIBERNATION_HEADER
        def_bool y
@@ -1651,7 +1690,7 @@ if APM
 
 config APM_IGNORE_USER_SUSPEND
        bool "Ignore USER SUSPEND"
-       help
+       ---help---
          This option will ignore USER SUSPEND requests. On machines with a
          compliant APM BIOS, you want to say N. However, on the NEC Versa M
          series notebooks, it is necessary to say Y because of a BIOS bug.
@@ -1675,7 +1714,7 @@ config APM_DO_ENABLE
 
 config APM_CPU_IDLE
        bool "Make CPU Idle calls when idle"
-       help
+       ---help---
          Enable calls to APM CPU Idle/CPU Busy inside the kernel's idle loop.
          On some machines, this can activate improved power savings, such as
          a slowed CPU clock rate, when the machine is idle. These idle calls
@@ -1686,7 +1725,7 @@ config APM_CPU_IDLE
 
 config APM_DISPLAY_BLANK
        bool "Enable console blanking using APM"
-       help
+       ---help---
          Enable console blanking using the APM. Some laptops can use this to
          turn off the LCD backlight when the screen blanker of the Linux
          virtual console blanks the screen. Note that this is only used by
@@ -1699,7 +1738,7 @@ config APM_DISPLAY_BLANK
 
 config APM_ALLOW_INTS
        bool "Allow interrupts during APM BIOS calls"
-       help
+       ---help---
          Normally we disable external interrupts while we are making calls to
          the APM BIOS as a measure to lessen the effects of a badly behaving
          BIOS implementation.  The BIOS should reenable interrupts if it
@@ -1724,7 +1763,7 @@ config PCI
        bool "PCI support"
        default y
        select ARCH_SUPPORTS_MSI if (X86_LOCAL_APIC && X86_IO_APIC)
-       help
+       ---help---
          Find out whether you have a PCI motherboard. PCI is the name of a
          bus system, i.e. the way the CPU talks to the other stuff inside
          your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
@@ -1795,7 +1834,7 @@ config PCI_MMCONFIG
 config DMAR
        bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
        depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
-       help
+       ---help---
          DMA remapping (DMAR) devices support enables independent address
          translations for Direct Memory Access (DMA) from devices.
          These DMA remapping devices are reported via ACPI tables
@@ -1817,29 +1856,30 @@ config DMAR_GFX_WA
        def_bool y
        prompt "Support for Graphics workaround"
        depends on DMAR
-       help
-        Current Graphics drivers tend to use physical address
-        for DMA and avoid using DMA APIs. Setting this config
-        option permits the IOMMU driver to set a unity map for
-        all the OS-visible memory. Hence the driver can continue
-        to use physical addresses for DMA.
+       ---help---
+         Current graphics drivers tend to use physical addresses
+         for DMA and avoid using DMA APIs. Setting this config
+         option permits the IOMMU driver to set a unity map for
+         all the OS-visible memory. Hence the driver can continue
+         to use physical addresses for DMA.
 
 config DMAR_FLOPPY_WA
        def_bool y
        depends on DMAR
-       help
-        Floppy disk drivers are know to bypass DMA API calls
-        thereby failing to work when IOMMU is enabled. This
-        workaround will setup a 1:1 mapping for the first
-        16M to make floppy (an ISA device) work.
+       ---help---
+         Floppy disk drivers are known to bypass DMA API calls,
+         thereby failing to work when the IOMMU is enabled. This
+         workaround will set up a 1:1 mapping for the first
+         16M to make the floppy (an ISA device) work.
 
 config INTR_REMAP
        bool "Support for Interrupt Remapping (EXPERIMENTAL)"
        depends on X86_64 && X86_IO_APIC && PCI_MSI && ACPI && EXPERIMENTAL
-       help
-        Supports Interrupt remapping for IO-APIC and MSI devices.
-        To use x2apic mode in the CPU's which support x2APIC enhancements or
-        to support platforms with CPU's having > 8 bit APIC ID, say Y.
+       select X86_X2APIC
+       ---help---
+         Supports interrupt remapping for IO-APIC and MSI devices.  To use
+         x2apic mode on CPUs which support x2APIC enhancements, or to
+         support platforms with CPUs having an APIC ID wider than 8 bits,
+         say Y.
 
 source "drivers/pci/pcie/Kconfig"
 
@@ -1853,8 +1893,7 @@ if X86_32
 
 config ISA
        bool "ISA support"
-       depends on !X86_VOYAGER
-       help
+       ---help---
          Find out whether you have ISA slots on your motherboard.  ISA is the
          name of a bus system, i.e. the way the CPU talks to the other stuff
          inside your box.  Other bus systems are PCI, EISA, MicroChannel
@@ -1880,9 +1919,8 @@ config EISA
 source "drivers/eisa/Kconfig"
 
 config MCA
-       bool "MCA support" if !X86_VOYAGER
-       default y if X86_VOYAGER
-       help
+       bool "MCA support"
+       ---help---
          MicroChannel Architecture is found in some IBM PS/2 machines and
          laptops.  It is a bus system similar to PCI or ISA. See
          <file:Documentation/mca.txt> (and especially the web page given
@@ -1892,8 +1930,7 @@ source "drivers/mca/Kconfig"
 
 config SCx200
        tristate "NatSemi SCx200 support"
-       depends on !X86_VOYAGER
-       help
+       ---help---
          This provides basic support for National Semiconductor's
          (now AMD's) Geode processors.  The driver probes for the
          PCI-IDs of several on-chip devices, so its a good dependency
@@ -1905,7 +1942,7 @@ config SCx200HR_TIMER
        tristate "NatSemi SCx200 27MHz High-Resolution Timer Support"
        depends on SCx200 && GENERIC_TIME
        default y
-       help
+       ---help---
          This driver provides a clocksource built upon the on-chip
          27MHz high-resolution timer.  It's also a workaround for
          NSC Geode SC-1100's buggy TSC, which loses time when the
@@ -1916,7 +1953,7 @@ config GEODE_MFGPT_TIMER
        def_bool y
        prompt "Geode Multi-Function General Purpose Timer (MFGPT) events"
        depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
-       help
+       ---help---
          This driver provides a clock event source based on the MFGPT
          timer(s) in the CS5535 and CS5536 companion chip for the geode.
          MFGPTs have a better resolution and max interval than the
@@ -1925,7 +1962,7 @@ config GEODE_MFGPT_TIMER
 config OLPC
        bool "One Laptop Per Child support"
        default n
-       help
+       ---help---
          Add support for detecting the unique features of the OLPC
          XO hardware.
 
@@ -1950,16 +1987,16 @@ config IA32_EMULATION
        bool "IA32 Emulation"
        depends on X86_64
        select COMPAT_BINFMT_ELF
-       help
+       ---help---
          Include code to run 32-bit programs under a 64-bit kernel. You should
          likely turn this on, unless you're 100% sure that you don't have any
          32-bit programs left.
 
 config IA32_AOUT
-       tristate "IA32 a.out support"
-       depends on IA32_EMULATION
-       help
-         Support old a.out binaries in the 32bit emulation.
+       tristate "IA32 a.out support"
+       depends on IA32_EMULATION
+       ---help---
+         Support old a.out binaries in the 32bit emulation.
 
 config COMPAT
        def_bool y
index c98d52e82966f9a965b4b2ddc2af2e1272bea7b2..a95eaf0e582ada3e141ee0ae35fa5d17eb056f1b 100644 (file)
@@ -50,7 +50,7 @@ config M386
 config M486
        bool "486"
        depends on X86_32
-       help
+       ---help---
          Select this for a 486 series processor, either Intel or one of the
          compatible processors from AMD, Cyrix, IBM, or Intel.  Includes DX,
          DX2, and DX4 variants; also SL/SLC/SLC2/SLC3/SX/SX2 and UMC U5D or
@@ -59,7 +59,7 @@ config M486
 config M586
        bool "586/K5/5x86/6x86/6x86MX"
        depends on X86_32
-       help
+       ---help---
          Select this for a 586 or 686 series processor such as the AMD K5,
          the Cyrix 5x86, 6x86 and 6x86MX.  This choice does not
          assume the RDTSC (Read Time Stamp Counter) instruction.
@@ -67,21 +67,21 @@ config M586
 config M586TSC
        bool "Pentium-Classic"
        depends on X86_32
-       help
+       ---help---
          Select this for a Pentium Classic processor with the RDTSC (Read
          Time Stamp Counter) instruction for benchmarking.
 
 config M586MMX
        bool "Pentium-MMX"
        depends on X86_32
-       help
+       ---help---
          Select this for a Pentium with the MMX graphics/multimedia
          extended instructions.
 
 config M686
        bool "Pentium-Pro"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel Pentium Pro chips.  This enables the use of
          Pentium Pro extended instructions, and disables the init-time guard
          against the f00f bug found in earlier Pentiums.
@@ -89,7 +89,7 @@ config M686
 config MPENTIUMII
        bool "Pentium-II/Celeron(pre-Coppermine)"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel chips based on the Pentium-II and
          pre-Coppermine Celeron core.  This option enables an unaligned
          copy optimization, compiles the kernel with optimization flags
@@ -99,7 +99,7 @@ config MPENTIUMII
 config MPENTIUMIII
        bool "Pentium-III/Celeron(Coppermine)/Pentium-III Xeon"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel chips based on the Pentium-III and
          Celeron-Coppermine core.  This option enables use of some
          extended prefetch instructions in addition to the Pentium II
@@ -108,14 +108,14 @@ config MPENTIUMIII
 config MPENTIUMM
        bool "Pentium M"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel Pentium M (not Pentium-4 M)
          notebook chips.
 
 config MPENTIUM4
        bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
        depends on X86_32
-       help
+       ---help---
          Select this for Intel Pentium 4 chips.  This includes the
          Pentium 4, Pentium D, P4-based Celeron and Xeon, and
          Pentium-4 M (not Pentium M) chips.  This option enables compile
@@ -151,7 +151,7 @@ config MPENTIUM4
 config MK6
        bool "K6/K6-II/K6-III"
        depends on X86_32
-       help
+       ---help---
          Select this for an AMD K6-family processor.  Enables use of
          some extended instructions, and passes appropriate optimization
          flags to GCC.
@@ -159,14 +159,14 @@ config MK6
 config MK7
        bool "Athlon/Duron/K7"
        depends on X86_32
-       help
+       ---help---
          Select this for an AMD Athlon K7-family processor.  Enables use of
          some extended instructions, and passes appropriate optimization
          flags to GCC.
 
 config MK8
        bool "Opteron/Athlon64/Hammer/K8"
-       help
+       ---help---
          Select this for an AMD Opteron or Athlon64 Hammer-family processor.
          Enables use of some extended instructions, and passes appropriate
          optimization flags to GCC.
@@ -174,7 +174,7 @@ config MK8
 config MCRUSOE
        bool "Crusoe"
        depends on X86_32
-       help
+       ---help---
          Select this for a Transmeta Crusoe processor.  Treats the processor
          like a 586 with TSC, and sets some GCC optimization flags (like a
          Pentium Pro with no alignment requirements).
@@ -182,13 +182,13 @@ config MCRUSOE
 config MEFFICEON
        bool "Efficeon"
        depends on X86_32
-       help
+       ---help---
          Select this for a Transmeta Efficeon processor.
 
 config MWINCHIPC6
        bool "Winchip-C6"
        depends on X86_32
-       help
+       ---help---
          Select this for an IDT Winchip C6 chip.  Linux and GCC
          treat this chip as a 586TSC with some extended instructions
          and alignment requirements.
@@ -196,7 +196,7 @@ config MWINCHIPC6
 config MWINCHIP3D
        bool "Winchip-2/Winchip-2A/Winchip-3"
        depends on X86_32
-       help
+       ---help---
          Select this for an IDT Winchip-2, 2A or 3.  Linux and GCC
          treat this chip as a 586TSC with some extended instructions
          and alignment requirements.  Also enable out of order memory
@@ -206,19 +206,19 @@ config MWINCHIP3D
 config MGEODEGX1
        bool "GeodeGX1"
        depends on X86_32
-       help
+       ---help---
          Select this for a Geode GX1 (Cyrix MediaGX) chip.
 
 config MGEODE_LX
        bool "Geode GX/LX"
        depends on X86_32
-       help
+       ---help---
          Select this for AMD Geode GX and LX processors.
 
 config MCYRIXIII
        bool "CyrixIII/VIA-C3"
        depends on X86_32
-       help
+       ---help---
          Select this for a Cyrix III or C3 chip.  Presently Linux and GCC
          treat this chip as a generic 586. Whilst the CPU is 686 class,
          it lacks the cmov extension which gcc assumes is present when
@@ -230,7 +230,7 @@ config MCYRIXIII
 config MVIAC3_2
        bool "VIA C3-2 (Nehemiah)"
        depends on X86_32
-       help
+       ---help---
          Select this for a VIA C3 "Nehemiah". Selecting this enables usage
          of SSE and tells gcc to treat the CPU as a 686.
          Note, this kernel will not boot on older (pre model 9) C3s.
@@ -238,14 +238,14 @@ config MVIAC3_2
 config MVIAC7
        bool "VIA C7"
        depends on X86_32
-       help
+       ---help---
          Select this for a VIA C7.  Selecting this uses the correct cache
          shift and tells gcc to treat the CPU as a 686.
 
 config MPSC
        bool "Intel P4 / older Netburst based Xeon"
        depends on X86_64
-       help
+       ---help---
          Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
          Xeon CPUs with the Intel 64-bit extension, which is compatible
          with x86-64.
          Note that the latest Xeons (Xeon 51xx and 53xx) are not based on the
@@ -255,7 +255,7 @@ config MPSC
 
 config MCORE2
        bool "Core 2/newer Xeon"
-       help
+       ---help---
 
          Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
          53xx) CPUs. You can distinguish newer from older Xeons by the CPU
@@ -265,7 +265,7 @@ config MCORE2
 config GENERIC_CPU
        bool "Generic-x86-64"
        depends on X86_64
-       help
+       ---help---
          Generic x86-64 CPU.
          Run equally well on all x86-64 CPUs.
 
@@ -274,7 +274,7 @@ endchoice
 config X86_GENERIC
        bool "Generic x86 support"
        depends on X86_32
-       help
+       ---help---
          Instead of just including optimizations for the selected
          x86 variant (e.g. PII, Crusoe or Athlon), include some more
          generic optimizations as well. This will make the kernel
@@ -294,25 +294,23 @@ config X86_CPU
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
        int
-       default "128" if GENERIC_CPU || MPSC
-       default "64" if MK8 || MCORE2
-       depends on X86_64
+       default "128" if MPSC
+       default "64" if GENERIC_CPU || MK8 || MCORE2 || X86_32
 
 config X86_INTERNODE_CACHE_BYTES
        int
        default "4096" if X86_VSMP
        default X86_L1_CACHE_BYTES if !X86_VSMP
-       depends on X86_64
 
 config X86_CMPXCHG
        def_bool X86_64 || (X86_32 && !M386)
 
 config X86_L1_CACHE_SHIFT
        int
-       default "7" if MPENTIUM4 || X86_GENERIC || GENERIC_CPU || MPSC
+       default "7" if MPENTIUM4 || MPSC
        default "4" if X86_ELAN || M486 || M386 || MGEODEGX1
        default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
-       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7
+       default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MVIAC7 || X86_GENERIC || GENERIC_CPU
 
 config X86_XADD
        def_bool y
@@ -321,7 +319,7 @@ config X86_XADD
 config X86_PPRO_FENCE
        bool "PentiumPro memory ordering errata workaround"
        depends on M686 || M586MMX || M586TSC || M586 || M486 || M386 || MGEODEGX1
-       help
+       ---help---
          Old PentiumPro multiprocessor systems had errata that could cause
          memory operations to violate the x86 ordering standard in rare cases.
          Enabling this option will attempt to work around some (but not all)
@@ -414,14 +412,14 @@ config X86_DEBUGCTLMSR
 
 menuconfig PROCESSOR_SELECT
        bool "Supported processor vendors" if EMBEDDED
-       help
+       ---help---
          This lets you choose what x86 vendor support code your kernel
          will include.
 
 config CPU_SUP_INTEL
        default y
        bool "Support Intel processors" if PROCESSOR_SELECT
-       help
+       ---help---
          This enables detection, tunings and quirks for Intel processors
 
          You need this enabled if you want your kernel to run on an
@@ -435,7 +433,7 @@ config CPU_SUP_CYRIX_32
        default y
        bool "Support Cyrix processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Cyrix processors
 
          You need this enabled if you want your kernel to run on a
@@ -448,7 +446,7 @@ config CPU_SUP_CYRIX_32
 config CPU_SUP_AMD
        default y
        bool "Support AMD processors" if PROCESSOR_SELECT
-       help
+       ---help---
          This enables detection, tunings and quirks for AMD processors
 
          You need this enabled if you want your kernel to run on an
@@ -462,7 +460,7 @@ config CPU_SUP_CENTAUR_32
        default y
        bool "Support Centaur processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Centaur processors
 
          You need this enabled if you want your kernel to run on a
@@ -476,7 +474,7 @@ config CPU_SUP_CENTAUR_64
        default y
        bool "Support Centaur processors" if PROCESSOR_SELECT
        depends on 64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Centaur processors
 
          You need this enabled if you want your kernel to run on a
@@ -490,7 +488,7 @@ config CPU_SUP_TRANSMETA_32
        default y
        bool "Support Transmeta processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for Transmeta processors
 
          You need this enabled if you want your kernel to run on a
@@ -504,7 +502,7 @@ config CPU_SUP_UMC_32
        default y
        bool "Support UMC processors" if PROCESSOR_SELECT
        depends on !64BIT
-       help
+       ---help---
          This enables detection, tunings and quirks for UMC processors
 
          You need this enabled if you want your kernel to run on a
@@ -523,7 +521,7 @@ config X86_PTRACE_BTS
        bool "Branch Trace Store"
        default y
        depends on X86_DEBUGCTLMSR
-       help
+       ---help---
          This adds a ptrace interface to the hardware's branch trace store.
 
          Debuggers may use it to collect an execution trace of the debugged
index e1983fa025d2a941f86c5ac6dc4012fc3b1d4e71..fdb45df608b627c4efd79bba410ab4a0cb0f9b4a 100644 (file)
@@ -7,7 +7,7 @@ source "lib/Kconfig.debug"
 
 config STRICT_DEVMEM
        bool "Filter access to /dev/mem"
-       help
+       ---help---
          If this option is disabled, you allow userspace (root) access to all
          of memory, including kernel and userspace memory. Accidental
          access to this is obviously disastrous, but specific access can
@@ -25,7 +25,7 @@ config STRICT_DEVMEM
 config X86_VERBOSE_BOOTUP
        bool "Enable verbose x86 bootup info messages"
        default y
-       help
+       ---help---
          Enables the informational output from the decompression stage
          (e.g. bzImage) of the boot. If you disable this you will still
          see errors. Disable this if you want silent bootup.
@@ -33,7 +33,7 @@ config X86_VERBOSE_BOOTUP
 config EARLY_PRINTK
        bool "Early printk" if EMBEDDED
        default y
-       help
+       ---help---
          Write kernel log output directly into the VGA buffer or to a serial
          port.
 
@@ -47,7 +47,7 @@ config EARLY_PRINTK_DBGP
        bool "Early printk via EHCI debug port"
        default n
        depends on EARLY_PRINTK && PCI
-       help
+       ---help---
          Write kernel log output directly into the EHCI debug port.
 
          This is useful for kernel debugging when your machine crashes very
@@ -59,14 +59,14 @@ config EARLY_PRINTK_DBGP
 config DEBUG_STACKOVERFLOW
        bool "Check for stack overflows"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          This option will cause messages to be printed if free stack space
          drops below a certain limit.
 
 config DEBUG_STACK_USAGE
        bool "Stack utilization instrumentation"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Enables the display of the minimum amount of free stack which each
          task has ever had available in the sysrq-T and sysrq-P debug output.
 
@@ -75,7 +75,7 @@ config DEBUG_STACK_USAGE
 config DEBUG_PAGEALLOC
        bool "Debug page memory allocations"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
          of memory corruptions.
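
    Concretely, the failure mode DEBUG_PAGEALLOC converts into a loud one
    looks like the sketch below (illustrative kernel-style code, not part
    of this patch; the function name is made up):

    #include <linux/mm.h>
    #include <linux/gfp.h>

    /* Illustrative only: with DEBUG_PAGEALLOC=y the freed page is
     * unmapped from the linear mapping, so the stale store faults at
     * once instead of silently corrupting whoever reuses the page. */
    static void pagealloc_demo(void)
    {
            struct page *page = alloc_pages(GFP_KERNEL, 0);
            char *p = page_address(page);

            __free_pages(page, 0);
            p[0] = 0;       /* use-after-free: immediate oops */
    }
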
@@ -83,9 +83,9 @@ config DEBUG_PAGEALLOC
 config DEBUG_PER_CPU_MAPS
        bool "Debug access to per_cpu maps"
        depends on DEBUG_KERNEL
-       depends on X86_SMP
+       depends on SMP
        default n
-       help
+       ---help---
          Say Y to verify that the per_cpu map being accessed has
          been setup.  Adds a fair amount of code to kernel memory
          and decreases performance.
@@ -96,7 +96,7 @@ config X86_PTDUMP
        bool "Export kernel pagetable layout to userspace via debugfs"
        depends on DEBUG_KERNEL
        select DEBUG_FS
-       help
+       ---help---
          Say Y here if you want to show the kernel pagetable layout in a
          debugfs file. This information is only useful for kernel developers
          who are working in architecture specific areas of the kernel.
@@ -108,7 +108,7 @@ config DEBUG_RODATA
        bool "Write protect kernel read-only data structures"
        default y
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Mark the kernel read-only data as write-protected in the pagetables,
          in order to catch accidental (and incorrect) writes to such const
          data. This is recommended so that we can catch kernel bugs sooner.
@@ -117,7 +117,8 @@ config DEBUG_RODATA
 config DEBUG_RODATA_TEST
        bool "Testcase for the DEBUG_RODATA feature"
        depends on DEBUG_RODATA
-       help
+       default y
+       ---help---
          This option enables a testcase for the DEBUG_RODATA
          feature as well as for the change_page_attr() infrastructure.
          If in doubt, say "N"
@@ -125,7 +126,7 @@ config DEBUG_RODATA_TEST
 config DEBUG_NX_TEST
        tristate "Testcase for the NX non-executable stack feature"
        depends on DEBUG_KERNEL && m
-       help
+       ---help---
          This option enables a testcase for the CPU NX capability
          and the software setup of this feature.
          If in doubt, say "N"
@@ -133,7 +134,7 @@ config DEBUG_NX_TEST
 config 4KSTACKS
        bool "Use 4Kb for kernel stacks instead of 8Kb"
        depends on X86_32
-       help
+       ---help---
          If you say Y here the kernel will use a 4Kb stacksize for the
          kernel stack attached to each process/thread. This facilitates
          running more threads on a system and also reduces the pressure
@@ -144,7 +145,7 @@ config DOUBLEFAULT
        default y
        bool "Enable doublefault exception handler" if EMBEDDED
        depends on X86_32
-       help
+       ---help---
          This option allows trapping of rare doublefault exceptions that
          would otherwise cause a system to silently reboot. Disabling this
          option saves about 4k and might cause you much additional grey
@@ -154,7 +155,7 @@ config IOMMU_DEBUG
        bool "Enable IOMMU debugging"
        depends on GART_IOMMU && DEBUG_KERNEL
        depends on X86_64
-       help
+       ---help---
          Force the IOMMU to on even when you have less than 4GB of
          memory and add debugging code. On overflow always panic. And
          allow to enable IOMMU leak tracing. Can be disabled at boot
@@ -170,7 +171,7 @@ config IOMMU_LEAK
        bool "IOMMU leak tracing"
        depends on DEBUG_KERNEL
        depends on IOMMU_DEBUG
-       help
+       ---help---
          Add a simple leak tracer to the IOMMU code. This is useful when you
          are debugging a buggy device driver that leaks IOMMU mappings.
 
@@ -203,25 +204,25 @@ choice
 
 config IO_DELAY_0X80
        bool "port 0x80 based port-IO delay [recommended]"
-       help
+       ---help---
          This is the traditional Linux IO delay used for in/out_p.
          It is the most tested hence safest selection here.
 
 config IO_DELAY_0XED
        bool "port 0xed based port-IO delay"
-       help
+       ---help---
          Use port 0xed as the IO delay. This frees up port 0x80 which is
          often used as a hardware-debug port.
 
 config IO_DELAY_UDELAY
        bool "udelay based port-IO delay"
-       help
+       ---help---
          Use udelay(2) as the IO delay method. This provides the delay
          while not having any side-effect on the IO port space.
 
 config IO_DELAY_NONE
        bool "no port-IO delay"
-       help
+       ---help---
          No port-IO delay. Will break on old boxes that require port-IO
          delay for certain operations. Should work on most new machines.
 
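    For reference, the four IO_DELAY choices above only change what the
    in/out_p helpers execute between port accesses; the recommended 0x80
    variant reduces to one dummy write to an unused diagnostic port. A
    minimal sketch, assuming the shape of the native io_delay code:

    /* Port 0x80 is the POST diagnostic port; writing to it is harmless
     * on PC compatibles and costs roughly one ISA bus cycle (~1us),
     * which is the delay legacy devices want between port accesses. */
    static inline void io_delay_0x80(void)
    {
            asm volatile ("outb %%al, $0x80" : : : "memory");
    }
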
@@ -255,18 +256,18 @@ config DEBUG_BOOT_PARAMS
        bool "Debug boot parameters"
        depends on DEBUG_KERNEL
        depends on DEBUG_FS
-       help
+       ---help---
          This option will cause struct boot_params to be exported via debugfs.
 
 config CPA_DEBUG
        bool "CPA self-test code"
        depends on DEBUG_KERNEL
-       help
+       ---help---
          Do change_page_attr() self-tests every 30 seconds.
 
 config OPTIMIZE_INLINING
        bool "Allow gcc to uninline functions marked 'inline'"
-       help
+       ---help---
          This option determines if the kernel forces gcc to inline the functions
          developers have marked 'inline'. Doing so takes away freedom from gcc to
          do what it thinks is best, which is desirable for the gcc 3.x series of
@@ -279,4 +280,3 @@ config OPTIMIZE_INLINING
          If unsure, say N.
 
 endmenu
-
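
    The OPTIMIZE_INLINING entry above hinges on a compiler.h distinction,
    sketched below (illustrative; the macro plumbing itself is not part
    of this patch): with the option enabled, plain 'inline' degrades to a
    hint gcc may ignore, while __always_inline keeps the forced behaviour.

    #include <linux/compiler.h>

    static inline int hint_only(int x)          /* hint; may be uninlined */
    {
            return x * 2;
    }

    static __always_inline int forced(int x)    /* always inlined */
    {
            return x * 2;
    }
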
index d1a47adb5aec0b14cfd6b52297be80f87fbf0682..f05d8c91d9e51ed29b21a6b99588398182879bb2 100644 (file)
@@ -70,14 +70,17 @@ else
         # this works around some issues with generating unwind tables in older gccs
         # newer gccs do it by default
         KBUILD_CFLAGS += -maccumulate-outgoing-args
+endif
 
-        stackp := $(CONFIG_SHELL) $(srctree)/scripts/gcc-x86_64-has-stack-protector.sh
-        stackp-$(CONFIG_CC_STACKPROTECTOR) := $(shell $(stackp) \
-                "$(CC)" -fstack-protector )
-        stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += $(shell $(stackp) \
-                "$(CC)" -fstack-protector-all )
-
-        KBUILD_CFLAGS += $(stackp-y)
+ifdef CONFIG_CC_STACKPROTECTOR
+       cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+        ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC)),y)
+                stackp-y := -fstack-protector
+                stackp-$(CONFIG_CC_STACKPROTECTOR_ALL) += -fstack-protector-all
+                KBUILD_CFLAGS += $(stackp-y)
+        else
+                $(warning stack protector enabled but no compiler support)
+        endif
 endif
 
 # Stackpointer is addressed different for 32 bit and 64 bit x86
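
    The $(cc_has_sp) probe referenced above answers a single y/n question:
    does $(CC) emit a working stack canary? A plausible sketch of the kind
    of translation unit such a script compiles (hypothetical; the script
    body is not shown in this hunk), whose generated assembly is then
    checked for the %gs-relative canary access:

    /* Hypothetical probe input: built with -fstack-protector; the
     * buffer must be large enough to trigger protector instrumentation. */
    int probe_stack_protector(void)
    {
            char buf[200];

            buf[0] = 0;             /* touch the buffer so it is kept */
            return buf[0];
    }
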
@@ -102,29 +105,6 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
 # prevent gcc from generating any FP code by mistake
 KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
 
-###
-# Sub architecture support
-# fcore-y is linked before mcore-y files.
-
-# Default subarch .c files
-mcore-y  := arch/x86/mach-default/
-
-# Voyager subarch support
-mflags-$(CONFIG_X86_VOYAGER)   := -Iarch/x86/include/asm/mach-voyager
-mcore-$(CONFIG_X86_VOYAGER)    := arch/x86/mach-voyager/
-
-# generic subarchitecture
-mflags-$(CONFIG_X86_GENERICARCH):= -Iarch/x86/include/asm/mach-generic
-fcore-$(CONFIG_X86_GENERICARCH)        += arch/x86/mach-generic/
-mcore-$(CONFIG_X86_GENERICARCH)        := arch/x86/mach-default/
-
-# default subarch .h files
-mflags-y += -Iarch/x86/include/asm/mach-default
-
-# 64 bit does not support subarch support - clear sub arch variables
-fcore-$(CONFIG_X86_64)  :=
-mcore-$(CONFIG_X86_64)  :=
-
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
 
@@ -150,9 +130,6 @@ core-$(CONFIG_LGUEST_GUEST) += arch/x86/lguest/
 core-y += arch/x86/kernel/
 core-y += arch/x86/mm/
 
-# Remaining sub architecture files
-core-y += $(mcore-y)
-
 core-y += arch/x86/crypto/
 core-y += arch/x86/vdso/
 core-$(CONFIG_IA32_EMULATION) += arch/x86/ia32/
@@ -176,34 +153,23 @@ endif
 
 boot := arch/x86/boot
 
-PHONY += zImage bzImage compressed zlilo bzlilo \
-         zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+BOOT_TARGETS = bzlilo bzdisk fdimage fdimage144 fdimage288 isoimage install
+
+PHONY += bzImage $(BOOT_TARGETS)
 
 # Default kernel to build
 all: bzImage
 
 # KBUILD_IMAGE specify target image being built
-                    KBUILD_IMAGE := $(boot)/bzImage
-zImage zlilo zdisk: KBUILD_IMAGE := $(boot)/zImage
+KBUILD_IMAGE := $(boot)/bzImage
 
-zImage bzImage: vmlinux
+bzImage: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
        $(Q)mkdir -p $(objtree)/arch/$(UTS_MACHINE)/boot
        $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/$(UTS_MACHINE)/boot/$@
 
-compressed: zImage
-
-zlilo bzlilo: vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zlilo
-
-zdisk bzdisk: vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk
-
-fdimage fdimage144 fdimage288 isoimage: vmlinux
-       $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
-
-install:
-       $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+$(BOOT_TARGETS): vmlinux
+       $(Q)$(MAKE) $(build)=$(boot) $@
 
 PHONY += vdso_install
 vdso_install:
@@ -228,7 +194,3 @@ define archhelp
   echo  '                  FDARGS="..."  arguments for the booted kernel'
   echo  '                  FDINITRD=file initrd for the booted kernel'
 endef
-
-CLEAN_FILES += arch/x86/boot/fdimage \
-              arch/x86/boot/image.iso \
-              arch/x86/boot/mtools.conf
index cd48c7210016f0f2be76daa999b2a5d542019bce..fb737ce5888dfbd7213c171bf07ea43f88e66f6f 100644 (file)
@@ -6,33 +6,30 @@
 # for more details.
 #
 # Copyright (C) 1994 by Linus Torvalds
+# Changed by many, many contributors over the years.
 #
 
 # ROOT_DEV specifies the default root-device when making the image.
 # This can be either FLOPPY, CURRENT, /dev/xxxx or empty, in which case
 # the default of FLOPPY is used by 'build'.
 
-ROOT_DEV := CURRENT
+ROOT_DEV       := CURRENT
 
 # If you want to preset the SVGA mode, uncomment the next line and
 # set SVGA_MODE to whatever number you want.
 # Set it to -DSVGA_MODE=NORMAL_VGA if you just want the EGA/VGA mode.
 # The number is the same as you would ordinarily press at bootup.
 
-SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
+SVGA_MODE      := -DSVGA_MODE=NORMAL_VGA
 
-# If you want the RAM disk device, define this to be the size in blocks.
-
-#RAMDISK := -DRAMDISK=512
-
-targets                := vmlinux.bin setup.bin setup.elf zImage bzImage
+targets                := vmlinux.bin setup.bin setup.elf bzImage
+targets                += fdimage fdimage144 fdimage288 image.iso mtools.conf
 subdir-                := compressed
 
 setup-y                += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
 setup-y                += header.o main.o mca.o memory.o pm.o pmjump.o
 setup-y                += printf.o string.o tty.o video.o video-mode.o version.o
 setup-$(CONFIG_X86_APM_BOOT) += apm.o
-setup-$(CONFIG_X86_VOYAGER) += voyager.o
 
 # The link order of the video-*.o modules can matter.  In particular,
 # video-vga.o *must* be listed first, followed by video-vesa.o.
@@ -72,17 +69,13 @@ KBUILD_CFLAGS       := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
 KBUILD_CFLAGS +=   $(call cc-option,-m32)
 KBUILD_AFLAGS  := $(KBUILD_CFLAGS) -D__ASSEMBLY__
 
-$(obj)/zImage:  asflags-y := $(SVGA_MODE) $(RAMDISK)
-$(obj)/bzImage: ccflags-y := -D__BIG_KERNEL__
-$(obj)/bzImage: asflags-y := $(SVGA_MODE) $(RAMDISK) -D__BIG_KERNEL__
-$(obj)/bzImage: BUILDFLAGS   := -b
+$(obj)/bzImage: asflags-y  := $(SVGA_MODE)
 
 quiet_cmd_image = BUILD   $@
-cmd_image = $(obj)/tools/build $(BUILDFLAGS) $(obj)/setup.bin \
-           $(obj)/vmlinux.bin $(ROOT_DEV) > $@
+cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin \
+       $(ROOT_DEV) > $@
 
-$(obj)/zImage $(obj)/bzImage: $(obj)/setup.bin \
-                             $(obj)/vmlinux.bin $(obj)/tools/build FORCE
+$(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE
        $(call if_changed,image)
        @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
 
@@ -117,9 +110,11 @@ $(obj)/setup.bin: $(obj)/setup.elf FORCE
 $(obj)/compressed/vmlinux: FORCE
        $(Q)$(MAKE) $(build)=$(obj)/compressed $@
 
-# Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel
+# Set this if you want to pass append arguments to the
+# bzdisk/fdimage/isoimage kernel
 FDARGS =
-# Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel
+# Set this if you want an initrd included with the
+# bzdisk/fdimage/isoimage kernel
 FDINITRD =
 
 image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,)
@@ -128,7 +123,7 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in
        sed -e 's|@OBJ@|$(obj)|g' < $< > $@
 
 # This requires write access to /dev/fd0
-zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
+bzdisk: $(obj)/bzImage $(obj)/mtools.conf
        MTOOLSRC=$(obj)/mtools.conf mformat a:                  ; sync
        syslinux /dev/fd0                                       ; sync
        echo '$(image_cmdline)' | \
@@ -136,10 +131,10 @@ zdisk: $(BOOTIMAGE) $(obj)/mtools.conf
        if [ -f '$(FDINITRD)' ] ; then \
                MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \
        fi
-       MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux  ; sync
+       MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage a:linux        ; sync
 
 # These require being root or having syslinux 2.02 or higher installed
-fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
+fdimage fdimage144: $(obj)/bzImage $(obj)/mtools.conf
        dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440
        MTOOLSRC=$(obj)/mtools.conf mformat v:                  ; sync
        syslinux $(obj)/fdimage                                 ; sync
@@ -148,9 +143,9 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf
        if [ -f '$(FDINITRD)' ] ; then \
                MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \
        fi
-       MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux  ; sync
+       MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage v:linux        ; sync
 
-fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
+fdimage288: $(obj)/bzImage $(obj)/mtools.conf
        dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880
        MTOOLSRC=$(obj)/mtools.conf mformat w:                  ; sync
        syslinux $(obj)/fdimage                                 ; sync
@@ -159,9 +154,9 @@ fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf
        if [ -f '$(FDINITRD)' ] ; then \
                MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \
        fi
-       MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux  ; sync
+       MTOOLSRC=$(obj)/mtools.conf mcopy $(obj)/bzImage w:linux        ; sync
 
-isoimage: $(BOOTIMAGE)
+isoimage: $(obj)/bzImage
        -rm -rf $(obj)/isoimage
        mkdir $(obj)/isoimage
        for i in lib lib64 share end ; do \
@@ -171,7 +166,7 @@ isoimage: $(BOOTIMAGE)
                fi ; \
                if [ $$i = end ] ; then exit 1 ; fi ; \
        done
-       cp $(BOOTIMAGE) $(obj)/isoimage/linux
+       cp $(obj)/bzImage $(obj)/isoimage/linux
        echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg
        if [ -f '$(FDINITRD)' ] ; then \
                cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \
@@ -182,12 +177,13 @@ isoimage: $(BOOTIMAGE)
        isohybrid $(obj)/image.iso 2>/dev/null || true
        rm -rf $(obj)/isoimage
 
-zlilo: $(BOOTIMAGE)
+bzlilo: $(obj)/bzImage
        if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi
        if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
-       cat $(BOOTIMAGE) > $(INSTALL_PATH)/vmlinuz
+       cat $(obj)/bzImage > $(INSTALL_PATH)/vmlinuz
        cp System.map $(INSTALL_PATH)/
        if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
 
 install:
-       sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+       sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
+               System.map "$(INSTALL_PATH)"
index 4063d630deff23cf2974e9839bc714de42b01f4c..7c19ce8c2442c219a1e85ee22798a8abaa0bdf2c 100644 (file)
@@ -2,6 +2,7 @@
  *
  *   Copyright (C) 1991, 1992 Linus Torvalds
  *   Copyright 2007-2008 rPath, Inc. - All Rights Reserved
+ *   Copyright 2009 Intel Corporation
  *
  *   This file is part of the Linux kernel, and is made available under
  *   the terms of the GNU General Public License version 2.
 #include "boot.h"
 
 #define MAX_8042_LOOPS 100000
+#define MAX_8042_FF    32
 
 static int empty_8042(void)
 {
        u8 status;
        int loops = MAX_8042_LOOPS;
+       int ffs   = MAX_8042_FF;
 
        while (loops--) {
                io_delay();
 
                status = inb(0x64);
+               if (status == 0xff) {
+                       /* FF is a plausible, but very unlikely status */
+                       if (!--ffs)
+                               return -1; /* Assume no KBC present */
+               }
                if (status & 1) {
                        /* Read and discard input data */
                        io_delay();
@@ -118,44 +126,37 @@ static void enable_a20_fast(void)
 
 int enable_a20(void)
 {
-#if defined(CONFIG_X86_ELAN)
-       /* Elan croaks if we try to touch the KBC */
-       enable_a20_fast();
-       while (!a20_test_long())
-               ;
-       return 0;
-#elif defined(CONFIG_X86_VOYAGER)
-       /* On Voyager, a20_test() is unsafe? */
-       enable_a20_kbc();
-       return 0;
-#else
        int loops = A20_ENABLE_LOOPS;
-       while (loops--) {
-               /* First, check to see if A20 is already enabled
-                  (legacy free, etc.) */
-               if (a20_test_short())
-                       return 0;
-
-               /* Next, try the BIOS (INT 0x15, AX=0x2401) */
-               enable_a20_bios();
-               if (a20_test_short())
-                       return 0;
-
-               /* Try enabling A20 through the keyboard controller */
-               empty_8042();
-               if (a20_test_short())
-                       return 0; /* BIOS worked, but with delayed reaction */
-
-               enable_a20_kbc();
-               if (a20_test_long())
-                       return 0;
-
-               /* Finally, try enabling the "fast A20 gate" */
-               enable_a20_fast();
-               if (a20_test_long())
-                       return 0;
-       }
-
-       return -1;
-#endif
+       int kbc_err;
+
+       while (loops--) {
+               /* First, check to see if A20 is already enabled
+                  (legacy free, etc.) */
+               if (a20_test_short())
+                       return 0;
+
+               /* Next, try the BIOS (INT 0x15, AX=0x2401) */
+               enable_a20_bios();
+               if (a20_test_short())
+                       return 0;
+
+               /* Try enabling A20 through the keyboard controller */
+               kbc_err = empty_8042();
+
+               if (a20_test_short())
+                       return 0; /* BIOS worked, but with delayed reaction */
+
+               if (!kbc_err) {
+                       enable_a20_kbc();
+                       if (a20_test_long())
+                               return 0;
+               }
+
+               /* Finally, try enabling the "fast A20 gate" */
+               enable_a20_fast();
+               if (a20_test_long())
+                       return 0;
+       }
+
+       return -1;
 }
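
    For context on the MAX_8042_FF change above: port 0x64 returns the
    8042 status byte, and empty_8042() only cares about its two low bits.
    An all-ones read is the classic floating-bus pattern of a machine with
    no keyboard controller at all, which is why 32 consecutive 0xff reads
    now abort early instead of burning the full MAX_8042_LOOPS. A sketch
    of the assumed register layout (standard KBC, not quoted from this
    patch):

    /* Standard 8042 status bits consulted by empty_8042(): */
    #define KBC_STATUS_OBF  0x01  /* output buffer full: data at 0x60 */
    #define KBC_STATUS_IBF  0x02  /* input buffer full: KBC still busy */

    /* status == 0xff => almost certainly nothing decodes port 0x64
     * (floating bus), so enable_a20() skips the KBC method entirely. */
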
index cc0ef13fba7a60c4c20943589efecc7108661112..7b2692e897e5fcd75f53073dcf543665d838eb6b 100644 (file)
@@ -302,9 +302,6 @@ void probe_cards(int unsafe);
 /* video-vesa.c */
 void vesa_store_edid(void);
 
-/* voyager.c */
-int query_voyager(void);
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* BOOT_BOOT_H */
index 1771c804e02f57b26d5d4f9f1db8cdb1f7a4ed62..3ca4c194b8e588217dabef4dc5da54bcb2489edd 100644 (file)
@@ -4,7 +4,7 @@
 # create a compressed vmlinux image from the original vmlinux
 #
 
-targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o
+targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
 
 KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
 KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y)
 ifdef CONFIG_RELOCATABLE
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
        $(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
+       $(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
+       $(call if_changed,lzma)
 else
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzma)
 endif
 LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
 
 else
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzma)
 
 LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
 endif
 
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE
+suffix_$(CONFIG_KERNEL_GZIP)  = gz
+suffix_$(CONFIG_KERNEL_BZIP2) = bz2
+suffix_$(CONFIG_KERNEL_LZMA)  = lzma
+
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
        $(call if_changed,ld)
index 29c5fbf08392359ec77f4149b7b45c24cd8d071b..3a8a866fb2e291e958478cb741a56d16b1016e5f 100644 (file)
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>
 
 .section ".text.head","ax",@progbits
-       .globl startup_32
-
-startup_32:
+ENTRY(startup_32)
        cld
        /* test KEEP_SEGMENTS flag to see if the bootloader is asking
         * us to not reload segments */
@@ -113,6 +111,8 @@ startup_32:
  */
        leal relocated(%ebx), %eax
        jmp *%eax
+ENDPROC(startup_32)
+
 .section ".text"
 relocated:
 
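    The ENTRY()/ENDPROC() pairs replacing the open-coded .globl directives
    in head_32.S and head_64.S come from <linux/linkage.h>; approximately
    (quoted from memory, so treat as a sketch), they expand to:

    /* Approximate assembly-side expansions from <linux/linkage.h>;
     * ALIGN is linkage.h's section alignment macro. */
    #define ENTRY(name)     \
            .globl name;    \
            ALIGN;          \
            name:

    #define ENDPROC(name)            \
            .type name, @function;   \
            .size name, .-name
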
index 1d5dff4123e15a3cebf9c59f6edaa9575c58dc70..ed4a8294800268c25667b5ede9bc42e2cdd229c9 100644 (file)
@@ -26,8 +26,8 @@
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/msr.h>
 #include <asm/processor-flags.h>
@@ -35,9 +35,7 @@
 
 .section ".text.head"
        .code32
-       .globl startup_32
-
-startup_32:
+ENTRY(startup_32)
        cld
        /* test KEEP_SEGMENTS flag to see if the bootloader is asking
         * us to not reload segments */
@@ -176,6 +174,7 @@ startup_32:
 
        /* Jump from 32bit compatibility mode into 64bit mode. */
        lret
+ENDPROC(startup_32)
 
 no_longmode:
        /* This isn't an x86-64 CPU so hang */
@@ -295,7 +294,6 @@ relocated:
        call    decompress_kernel
        popq    %rsi
 
-
 /*
  * Jump to the decompressed kernel.
  */
index da062216948a6951990f354fa11ee9125b865fe7..e45be73684ffe3bd5559029eb86cf66ec00ff250 100644 (file)
 /*
  * gzip declarations
  */
-
-#define OF(args)       args
 #define STATIC         static
 
 #undef memset
 #undef memcpy
 #define memzero(s, n)  memset((s), 0, (n))
 
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
-
-/*
- * Window size must be at least 32k, and a power of two.
- * We don't actually have a window just a huge output buffer,
- * so we report a 2G window size, as that should always be
- * larger than our output buffer:
- */
-#define WSIZE          0x80000000
-
-/* Input buffer: */
-static unsigned char   *inbuf;
-
-/* Sliding window buffer (and final output buffer): */
-static unsigned char   *window;
-
-/* Valid bytes in inbuf: */
-static unsigned                insize;
-
-/* Index of next byte to be processed in inbuf: */
-static unsigned                inptr;
-
-/* Bytes in output buffer: */
-static unsigned                outcnt;
-
-/* gzip flag byte */
-#define ASCII_FLAG     0x01 /* bit 0 set: file probably ASCII text */
-#define CONTINUATION   0x02 /* bit 1 set: continuation of multi-part gz file */
-#define EXTRA_FIELD    0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME      0x08 /* bit 3 set: original file name present */
-#define COMMENT                0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED      0x20 /* bit 5 set: file is encrypted */
-#define RESERVED       0xC0 /* bit 6, 7:  reserved */
-
-#define get_byte()     (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
-/* Diagnostic functions */
-#ifdef DEBUG
-#  define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
-#  define Trace(x)     do { fprintf x; } while (0)
-#  define Tracev(x)    do { if (verbose) fprintf x ; } while (0)
-#  define Tracevv(x)   do { if (verbose > 1) fprintf x ; } while (0)
-#  define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0)
-#  define Tracecv(c, x)        do { if (verbose > 1 && (c)) fprintf x ; } while (0)
-#else
-#  define Assert(cond, msg)
-#  define Trace(x)
-#  define Tracev(x)
-#  define Tracevv(x)
-#  define Tracec(c, x)
-#  define Tracecv(c, x)
-#endif
 
-static int  fill_inbuf(void);
-static void flush_window(void);
 static void error(char *m);
 
 /*
@@ -189,13 +131,8 @@ static void error(char *m);
 static struct boot_params *real_mode;          /* Pointer to real-mode data */
 static int quiet;
 
-extern unsigned char input_data[];
-extern int input_len;
-
-static long bytes_out;
-
 static void *memset(void *s, int c, unsigned n);
-static void *memcpy(void *dest, const void *src, unsigned n);
+void *memcpy(void *dest, const void *src, unsigned n);
 
 static void __putstr(int, const char *);
 #define putstr(__x)  __putstr(0, __x)
@@ -213,7 +150,17 @@ static char *vidmem;
 static int vidport;
 static int lines, cols;
 
-#include "../../../../lib/inflate.c"
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
+
+#ifdef CONFIG_KERNEL_BZIP2
+#include "../../../../lib/decompress_bunzip2.c"
+#endif
+
+#ifdef CONFIG_KERNEL_LZMA
+#include "../../../../lib/decompress_unlzma.c"
+#endif
 
 static void scroll(void)
 {
@@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n)
        return s;
 }
 
-static void *memcpy(void *dest, const void *src, unsigned n)
+void *memcpy(void *dest, const void *src, unsigned n)
 {
        int i;
        const char *s = src;
@@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n)
        return dest;
 }
 
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- */
-static int fill_inbuf(void)
-{
-       error("ran out of input data");
-       return 0;
-}
-
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void flush_window(void)
-{
-       /* With my window equal to my output buffer
-        * I only need to compute the crc here.
-        */
-       unsigned long c = crc;         /* temporary variable */
-       unsigned n;
-       unsigned char *in, ch;
-
-       in = window;
-       for (n = 0; n < outcnt; n++) {
-               ch = *in++;
-               c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-       }
-       crc = c;
-       bytes_out += (unsigned long)outcnt;
-       outcnt = 0;
-}
 
 static void error(char *x)
 {
@@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
        lines = real_mode->screen_info.orig_video_lines;
        cols = real_mode->screen_info.orig_video_cols;
 
-       window = output;                /* Output buffer (Normally at 1M) */
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
-       inbuf  = input_data;            /* Input buffer */
-       insize = input_len;
-       inptr  = 0;
 
 #ifdef CONFIG_X86_64
        if ((unsigned long)output & (__KERNEL_ALIGN - 1))
@@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 #endif
 #endif
 
-       makecrc();
        if (!quiet)
                putstr("\nDecompressing Linux... ");
-       gunzip();
+       decompress(input_data, input_len, NULL, NULL, output, NULL, error);
        parse_elf(output);
        if (!quiet)
                putstr("done.\nBooting the kernel.\n");
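
    The decompress() invoked above is the common entry point that
    whichever lib/decompress_*.c file got included provides. Its shape,
    inferred from the call site (parameter names here are illustrative):

    /* Common decompressor entry point; misc.c passes NULL for fill,
     * flush and posp because the whole compressed image is already
     * in memory:
     *     decompress(input_data, input_len, NULL, NULL, output, NULL, error);
     */
    int decompress(unsigned char *inbuf, int len,
                   int (*fill)(void *buf, unsigned int size),  /* refill input  */
                   int (*flush)(void *buf, unsigned int size), /* drain output  */
                   unsigned char *output,                      /* result buffer */
                   int *posp,                                  /* bytes consumed */
                   void (*error_fn)(char *msg));               /* fatal handler */
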
index ef50c84e8b4bd6a517417d84c9275240ae19dd0c..11f272c6f5e9e0c2fcf11b030ee6d6df3cf2dd64 100644 (file)
@@ -8,6 +8,8 @@
  *
  * ----------------------------------------------------------------------- */
 
+#include <linux/linkage.h>
+
 /*
  * Memory copy routines
  */
@@ -15,9 +17,7 @@
        .code16gcc
        .text
 
-       .globl  memcpy
-       .type   memcpy, @function
-memcpy:
+GLOBAL(memcpy)
        pushw   %si
        pushw   %di
        movw    %ax, %di
@@ -31,11 +31,9 @@ memcpy:
        popw    %di
        popw    %si
        ret
-       .size   memcpy, .-memcpy
+ENDPROC(memcpy)
 
-       .globl  memset
-       .type   memset, @function
-memset:
+GLOBAL(memset)
        pushw   %di
        movw    %ax, %di
        movzbl  %dl, %eax
@@ -48,52 +46,42 @@ memset:
        rep; stosb
        popw    %di
        ret
-       .size   memset, .-memset
+ENDPROC(memset)
 
-       .globl  copy_from_fs
-       .type   copy_from_fs, @function
-copy_from_fs:
+GLOBAL(copy_from_fs)
        pushw   %ds
        pushw   %fs
        popw    %ds
        call    memcpy
        popw    %ds
        ret
-       .size   copy_from_fs, .-copy_from_fs
+ENDPROC(copy_from_fs)
 
-       .globl  copy_to_fs
-       .type   copy_to_fs, @function
-copy_to_fs:
+GLOBAL(copy_to_fs)
        pushw   %es
        pushw   %fs
        popw    %es
        call    memcpy
        popw    %es
        ret
-       .size   copy_to_fs, .-copy_to_fs
+ENDPROC(copy_to_fs)
 
 #if 0 /* Not currently used, but can be enabled as needed */
-
-       .globl  copy_from_gs
-       .type   copy_from_gs, @function
-copy_from_gs:
+GLOBAL(copy_from_gs)
        pushw   %ds
        pushw   %gs
        popw    %ds
        call    memcpy
        popw    %ds
        ret
-       .size   copy_from_gs, .-copy_from_gs
-       .globl  copy_to_gs
+ENDPROC(copy_from_gs)
 
-       .type   copy_to_gs, @function
-copy_to_gs:
+GLOBAL(copy_to_gs)
        pushw   %es
        pushw   %gs
        popw    %es
        call    memcpy
        popw    %es
        ret
-       .size   copy_to_gs, .-copy_to_gs
-
+ENDPROC(copy_to_gs)
 #endif
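
    GLOBAL() here is deliberately not ENTRY(): it assumes the
    <linux/linkage.h> helper that omits ENTRY()'s alignment padding,
    which matters in this size-constrained real-mode code. A sketch of
    the assumed definition:

    /* Assumed definition (cf. <linux/linkage.h>): like ENTRY() but with
     * no ALIGN, so no padding bytes creep into the setup image. */
    #define GLOBAL(name)    \
            .globl name;    \
            name:

    Paired with ENDPROC(), each routine still gets its ELF .type/.size
    annotations, replacing the hand-written pairs deleted above.
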
index b993062e9a5f7fae2d2492abd78cfb9933a0f01a..5d84d1c74e4c6d2a84666c7b7125641b00a1dcad 100644 (file)
 #include <linux/utsrelease.h>
 #include <asm/boot.h>
 #include <asm/e820.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/setup.h>
 #include "boot.h"
 #include "offsets.h"
 
-SETUPSECTS     = 4                     /* default nr of setup-sectors */
 BOOTSEG                = 0x07C0                /* original address of boot-sector */
-SYSSEG         = DEF_SYSSEG            /* system loaded at 0x10000 (65536) */
-SYSSIZE                = DEF_SYSSIZE           /* system size: # of 16-byte clicks */
-                                       /* to be loaded */
-ROOT_DEV       = 0                     /* ROOT_DEV is now written by "build" */
+SYSSEG         = 0x1000                /* historical load address >> 4 */
 
 #ifndef SVGA_MODE
 #define SVGA_MODE ASK_VGA
@@ -97,12 +93,12 @@ bugger_off_msg:
        .section ".header", "a"
        .globl  hdr
 hdr:
-setup_sects:   .byte SETUPSECTS
+setup_sects:   .byte 0                 /* Filled in by build.c */
 root_flags:    .word ROOT_RDONLY
-syssize:       .long SYSSIZE
-ram_size:      .word RAMDISK
+syssize:       .long 0                 /* Filled in by build.c */
+ram_size:      .word 0                 /* Obsolete */
 vid_mode:      .word SVGA_MODE
-root_dev:      .word ROOT_DEV
+root_dev:      .word 0                 /* Filled in by build.c */
 boot_flag:     .word 0xAA55
 
        # offset 512, entry point
@@ -123,14 +119,15 @@ _start:
                                        # or else old loadlin-1.5 will fail)
                .globl realmode_swtch
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
-start_sys_seg: .word   SYSSEG
+start_sys_seg: .word   SYSSEG          # obsolete and meaningless, but just
+                                       # in case something decided to "use" it
                .word   kernel_version-512 # pointing to kernel version string
                                        # above section of header is compatible
                                        # with loadlin-1.5 (header v1.5). Don't
                                        # change it.
 
-type_of_loader:        .byte   0               # = 0, old one (LILO, Loadlin,
-                                       #      Bootlin, SYSLX, bootsect...)
+type_of_loader:        .byte   0               # 0 means ancient bootloader, newer
+                                       # bootloaders know to change this.
                                        # See Documentation/i386/boot.txt for
                                        # assigned ids
 
@@ -142,11 +139,7 @@ CAN_USE_HEAP       = 0x80                  # If set, the loader also has set
                                        # space behind setup.S can be used for
                                        # heap purposes.
                                        # Only the loader knows what is free
-#ifndef __BIG_KERNEL__
-               .byte   0
-#else
                .byte   LOADED_HIGH
-#endif
 
 setup_move_size: .word  0x8000         # size to move, when setup is not
                                        # loaded at 0x90000. We will move setup
@@ -157,11 +150,7 @@ setup_move_size: .word  0x8000             # size to move, when setup is not
 
 code32_start:                          # here loaders can put a different
                                        # start address for 32-bit code.
-#ifndef __BIG_KERNEL__
-               .long   0x1000          #   0x1000 = default for zImage
-#else
                .long   0x100000        # 0x100000 = default for big kernel
-#endif
 
 ramdisk_image: .long   0               # address of loaded ramdisk image
                                        # Here the loader puts the 32-bit
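
    The labels above mirror the leading members of struct setup_header in
    <asm/bootparam.h>; an excerpt for orientation (member names follow the
    x86 boot protocol, quoted from memory):

    /* Leading members of struct setup_header, lining up with the labels
     * in header.S; zeroed fields are patched post-link by tools/build.c.
     * __u8/__u16/__u32 come from <asm/types.h>. */
    struct setup_header_excerpt {
            __u8    setup_sects;    /* "Filled in by build.c" above     */
            __u16   root_flags;
            __u32   syssize;        /* 16-byte paragraphs, from build.c */
            __u16   ram_size;       /* obsolete, kept for layout        */
            __u16   vid_mode;
            __u16   root_dev;       /* from build.c                     */
            __u16   boot_flag;      /* 0xAA55 boot-sector magic         */
    } __attribute__((packed));
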
index 197421db1af130d7ac3295e3cc1708b1e3b3e30f..58f0415d3ae09261d55c11d12855e3bc75fc4bdf 100644 (file)
@@ -149,11 +149,6 @@ void main(void)
        /* Query MCA information */
        query_mca();
 
-       /* Voyager */
-#ifdef CONFIG_X86_VOYAGER
-       query_voyager();
-#endif
-
        /* Query Intel SpeedStep (IST) information */
        query_ist();
 
index 85a1cd8a8ff8a4daee99ee11d86176bd7a97c166..8062f89152504b19babbd9e293791356ac2791aa 100644 (file)
@@ -32,47 +32,6 @@ static void realmode_switch_hook(void)
        }
 }
 
-/*
- * A zImage kernel is loaded at 0x10000 but wants to run at 0x1000.
- * A bzImage kernel is loaded and runs at 0x100000.
- */
-static void move_kernel_around(void)
-{
-       /* Note: rely on the compile-time option here rather than
-          the LOADED_HIGH flag.  The Qemu kernel loader unconditionally
-          sets the loadflags to zero. */
-#ifndef __BIG_KERNEL__
-       u16 dst_seg, src_seg;
-       u32 syssize;
-
-       dst_seg =  0x1000 >> 4;
-       src_seg = 0x10000 >> 4;
-       syssize = boot_params.hdr.syssize; /* Size in 16-byte paragraphs */
-
-       while (syssize) {
-               int paras  = (syssize >= 0x1000) ? 0x1000 : syssize;
-               int dwords = paras << 2;
-
-               asm volatile("pushw %%es ; "
-                            "pushw %%ds ; "
-                            "movw %1,%%es ; "
-                            "movw %2,%%ds ; "
-                            "xorw %%di,%%di ; "
-                            "xorw %%si,%%si ; "
-                            "rep;movsl ; "
-                            "popw %%ds ; "
-                            "popw %%es"
-                            : "+c" (dwords)
-                            : "r" (dst_seg), "r" (src_seg)
-                            : "esi", "edi");
-
-               syssize -= paras;
-               dst_seg += paras;
-               src_seg += paras;
-       }
-#endif
-}
-
 /*
  * Disable all interrupts at the legacy PIC.
  */
@@ -147,9 +106,6 @@ void go_to_protected_mode(void)
        /* Hook before leaving real mode, also disables interrupts */
        realmode_switch_hook();
 
-       /* Move the kernel/setup to their final resting places */
-       move_kernel_around();
-
        /* Enable the A20 gate */
        if (enable_a20()) {
                puts("A20 gate not responding, unable to boot...\n");
index 141b6e20ed3124dc102104991367de8b4779be68..019c17a7585115a7cfdb0c27696544b8127599b5 100644 (file)
 #include <asm/boot.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
+#include <linux/linkage.h>
 
        .text
-
-       .globl  protected_mode_jump
-       .type   protected_mode_jump, @function
-
        .code16
 
 /*
  * void protected_mode_jump(u32 entrypoint, u32 bootparams);
  */
-protected_mode_jump:
+GLOBAL(protected_mode_jump)
        movl    %edx, %esi              # Pointer to boot_params table
 
        xorl    %ebx, %ebx
@@ -47,12 +44,10 @@ protected_mode_jump:
        .byte   0x66, 0xea              # ljmpl opcode
 2:     .long   in_pm32                 # offset
        .word   __BOOT_CS               # segment
-
-       .size   protected_mode_jump, .-protected_mode_jump
+ENDPROC(protected_mode_jump)
 
        .code32
-       .type   in_pm32, @function
-in_pm32:
+GLOBAL(in_pm32)
        # Set up data segments for flat 32-bit mode
        movl    %ecx, %ds
        movl    %ecx, %es
@@ -78,5 +73,4 @@ in_pm32:
        lldt    %cx
 
        jmpl    *%eax                   # Jump to the 32-bit entrypoint
-
-       .size   in_pm32, .-in_pm32
+ENDPROC(in_pm32)
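
    The hand-assembled 0x66, 0xea sequence above exists because gas in
    .code16 would otherwise emit the 16-bit ljmp form; the operand that
    follows is a packed 48-bit far pointer, equivalent to this C layout
    (illustrative):

    /* Layout of the ljmpl operand emitted byte-by-byte above: */
    struct far_ptr32 {
            unsigned int   offset;   /* .long in_pm32   */
            unsigned short segment;  /* .word __BOOT_CS */
    } __attribute__((packed));
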
index 44dc1923c0e3eb277a90c582addb524cd4163629..ee3a4ea923ace82fc0c1f4aa07e4c1adc32ee318 100644 (file)
@@ -130,7 +130,7 @@ static void die(const char * str, ...)
 
 static void usage(void)
 {
-       die("Usage: build [-b] setup system [rootdev] [> image]");
+       die("Usage: build setup system [rootdev] [> image]");
 }
 
 int main(int argc, char ** argv)
@@ -145,11 +145,6 @@ int main(int argc, char ** argv)
        void *kernel;
        u32 crc = 0xffffffffUL;
 
-       if (argc > 2 && !strcmp(argv[1], "-b"))
-         {
-           is_big_kernel = 1;
-           argc--, argv++;
-         }
        if ((argc < 3) || (argc > 4))
                usage();
        if (argc > 3) {
@@ -216,8 +211,6 @@ int main(int argc, char ** argv)
                die("Unable to mmap '%s': %m", argv[2]);
        /* Number of 16-byte paragraphs, including space for a 4-byte CRC */
        sys_size = (sz + 15 + 4) / 16;
-       if (!is_big_kernel && sys_size > DEF_SYSSIZE)
-               die("System is too big. Try using bzImage or modules.");
 
        /* Patch the setup code with the appropriate size parameters */
        buf[0x1f1] = setup_sectors-1;
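
    The header patching continues past the visible hunk; with the zImage
    path gone, build.c is now the only writer of these fields. A sketch of
    the remainder of that sequence (same buf/sys_size as above), assuming
    the boot-protocol offsets, where syssize is a little-endian 32-bit
    paragraph count at 0x1f4:

    /* Sketch: the rest of the fields build.c stamps into setup.bin,
     * written byte-wise so host endianness cannot matter. */
    buf[0x1f4] = sys_size;          /* syssize, bits  7..0  */
    buf[0x1f5] = sys_size >> 8;     /* syssize, bits 15..8  */
    buf[0x1f6] = sys_size >> 16;    /* syssize, bits 23..16 */
    buf[0x1f7] = sys_size >> 24;    /* syssize, bits 31..24 */
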
diff --git a/arch/x86/boot/voyager.c b/arch/x86/boot/voyager.c
deleted file mode 100644 (file)
index 433909d..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- *   Copyright (C) 1991, 1992 Linus Torvalds
- *   Copyright 2007 rPath, Inc. - All Rights Reserved
- *
- *   This file is part of the Linux kernel, and is made available under
- *   the terms of the GNU General Public License version 2.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * Get the Voyager config information
- */
-
-#include "boot.h"
-
-int query_voyager(void)
-{
-       u8 err;
-       u16 es, di;
-       /* Abuse the apm_bios_info area for this */
-       u8 *data_ptr = (u8 *)&boot_params.apm_bios_info;
-
-       data_ptr[0] = 0xff;     /* Flag on config not found(?) */
-
-       asm("pushw %%es ; "
-           "int $0x15 ; "
-           "setc %0 ; "
-           "movw %%es, %1 ; "
-           "popw %%es"
-           : "=q" (err), "=r" (es), "=D" (di)
-           : "a" (0xffc0));
-
-       if (err)
-               return -1;      /* Not Voyager */
-
-       set_fs(es);
-       copy_from_fs(data_ptr, di, 7);  /* Table is 7 bytes apparently */
-       return 0;
-}
index edba00d98ac352b5b8ae363a091446ab031d3537..235b81d0f6f2b1083c06cc46210df33141eb2b4b 100644 (file)
@@ -1,14 +1,13 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc5
-# Wed Sep  3 17:23:09 2008
+# Linux kernel version: 2.6.29-rc4
+# Tue Feb 24 15:50:58 2009
 #
 # CONFIG_64BIT is not set
 CONFIG_X86_32=y
 # CONFIG_X86_64 is not set
 CONFIG_X86=y
 CONFIG_ARCH_DEFCONFIG="arch/x86/configs/i386_defconfig"
-# CONFIG_GENERIC_LOCKBREAK is not set
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CMOS_UPDATE=y
 CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -24,16 +23,14 @@ CONFIG_GENERIC_ISA_DMA=y
 CONFIG_GENERIC_IOMAP=y
 CONFIG_GENERIC_BUG=y
 CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_GENERIC_GPIO is not set
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 # CONFIG_RWSEM_GENERIC_SPINLOCK is not set
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 # CONFIG_GENERIC_TIME_VSYSCALL is not set
 CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
 CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 # CONFIG_HAVE_CPUMASK_OF_CPU_MAP is not set
@@ -42,12 +39,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
 # CONFIG_ZONE_DMA32 is not set
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 # CONFIG_AUDIT_ARCH is not set
-CONFIG_ARCH_SUPPORTS_AOUT=y
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_PENDING_IRQ=y
 CONFIG_X86_SMP=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
 CONFIG_X86_32_SMP=y
 CONFIG_X86_HT=y
 CONFIG_X86_BIOS_REBOOT=y
@@ -76,30 +73,44 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_AUDIT_TREE=y
+
+#
+# RCU Subsystem
+#
+# CONFIG_CLASSIC_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=18
-CONFIG_CGROUPS=y
-# CONFIG_CGROUP_DEBUG is not set
-CONFIG_CGROUP_NS=y
-# CONFIG_CGROUP_DEVICE is not set
-CONFIG_CPUSETS=y
 CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
 CONFIG_GROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
 # CONFIG_RT_GROUP_SCHED is not set
 # CONFIG_USER_SCHED is not set
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+# CONFIG_CGROUP_DEVICE is not set
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_RESOURCE_COUNTERS=y
 # CONFIG_CGROUP_MEM_RES_CTLR is not set
 # CONFIG_SYSFS_DEPRECATED_V2 is not set
-CONFIG_PROC_PID_CPUSET=y
 CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
 CONFIG_UTS_NS=y
 CONFIG_IPC_NS=y
 CONFIG_USER_NS=y
 CONFIG_PID_NS=y
+CONFIG_NET_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -124,12 +135,15 @@ CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
 CONFIG_SLUB_DEBUG=y
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
 CONFIG_MARKERS=y
 # CONFIG_OPROFILE is not set
 CONFIG_HAVE_OPROFILE=y
@@ -139,15 +153,10 @@ CONFIG_KRETPROBES=y
 CONFIG_HAVE_IOREMAP_PROT=y
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_USE_GENERIC_SMP_HELPERS=y
-# CONFIG_HAVE_CLK is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
 CONFIG_HAVE_GENERIC_DMA_COHERENT=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
 # CONFIG_MODULE_FORCE_LOAD is not set
@@ -155,12 +164,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
 # CONFIG_LBD is not set
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_LSF is not set
 CONFIG_BLK_DEV_BSG=y
 # CONFIG_BLK_DEV_INTEGRITY is not set
 
@@ -176,7 +183,7 @@ CONFIG_IOSCHED_CFQ=y
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
 
 #
 # Processor type and features
@@ -186,15 +193,14 @@ CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 CONFIG_SMP=y
+CONFIG_SPARSE_IRQ=y
 CONFIG_X86_FIND_SMP_CONFIG=y
 CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
 # CONFIG_X86_ELAN is not set
-# CONFIG_X86_VOYAGER is not set
 # CONFIG_X86_GENERICARCH is not set
 # CONFIG_X86_VSMP is not set
 # CONFIG_X86_RDC321X is not set
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 # CONFIG_PARAVIRT_GUEST is not set
 # CONFIG_MEMTEST is not set
 # CONFIG_M386 is not set
@@ -238,10 +244,19 @@ CONFIG_X86_TSC=y
 CONFIG_X86_CMOV=y
 CONFIG_X86_MINIMUM_CPU_FAMILY=4
 CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_CYRIX_32=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR_32=y
+CONFIG_CPU_SUP_TRANSMETA_32=y
+CONFIG_CPU_SUP_UMC_32=y
+CONFIG_X86_DS=y
+CONFIG_X86_PTRACE_BTS=y
 CONFIG_HPET_TIMER=y
 CONFIG_HPET_EMULATE_RTC=y
 CONFIG_DMI=y
 # CONFIG_IOMMU_HELPER is not set
+# CONFIG_IOMMU_API is not set
 CONFIG_NR_CPUS=64
 CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
@@ -250,12 +265,17 @@ CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
-# CONFIG_X86_MCE is not set
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_NONFATAL=y
+CONFIG_X86_MCE_P4THERMAL=y
 CONFIG_VM86=y
 # CONFIG_TOSHIBA is not set
 # CONFIG_I8K is not set
 CONFIG_X86_REBOOTFIXUPS=y
 CONFIG_MICROCODE=y
+CONFIG_MICROCODE_INTEL=y
+CONFIG_MICROCODE_AMD=y
 CONFIG_MICROCODE_OLD_INTERFACE=y
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
@@ -264,6 +284,7 @@ CONFIG_HIGHMEM4G=y
 # CONFIG_HIGHMEM64G is not set
 CONFIG_PAGE_OFFSET=0xC0000000
 CONFIG_HIGHMEM=y
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
@@ -274,14 +295,17 @@ CONFIG_FLATMEM_MANUAL=y
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
 CONFIG_SPARSEMEM_STATIC=y
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
-CONFIG_RESOURCES_64BIT=y
+# CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
 CONFIG_HIGHPTE=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_RESERVE_LOW_64K=y
 # CONFIG_MATH_EMULATION is not set
 CONFIG_MTRR=y
 # CONFIG_MTRR_SANITIZER is not set
@@ -302,10 +326,11 @@ CONFIG_PHYSICAL_START=0x1000000
 CONFIG_PHYSICAL_ALIGN=0x200000
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 
 #
-# Power management options
+# Power management and ACPI options
 #
 CONFIG_PM=y
 CONFIG_PM_DEBUG=y
@@ -331,19 +356,13 @@ CONFIG_ACPI_BATTERY=y
 CONFIG_ACPI_BUTTON=y
 CONFIG_ACPI_FAN=y
 CONFIG_ACPI_DOCK=y
-# CONFIG_ACPI_BAY is not set
 CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_THERMAL=y
-# CONFIG_ACPI_WMI is not set
-# CONFIG_ACPI_ASUS is not set
-# CONFIG_ACPI_TOSHIBA is not set
 # CONFIG_ACPI_CUSTOM_DSDT is not set
 CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_EC=y
 # CONFIG_ACPI_PCI_SLOT is not set
-CONFIG_ACPI_POWER=y
 CONFIG_ACPI_SYSTEM=y
 CONFIG_X86_PM_TIMER=y
 CONFIG_ACPI_CONTAINER=y
@@ -388,7 +407,6 @@ CONFIG_X86_ACPI_CPUFREQ=y
 #
 # shared options
 #
-# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
 # CONFIG_X86_SPEEDSTEP_LIB is not set
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_IDLE_GOV_LADDER=y
@@ -415,6 +433,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
 # CONFIG_PCI_LEGACY is not set
 # CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
 CONFIG_HT_IRQ=y
 CONFIG_ISA_DMA_API=y
 # CONFIG_ISA is not set
@@ -452,13 +471,17 @@ CONFIG_HOTPLUG_PCI=y
 # Executable file formats / Emulations
 #
 CONFIG_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_HAVE_AOUT=y
 # CONFIG_BINFMT_AOUT is not set
 CONFIG_BINFMT_MISC=y
+CONFIG_HAVE_ATOMIC_IOMAP=y
 CONFIG_NET=y
 
 #
 # Networking options
 #
+CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -519,7 +542,6 @@ CONFIG_DEFAULT_CUBIC=y
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
-# CONFIG_IP_VS is not set
 CONFIG_IPV6=y
 # CONFIG_IPV6_PRIVACY is not set
 # CONFIG_IPV6_ROUTER_PREF is not set
@@ -557,19 +579,21 @@ CONFIG_NF_CONNTRACK_IRC=y
 CONFIG_NF_CONNTRACK_SIP=y
 CONFIG_NF_CT_NETLINK=y
 CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_IP_VS is not set
 
 #
 # IP: Netfilter Configuration
 #
+CONFIG_NF_DEFRAG_IPV4=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_NF_CONNTRACK_PROC_COMPAT=y
 CONFIG_IP_NF_IPTABLES=y
@@ -595,8 +619,8 @@ CONFIG_IP_NF_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_IPV6HEADER=y
-CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_LOG=y
+CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 # CONFIG_IP_DCCP is not set
@@ -604,6 +628,7 @@ CONFIG_IP6_NF_MANGLE=y
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 CONFIG_LLC=y
@@ -623,6 +648,7 @@ CONFIG_NET_SCHED=y
 # CONFIG_NET_SCH_HTB is not set
 # CONFIG_NET_SCH_HFSC is not set
 # CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
 # CONFIG_NET_SCH_RED is not set
 # CONFIG_NET_SCH_SFQ is not set
 # CONFIG_NET_SCH_TEQL is not set
@@ -630,6 +656,7 @@ CONFIG_NET_SCHED=y
 # CONFIG_NET_SCH_GRED is not set
 # CONFIG_NET_SCH_DSMARK is not set
 # CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
 # CONFIG_NET_SCH_INGRESS is not set
 
 #
@@ -644,6 +671,7 @@ CONFIG_NET_CLS=y
 # CONFIG_NET_CLS_RSVP is not set
 # CONFIG_NET_CLS_RSVP6 is not set
 # CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_STACK=32
 # CONFIG_NET_EMATCH_CMP is not set
@@ -659,7 +687,9 @@ CONFIG_NET_CLS_ACT=y
 # CONFIG_NET_ACT_NAT is not set
 # CONFIG_NET_ACT_PEDIT is not set
 # CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
 CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -676,29 +706,33 @@ CONFIG_HAMRADIO=y
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
 CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
+CONFIG_WIRELESS=y
 CONFIG_CFG80211=y
+# CONFIG_CFG80211_REG_DEBUG is not set
 CONFIG_NL80211=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
 CONFIG_WIRELESS_EXT=y
 CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
 CONFIG_MAC80211=y
 
 #
 # Rate control algorithm selection
 #
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
-CONFIG_MAC80211_RC_DEFAULT="pid"
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
 # CONFIG_MAC80211_MESH is not set
 CONFIG_MAC80211_LEDS=y
 # CONFIG_MAC80211_DEBUGFS is not set
 # CONFIG_MAC80211_DEBUG_MENU is not set
-# CONFIG_IEEE80211 is not set
-# CONFIG_RFKILL is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+# CONFIG_RFKILL_INPUT is not set
+CONFIG_RFKILL_LEDS=y
 # CONFIG_NET_9P is not set
 
 #
@@ -722,7 +756,7 @@ CONFIG_PROC_EVENTS=y
 # CONFIG_MTD is not set
 # CONFIG_PARPORT is not set
 CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
+CONFIG_PNP_DEBUG_MESSAGES=y
 
 #
 # Protocols
@@ -750,20 +784,19 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_MISC_DEVICES=y
 # CONFIG_IBM_ASM is not set
 # CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
-# CONFIG_ACER_WMI is not set
-# CONFIG_ASUS_LAPTOP is not set
-# CONFIG_FUJITSU_LAPTOP is not set
-# CONFIG_TC1100_WMI is not set
-# CONFIG_MSI_LAPTOP is not set
-# CONFIG_COMPAL_LAPTOP is not set
-# CONFIG_SONY_LAPTOP is not set
-# CONFIG_THINKPAD_ACPI is not set
-# CONFIG_INTEL_MENLOW is not set
+# CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_HP_ILO is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -802,7 +835,7 @@ CONFIG_SCSI_WAIT_SCAN=m
 #
 CONFIG_SCSI_SPI_ATTRS=y
 # CONFIG_SCSI_FC_ATTRS is not set
-CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_ISCSI_ATTRS is not set
 # CONFIG_SCSI_SAS_ATTRS is not set
 # CONFIG_SCSI_SAS_LIBSAS is not set
 # CONFIG_SCSI_SRP_ATTRS is not set
@@ -875,6 +908,7 @@ CONFIG_PATA_OLDPIIX=y
 CONFIG_PATA_SCH=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
 # CONFIG_MD_LINEAR is not set
 # CONFIG_MD_RAID0 is not set
 # CONFIG_MD_RAID1 is not set
@@ -930,6 +964,9 @@ CONFIG_PHYLIB=y
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
 # CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
@@ -953,6 +990,9 @@ CONFIG_NET_TULIP=y
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
 # CONFIG_AMD8111_ETH is not set
@@ -960,7 +1000,6 @@ CONFIG_NET_PCI=y
 # CONFIG_B44 is not set
 CONFIG_FORCEDETH=y
 # CONFIG_FORCEDETH_NAPI is not set
-# CONFIG_EEPRO100 is not set
 CONFIG_E100=y
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
@@ -974,15 +1013,16 @@ CONFIG_8139TOO=y
 # CONFIG_R6040 is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
 # CONFIG_VIA_RHINE is not set
 # CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 CONFIG_E1000E=y
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
@@ -1000,18 +1040,23 @@ CONFIG_BNX2=y
 # CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
 # CONFIG_ATL1E is not set
+# CONFIG_JME is not set
 CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
 # CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
 # CONFIG_IXGBE is not set
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
 # CONFIG_NETXEN_NIC is not set
 # CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
 # CONFIG_MLX4_CORE is not set
 # CONFIG_TEHUTI is not set
 # CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
 # CONFIG_SFC is not set
 CONFIG_TR=y
 # CONFIG_IBMOL is not set
@@ -1025,9 +1070,8 @@ CONFIG_TR=y
 # CONFIG_WLAN_PRE80211 is not set
 CONFIG_WLAN_80211=y
 # CONFIG_PCMCIA_RAYCS is not set
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
 # CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
 # CONFIG_AIRO is not set
 # CONFIG_HERMES is not set
 # CONFIG_ATMEL is not set
@@ -1044,6 +1088,8 @@ CONFIG_WLAN_80211=y
 CONFIG_ATH5K=y
 # CONFIG_ATH5K_DEBUG is not set
 # CONFIG_ATH9K is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
 # CONFIG_IWLCORE is not set
 # CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_IWLAGN is not set
@@ -1054,6 +1100,10 @@ CONFIG_ATH5K=y
 # CONFIG_ZD1211RW is not set
 # CONFIG_RT2X00 is not set
 
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
 #
 # USB Network Adapters
 #
@@ -1062,6 +1112,7 @@ CONFIG_ATH5K=y
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
 # CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
 CONFIG_NET_PCMCIA=y
 # CONFIG_PCMCIA_3C589 is not set
 # CONFIG_PCMCIA_3C574 is not set
@@ -1123,6 +1174,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
 CONFIG_MOUSE_PS2_SYNAPTICS=y
 CONFIG_MOUSE_PS2_LIFEBOOK=y
 CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
 # CONFIG_MOUSE_PS2_TOUCHKIT is not set
 # CONFIG_MOUSE_SERIAL is not set
 # CONFIG_MOUSE_APPLETOUCH is not set
@@ -1160,15 +1212,16 @@ CONFIG_INPUT_TOUCHSCREEN=y
 # CONFIG_TOUCHSCREEN_FUJITSU is not set
 # CONFIG_TOUCHSCREEN_GUNZE is not set
 # CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
 # CONFIG_TOUCHSCREEN_MTOUCH is not set
 # CONFIG_TOUCHSCREEN_INEXIO is not set
 # CONFIG_TOUCHSCREEN_MK712 is not set
 # CONFIG_TOUCHSCREEN_PENMOUNT is not set
 # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
 # CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
 # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
 # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
 CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_PCSPKR is not set
 # CONFIG_INPUT_APANEL is not set
@@ -1179,6 +1232,7 @@ CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_KEYSPAN_REMOTE is not set
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
 # CONFIG_INPUT_UINPUT is not set
 
 #
@@ -1245,6 +1299,7 @@ CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
@@ -1279,6 +1334,7 @@ CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 # CONFIG_I2C_CHARDEV is not set
 CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
 
 #
 # I2C Hardware Bus support
@@ -1331,8 +1387,6 @@ CONFIG_I2C_I801=y
 # Miscellaneous I2C Chip support
 #
 # CONFIG_DS1682 is not set
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_LEGACY is not set
 # CONFIG_SENSORS_PCF8574 is not set
 # CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
@@ -1351,8 +1405,78 @@ CONFIG_POWER_SUPPLY=y
 # CONFIG_POWER_SUPPLY_DEBUG is not set
 # CONFIG_PDA_POWER is not set
 # CONFIG_BATTERY_DS2760 is not set
-# CONFIG_HWMON is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
 CONFIG_THERMAL=y
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -1372,6 +1496,7 @@ CONFIG_WATCHDOG=y
 # CONFIG_I6300ESB_WDT is not set
 # CONFIG_ITCO_WDT is not set
 # CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
 # CONFIG_HP_WATCHDOG is not set
 # CONFIG_SC1200_WDT is not set
 # CONFIG_PC87413_WDT is not set
@@ -1379,9 +1504,11 @@ CONFIG_WATCHDOG=y
 # CONFIG_SBC8360_WDT is not set
 # CONFIG_SBC7240_WDT is not set
 # CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
 # CONFIG_SMSC37B787_WDT is not set
 # CONFIG_W83627HF_WDT is not set
 # CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
 # CONFIG_W83877F_WDT is not set
 # CONFIG_W83977F_WDT is not set
 # CONFIG_MACHZ_WDT is not set
@@ -1397,11 +1524,11 @@ CONFIG_WATCHDOG=y
 # USB-based Watchdog Cards
 #
 # CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
@@ -1410,7 +1537,13 @@ CONFIG_SSB_POSSIBLE=y
 # CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_TWL4030_CORE is not set
 # CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -1450,6 +1583,7 @@ CONFIG_DRM=y
 # CONFIG_DRM_I810 is not set
 # CONFIG_DRM_I830 is not set
 CONFIG_DRM_I915=y
+# CONFIG_DRM_I915_KMS is not set
 # CONFIG_DRM_MGA is not set
 # CONFIG_DRM_SIS is not set
 # CONFIG_DRM_VIA is not set
@@ -1459,6 +1593,7 @@ CONFIG_DRM_I915=y
 CONFIG_FB=y
 # CONFIG_FIRMWARE_EDID is not set
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
@@ -1487,7 +1622,6 @@ CONFIG_FB_TILEBLITTING=y
 # CONFIG_FB_UVESA is not set
 # CONFIG_FB_VESA is not set
 CONFIG_FB_EFI=y
-# CONFIG_FB_IMAC is not set
 # CONFIG_FB_N411 is not set
 # CONFIG_FB_HGA is not set
 # CONFIG_FB_S1D13XXX is not set
@@ -1503,6 +1637,7 @@ CONFIG_FB_EFI=y
 # CONFIG_FB_S3 is not set
 # CONFIG_FB_SAVAGE is not set
 # CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
 # CONFIG_FB_NEOMAGIC is not set
 # CONFIG_FB_KYRO is not set
 # CONFIG_FB_3DFX is not set
@@ -1515,12 +1650,15 @@ CONFIG_FB_EFI=y
 # CONFIG_FB_CARMINE is not set
 # CONFIG_FB_GEODE is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GENERIC=y
 # CONFIG_BACKLIGHT_PROGEAR is not set
 # CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
 
 #
 # Display device support
@@ -1540,10 +1678,12 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_LOGO_LINUX_CLUT224=y
 CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=y
 CONFIG_SND_TIMER=y
 CONFIG_SND_PCM=y
 CONFIG_SND_HWDEP=y
+CONFIG_SND_JACK=y
 CONFIG_SND_SEQUENCER=y
 CONFIG_SND_SEQ_DUMMY=y
 CONFIG_SND_OSSEMUL=y
@@ -1551,6 +1691,8 @@ CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_PCM_OSS_PLUGINS=y
 CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SUPPORT_OLD_API=y
 CONFIG_SND_VERBOSE_PROCFS=y
@@ -1605,11 +1747,16 @@ CONFIG_SND_PCI=y
 # CONFIG_SND_FM801 is not set
 CONFIG_SND_HDA_INTEL=y
 CONFIG_SND_HDA_HWDEP=y
+# CONFIG_SND_HDA_RECONFIG is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
 CONFIG_SND_HDA_CODEC_REALTEK=y
 CONFIG_SND_HDA_CODEC_ANALOG=y
 CONFIG_SND_HDA_CODEC_SIGMATEL=y
 CONFIG_SND_HDA_CODEC_VIA=y
 CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
 CONFIG_SND_HDA_CODEC_CONEXANT=y
 CONFIG_SND_HDA_CODEC_CMEDIA=y
 CONFIG_SND_HDA_CODEC_SI3054=y
@@ -1643,6 +1790,7 @@ CONFIG_SND_USB=y
 # CONFIG_SND_USB_AUDIO is not set
 # CONFIG_SND_USB_USX2Y is not set
 # CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
 CONFIG_SND_PCMCIA=y
 # CONFIG_SND_VXPOCKET is not set
 # CONFIG_SND_PDAUDIOCF is not set
@@ -1657,15 +1805,37 @@ CONFIG_HIDRAW=y
 # USB Input Devices
 #
 CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-CONFIG_HID_FF=y
 CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
 CONFIG_LOGITECH_FF=y
 # CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
 CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_TOPSEED=y
 CONFIG_THRUSTMASTER_FF=y
 CONFIG_ZEROPLUS_FF=y
-CONFIG_USB_HIDDEV=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1683,6 +1853,8 @@ CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
 # CONFIG_USB_OTG is not set
 CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
@@ -1691,6 +1863,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
 # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
 # CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_OXU210HP_HCD is not set
 # CONFIG_USB_ISP116X_HCD is not set
 # CONFIG_USB_ISP1760_HCD is not set
 CONFIG_USB_OHCI_HCD=y
@@ -1700,6 +1873,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 CONFIG_USB_UHCI_HCD=y
 # CONFIG_USB_SL811_HCD is not set
 # CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
 
 #
 # USB Device Class drivers
@@ -1707,20 +1882,20 @@ CONFIG_USB_UHCI_HCD=y
 # CONFIG_USB_ACM is not set
 CONFIG_USB_PRINTER=y
 # CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
 #
 
 #
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
 #
 CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_DEBUG is not set
 # CONFIG_USB_STORAGE_DATAFAB is not set
 # CONFIG_USB_STORAGE_FREECOM is not set
 # CONFIG_USB_STORAGE_ISD200 is not set
-# CONFIG_USB_STORAGE_DPCM is not set
 # CONFIG_USB_STORAGE_USBAT is not set
 # CONFIG_USB_STORAGE_SDDR09 is not set
 # CONFIG_USB_STORAGE_SDDR55 is not set
@@ -1728,7 +1903,6 @@ CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_ALAUDA is not set
 # CONFIG_USB_STORAGE_ONETOUCH is not set
 # CONFIG_USB_STORAGE_KARMA is not set
-# CONFIG_USB_STORAGE_SIERRA is not set
 # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
 CONFIG_USB_LIBUSUAL=y
 
@@ -1749,6 +1923,7 @@ CONFIG_USB_LIBUSUAL=y
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
 # CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
@@ -1766,7 +1941,13 @@ CONFIG_USB_LIBUSUAL=y
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_TEST is not set
 # CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
 # CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -1775,6 +1956,7 @@ CONFIG_LEDS_CLASS=y
 #
 # LED drivers
 #
+# CONFIG_LEDS_ALIX2 is not set
 # CONFIG_LEDS_PCA9532 is not set
 # CONFIG_LEDS_CLEVO_MAIL is not set
 # CONFIG_LEDS_PCA955X is not set
@@ -1785,6 +1967,7 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_TRIGGERS=y
 # CONFIG_LEDS_TRIGGER_TIMER is not set
 # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
 # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
@@ -1824,6 +2007,7 @@ CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
 
 #
 # SPI RTC drivers
@@ -1833,12 +2017,15 @@ CONFIG_RTC_INTF_DEV=y
 # Platform RTC drivers
 #
 CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
 # CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
 # CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
@@ -1851,6 +2038,22 @@ CONFIG_DMADEVICES=y
 #
 # CONFIG_INTEL_IOATDMA is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_ACER_WMI is not set
+# CONFIG_ASUS_LAPTOP is not set
+# CONFIG_FUJITSU_LAPTOP is not set
+# CONFIG_TC1100_WMI is not set
+# CONFIG_MSI_LAPTOP is not set
+# CONFIG_PANASONIC_LAPTOP is not set
+# CONFIG_COMPAL_LAPTOP is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_INTEL_MENLOW is not set
+CONFIG_EEEPC_LAPTOP=y
+# CONFIG_ACPI_WMI is not set
+# CONFIG_ACPI_ASUS is not set
+# CONFIG_ACPI_TOSHIBA is not set
 
 #
 # Firmware Drivers
@@ -1861,8 +2064,7 @@ CONFIG_EFI_VARS=y
 # CONFIG_DELL_RBU is not set
 # CONFIG_DCDBAS is not set
 CONFIG_DMIID=y
-CONFIG_ISCSI_IBFT_FIND=y
-CONFIG_ISCSI_IBFT=y
+# CONFIG_ISCSI_IBFT_FIND is not set
 
 #
 # File systems
@@ -1872,21 +2074,24 @@ CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_XATTR=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QUOTA_TREE=y
 # CONFIG_QFMT_V1 is not set
 CONFIG_QFMT_V2=y
 CONFIG_QUOTACTL=y
@@ -1920,16 +2125,14 @@ CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_VMCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 # CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
 # CONFIG_ECRYPT_FS is not set
@@ -1939,6 +2142,7 @@ CONFIG_HUGETLB_PAGE=y
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
 # CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_OMFS_FS is not set
@@ -1960,6 +2164,7 @@ CONFIG_NFS_ACL_SUPPORT=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -2036,7 +2241,7 @@ CONFIG_NLS_UTF8=y
 #
 CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 CONFIG_PRINTK_TIME=y
-CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
 CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
@@ -2066,33 +2271,54 @@ CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
 # CONFIG_DEBUG_WRITECOUNT is not set
 CONFIG_DEBUG_MEMORY_INIT=y
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
 CONFIG_FRAME_POINTER=y
 # CONFIG_BOOT_PRINTK_DELAY is not set
 # CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_KPROBES_SANITY_TEST is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_LKDTM is not set
 # CONFIG_FAULT_INJECTION is not set
 # CONFIG_LATENCYTOP is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_HAVE_FTRACE=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
-# CONFIG_FTRACE is not set
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_HW_BRANCH_TRACER=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
 # CONFIG_IRQSOFF_TRACER is not set
 # CONFIG_SYSPROF_TRACER is not set
 # CONFIG_SCHED_TRACER is not set
 # CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_POWER_TRACER is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_HW_BRANCH_TRACER is not set
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_X86_VERBOSE_BOOTUP=y
 CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_DBGP=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DEBUG_STACK_USAGE=y
 # CONFIG_DEBUG_PAGEALLOC is not set
@@ -2123,8 +2349,10 @@ CONFIG_OPTIMIZE_INLINING=y
 CONFIG_KEYS=y
 CONFIG_KEYS_DEBUG_PROC_KEYS=y
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 CONFIG_SECURITY_NETWORK=y
 # CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
 CONFIG_SECURITY_FILE_CAPABILITIES=y
 # CONFIG_SECURITY_ROOTPLUG is not set
 CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
@@ -2135,7 +2363,6 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_SELINUX_DEVELOP=y
 CONFIG_SECURITY_SELINUX_AVC_STATS=y
 CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
 # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
 # CONFIG_SECURITY_SMACK is not set
 CONFIG_CRYPTO=y
@@ -2143,11 +2370,18 @@ CONFIG_CRYPTO=y
 #
 # Crypto core or helper
 #
+# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_CRYPTD is not set
@@ -2182,6 +2416,7 @@ CONFIG_CRYPTO_HMAC=y
 # Digest
 #
 # CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
 # CONFIG_CRYPTO_MD4 is not set
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -2222,6 +2457,11 @@ CONFIG_CRYPTO_DES=y
 #
 # CONFIG_CRYPTO_DEFLATE is not set
 # CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_HW=y
 # CONFIG_CRYPTO_DEV_PADLOCK is not set
 # CONFIG_CRYPTO_DEV_GEODE is not set
@@ -2239,6 +2479,7 @@ CONFIG_VIRTUALIZATION=y
 CONFIG_BITREVERSE=y
 CONFIG_GENERIC_FIND_FIRST_BIT=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
 CONFIG_CRC_T10DIF=y
index 322dd2748fc905770ccaeed92d5298d7612cc264..9fe5d212ab4cc8291e0c55ebdc619a57c85f2d64 100644
@@ -1,14 +1,13 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.27-rc5
-# Wed Sep  3 17:13:39 2008
+# Linux kernel version: 2.6.29-rc4
+# Tue Feb 24 15:44:16 2009
 #
 CONFIG_64BIT=y
 # CONFIG_X86_32 is not set
 CONFIG_X86_64=y
 CONFIG_X86=y
 CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig"
-# CONFIG_GENERIC_LOCKBREAK is not set
 CONFIG_GENERIC_TIME=y
 CONFIG_GENERIC_CMOS_UPDATE=y
 CONFIG_CLOCKSOURCE_WATCHDOG=y
@@ -23,17 +22,16 @@ CONFIG_ZONE_DMA=y
 CONFIG_GENERIC_ISA_DMA=y
 CONFIG_GENERIC_IOMAP=y
 CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
 CONFIG_GENERIC_HWEIGHT=y
-# CONFIG_GENERIC_GPIO is not set
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
 CONFIG_RWSEM_GENERIC_SPINLOCK=y
 # CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
-# CONFIG_ARCH_HAS_ILOG2_U32 is not set
-# CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_ARCH_HAS_CPU_IDLE_WAIT=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_GENERIC_TIME_VSYSCALL=y
 CONFIG_ARCH_HAS_CPU_RELAX=y
+CONFIG_ARCH_HAS_DEFAULT_IDLE=y
 CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
 CONFIG_HAVE_SETUP_PER_CPU_AREA=y
 CONFIG_HAVE_CPUMASK_OF_CPU_MAP=y
@@ -42,12 +40,12 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y
 CONFIG_ZONE_DMA32=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_AUDIT_ARCH=y
-CONFIG_ARCH_SUPPORTS_AOUT=y
 CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y
 CONFIG_GENERIC_HARDIRQS=y
 CONFIG_GENERIC_IRQ_PROBE=y
 CONFIG_GENERIC_PENDING_IRQ=y
 CONFIG_X86_SMP=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
 CONFIG_X86_64_SMP=y
 CONFIG_X86_HT=y
 CONFIG_X86_BIOS_REBOOT=y
@@ -76,30 +74,44 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
 CONFIG_AUDITSYSCALL=y
 CONFIG_AUDIT_TREE=y
+
+#
+# RCU Subsystem
+#
+# CONFIG_CLASSIC_RCU is not set
+CONFIG_TREE_RCU=y
+# CONFIG_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=64
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU_TRACE is not set
 # CONFIG_IKCONFIG is not set
 CONFIG_LOG_BUF_SHIFT=18
-CONFIG_CGROUPS=y
-# CONFIG_CGROUP_DEBUG is not set
-CONFIG_CGROUP_NS=y
-# CONFIG_CGROUP_DEVICE is not set
-CONFIG_CPUSETS=y
 CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y
 CONFIG_GROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
 # CONFIG_RT_GROUP_SCHED is not set
 # CONFIG_USER_SCHED is not set
 CONFIG_CGROUP_SCHED=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+# CONFIG_CGROUP_DEVICE is not set
+CONFIG_CPUSETS=y
+CONFIG_PROC_PID_CPUSET=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_RESOURCE_COUNTERS=y
 # CONFIG_CGROUP_MEM_RES_CTLR is not set
 # CONFIG_SYSFS_DEPRECATED_V2 is not set
-CONFIG_PROC_PID_CPUSET=y
 CONFIG_RELAY=y
 CONFIG_NAMESPACES=y
 CONFIG_UTS_NS=y
 CONFIG_IPC_NS=y
 CONFIG_USER_NS=y
 CONFIG_PID_NS=y
+CONFIG_NET_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_INITRAMFS_SOURCE=""
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -124,12 +136,15 @@ CONFIG_SIGNALFD=y
 CONFIG_TIMERFD=y
 CONFIG_EVENTFD=y
 CONFIG_SHMEM=y
+CONFIG_AIO=y
 CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_PCI_QUIRKS=y
 CONFIG_SLUB_DEBUG=y
 # CONFIG_SLAB is not set
 CONFIG_SLUB=y
 # CONFIG_SLOB is not set
 CONFIG_PROFILING=y
+CONFIG_TRACEPOINTS=y
 CONFIG_MARKERS=y
 # CONFIG_OPROFILE is not set
 CONFIG_HAVE_OPROFILE=y
@@ -139,15 +154,10 @@ CONFIG_KRETPROBES=y
 CONFIG_HAVE_IOREMAP_PROT=y
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
-# CONFIG_HAVE_ARCH_TRACEHOOK is not set
-# CONFIG_HAVE_DMA_ATTRS is not set
-CONFIG_USE_GENERIC_SMP_HELPERS=y
-# CONFIG_HAVE_CLK is not set
-CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
 # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
 CONFIG_BASE_SMALL=0
 CONFIG_MODULES=y
 # CONFIG_MODULE_FORCE_LOAD is not set
@@ -155,7 +165,6 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 # CONFIG_MODVERSIONS is not set
 # CONFIG_MODULE_SRCVERSION_ALL is not set
-CONFIG_KMOD=y
 CONFIG_STOP_MACHINE=y
 CONFIG_BLOCK=y
 CONFIG_BLK_DEV_IO_TRACE=y
@@ -175,7 +184,7 @@ CONFIG_IOSCHED_CFQ=y
 CONFIG_DEFAULT_CFQ=y
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="cfq"
-CONFIG_CLASSIC_RCU=y
+CONFIG_FREEZER=y
 
 #
 # Processor type and features
@@ -185,13 +194,14 @@ CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 CONFIG_SMP=y
+CONFIG_SPARSE_IRQ=y
+# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set
 CONFIG_X86_FIND_SMP_CONFIG=y
 CONFIG_X86_MPPARSE=y
-CONFIG_X86_PC=y
 # CONFIG_X86_ELAN is not set
-# CONFIG_X86_VOYAGER is not set
 # CONFIG_X86_GENERICARCH is not set
 # CONFIG_X86_VSMP is not set
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
 # CONFIG_PARAVIRT_GUEST is not set
 # CONFIG_MEMTEST is not set
 # CONFIG_M386 is not set
@@ -230,6 +240,11 @@ CONFIG_X86_CMPXCHG64=y
 CONFIG_X86_CMOV=y
 CONFIG_X86_MINIMUM_CPU_FAMILY=64
 CONFIG_X86_DEBUGCTLMSR=y
+CONFIG_CPU_SUP_INTEL=y
+CONFIG_CPU_SUP_AMD=y
+CONFIG_CPU_SUP_CENTAUR_64=y
+CONFIG_X86_DS=y
+CONFIG_X86_PTRACE_BTS=y
 CONFIG_HPET_TIMER=y
 CONFIG_HPET_EMULATE_RTC=y
 CONFIG_DMI=y
@@ -237,8 +252,11 @@ CONFIG_GART_IOMMU=y
 CONFIG_CALGARY_IOMMU=y
 CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y
 CONFIG_AMD_IOMMU=y
+CONFIG_AMD_IOMMU_STATS=y
 CONFIG_SWIOTLB=y
 CONFIG_IOMMU_HELPER=y
+CONFIG_IOMMU_API=y
+# CONFIG_MAXSMP is not set
 CONFIG_NR_CPUS=64
 CONFIG_SCHED_SMT=y
 CONFIG_SCHED_MC=y
@@ -247,12 +265,19 @@ CONFIG_PREEMPT_VOLUNTARY=y
 # CONFIG_PREEMPT is not set
 CONFIG_X86_LOCAL_APIC=y
 CONFIG_X86_IO_APIC=y
-# CONFIG_X86_MCE is not set
+CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
+CONFIG_X86_MCE=y
+CONFIG_X86_MCE_INTEL=y
+CONFIG_X86_MCE_AMD=y
 # CONFIG_I8K is not set
 CONFIG_MICROCODE=y
+CONFIG_MICROCODE_INTEL=y
+CONFIG_MICROCODE_AMD=y
 CONFIG_MICROCODE_OLD_INTERFACE=y
 CONFIG_X86_MSR=y
 CONFIG_X86_CPUID=y
+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y
+CONFIG_DIRECT_GBPAGES=y
 CONFIG_NUMA=y
 CONFIG_K8_NUMA=y
 CONFIG_X86_64_ACPI_NUMA=y
@@ -269,7 +294,6 @@ CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_SPARSEMEM=y
 CONFIG_NEED_MULTIPLE_NODES=y
 CONFIG_HAVE_MEMORY_PRESENT=y
-# CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPARSEMEM_EXTREME=y
 CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
 CONFIG_SPARSEMEM_VMEMMAP=y
@@ -280,10 +304,14 @@ CONFIG_SPARSEMEM_VMEMMAP=y
 CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_MIGRATION=y
-CONFIG_RESOURCES_64BIT=y
+CONFIG_PHYS_ADDR_T_64BIT=y
 CONFIG_ZONE_DMA_FLAG=1
 CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
+CONFIG_UNEVICTABLE_LRU=y
+CONFIG_X86_CHECK_BIOS_CORRUPTION=y
+CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y
+CONFIG_X86_RESERVE_LOW_64K=y
 CONFIG_MTRR=y
 # CONFIG_MTRR_SANITIZER is not set
 CONFIG_X86_PAT=y
@@ -302,11 +330,12 @@ CONFIG_PHYSICAL_START=0x1000000
 CONFIG_PHYSICAL_ALIGN=0x200000
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_COMPAT_VDSO is not set
+# CONFIG_CMDLINE_BOOL is not set
 CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
 
 #
-# Power management options
+# Power management and ACPI options
 #
 CONFIG_ARCH_HIBERNATION_HEADER=y
 CONFIG_PM=y
@@ -333,20 +362,14 @@ CONFIG_ACPI_BATTERY=y
 CONFIG_ACPI_BUTTON=y
 CONFIG_ACPI_FAN=y
 CONFIG_ACPI_DOCK=y
-# CONFIG_ACPI_BAY is not set
 CONFIG_ACPI_PROCESSOR=y
 CONFIG_ACPI_HOTPLUG_CPU=y
 CONFIG_ACPI_THERMAL=y
 CONFIG_ACPI_NUMA=y
-# CONFIG_ACPI_WMI is not set
-# CONFIG_ACPI_ASUS is not set
-# CONFIG_ACPI_TOSHIBA is not set
 # CONFIG_ACPI_CUSTOM_DSDT is not set
 CONFIG_ACPI_BLACKLIST_YEAR=0
 # CONFIG_ACPI_DEBUG is not set
-CONFIG_ACPI_EC=y
 # CONFIG_ACPI_PCI_SLOT is not set
-CONFIG_ACPI_POWER=y
 CONFIG_ACPI_SYSTEM=y
 CONFIG_X86_PM_TIMER=y
 CONFIG_ACPI_CONTAINER=y
@@ -381,12 +404,16 @@ CONFIG_X86_ACPI_CPUFREQ=y
 #
 # shared options
 #
-# CONFIG_X86_ACPI_CPUFREQ_PROC_INTF is not set
 # CONFIG_X86_SPEEDSTEP_LIB is not set
 CONFIG_CPU_IDLE=y
 CONFIG_CPU_IDLE_GOV_LADDER=y
 CONFIG_CPU_IDLE_GOV_MENU=y
 
+#
+# Memory power savings
+#
+# CONFIG_I7300_IDLE is not set
+
 #
 # Bus options (PCI etc.)
 #
@@ -395,8 +422,10 @@ CONFIG_PCI_DIRECT=y
 CONFIG_PCI_MMCONFIG=y
 CONFIG_PCI_DOMAINS=y
 CONFIG_DMAR=y
+# CONFIG_DMAR_DEFAULT_ON is not set
 CONFIG_DMAR_GFX_WA=y
 CONFIG_DMAR_FLOPPY_WA=y
+# CONFIG_INTR_REMAP is not set
 CONFIG_PCIEPORTBUS=y
 # CONFIG_HOTPLUG_PCI_PCIE is not set
 CONFIG_PCIEAER=y
@@ -405,6 +434,7 @@ CONFIG_ARCH_SUPPORTS_MSI=y
 CONFIG_PCI_MSI=y
 # CONFIG_PCI_LEGACY is not set
 # CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
 CONFIG_HT_IRQ=y
 CONFIG_ISA_DMA_API=y
 CONFIG_K8_NB=y
@@ -438,6 +468,8 @@ CONFIG_HOTPLUG_PCI=y
 #
 CONFIG_BINFMT_ELF=y
 CONFIG_COMPAT_BINFMT_ELF=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+# CONFIG_HAVE_AOUT is not set
 CONFIG_BINFMT_MISC=y
 CONFIG_IA32_EMULATION=y
 # CONFIG_IA32_AOUT is not set
@@ -449,6 +481,7 @@ CONFIG_NET=y
 #
 # Networking options
 #
+CONFIG_COMPAT_NET_DEV_OPS=y
 CONFIG_PACKET=y
 CONFIG_PACKET_MMAP=y
 CONFIG_UNIX=y
@@ -509,7 +542,6 @@ CONFIG_DEFAULT_CUBIC=y
 # CONFIG_DEFAULT_RENO is not set
 CONFIG_DEFAULT_TCP_CONG="cubic"
 CONFIG_TCP_MD5SIG=y
-# CONFIG_IP_VS is not set
 CONFIG_IPV6=y
 # CONFIG_IPV6_PRIVACY is not set
 # CONFIG_IPV6_ROUTER_PREF is not set
@@ -547,19 +579,21 @@ CONFIG_NF_CONNTRACK_IRC=y
 CONFIG_NF_CONNTRACK_SIP=y
 CONFIG_NF_CT_NETLINK=y
 CONFIG_NETFILTER_XTABLES=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_MARK=y
 CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
 CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
 CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_IP_VS is not set
 
 #
 # IP: Netfilter Configuration
 #
+CONFIG_NF_DEFRAG_IPV4=y
 CONFIG_NF_CONNTRACK_IPV4=y
 CONFIG_NF_CONNTRACK_PROC_COMPAT=y
 CONFIG_IP_NF_IPTABLES=y
@@ -585,8 +619,8 @@ CONFIG_IP_NF_MANGLE=y
 CONFIG_NF_CONNTRACK_IPV6=y
 CONFIG_IP6_NF_IPTABLES=y
 CONFIG_IP6_NF_MATCH_IPV6HEADER=y
-CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_LOG=y
+CONFIG_IP6_NF_FILTER=y
 CONFIG_IP6_NF_TARGET_REJECT=y
 CONFIG_IP6_NF_MANGLE=y
 # CONFIG_IP_DCCP is not set
@@ -594,6 +628,7 @@ CONFIG_IP6_NF_MANGLE=y
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
 # CONFIG_VLAN_8021Q is not set
 # CONFIG_DECNET is not set
 CONFIG_LLC=y
@@ -613,6 +648,7 @@ CONFIG_NET_SCHED=y
 # CONFIG_NET_SCH_HTB is not set
 # CONFIG_NET_SCH_HFSC is not set
 # CONFIG_NET_SCH_PRIO is not set
+# CONFIG_NET_SCH_MULTIQ is not set
 # CONFIG_NET_SCH_RED is not set
 # CONFIG_NET_SCH_SFQ is not set
 # CONFIG_NET_SCH_TEQL is not set
@@ -620,6 +656,7 @@ CONFIG_NET_SCHED=y
 # CONFIG_NET_SCH_GRED is not set
 # CONFIG_NET_SCH_DSMARK is not set
 # CONFIG_NET_SCH_NETEM is not set
+# CONFIG_NET_SCH_DRR is not set
 # CONFIG_NET_SCH_INGRESS is not set
 
 #
@@ -634,6 +671,7 @@ CONFIG_NET_CLS=y
 # CONFIG_NET_CLS_RSVP is not set
 # CONFIG_NET_CLS_RSVP6 is not set
 # CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
 CONFIG_NET_EMATCH=y
 CONFIG_NET_EMATCH_STACK=32
 # CONFIG_NET_EMATCH_CMP is not set
@@ -649,7 +687,9 @@ CONFIG_NET_CLS_ACT=y
 # CONFIG_NET_ACT_NAT is not set
 # CONFIG_NET_ACT_PEDIT is not set
 # CONFIG_NET_ACT_SIMP is not set
+# CONFIG_NET_ACT_SKBEDIT is not set
 CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
 
 #
 # Network testing
@@ -666,29 +706,33 @@ CONFIG_HAMRADIO=y
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
 # CONFIG_AF_RXRPC is not set
+# CONFIG_PHONET is not set
 CONFIG_FIB_RULES=y
-
-#
-# Wireless
-#
+CONFIG_WIRELESS=y
 CONFIG_CFG80211=y
+# CONFIG_CFG80211_REG_DEBUG is not set
 CONFIG_NL80211=y
+CONFIG_WIRELESS_OLD_REGULATORY=y
 CONFIG_WIRELESS_EXT=y
 CONFIG_WIRELESS_EXT_SYSFS=y
+# CONFIG_LIB80211 is not set
 CONFIG_MAC80211=y
 
 #
 # Rate control algorithm selection
 #
-CONFIG_MAC80211_RC_PID=y
-CONFIG_MAC80211_RC_DEFAULT_PID=y
-CONFIG_MAC80211_RC_DEFAULT="pid"
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
 # CONFIG_MAC80211_MESH is not set
 CONFIG_MAC80211_LEDS=y
 # CONFIG_MAC80211_DEBUGFS is not set
 # CONFIG_MAC80211_DEBUG_MENU is not set
-# CONFIG_IEEE80211 is not set
-# CONFIG_RFKILL is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+# CONFIG_RFKILL_INPUT is not set
+CONFIG_RFKILL_LEDS=y
 # CONFIG_NET_9P is not set
 
 #
@@ -712,7 +756,7 @@ CONFIG_PROC_EVENTS=y
 # CONFIG_MTD is not set
 # CONFIG_PARPORT is not set
 CONFIG_PNP=y
-# CONFIG_PNP_DEBUG is not set
+CONFIG_PNP_DEBUG_MESSAGES=y
 
 #
 # Protocols
@@ -740,21 +784,21 @@ CONFIG_BLK_DEV_RAM_SIZE=16384
 CONFIG_MISC_DEVICES=y
 # CONFIG_IBM_ASM is not set
 # CONFIG_PHANTOM is not set
-# CONFIG_EEPROM_93CX6 is not set
 # CONFIG_SGI_IOC4 is not set
 # CONFIG_TIFM_CORE is not set
-# CONFIG_ACER_WMI is not set
-# CONFIG_ASUS_LAPTOP is not set
-# CONFIG_FUJITSU_LAPTOP is not set
-# CONFIG_MSI_LAPTOP is not set
-# CONFIG_COMPAL_LAPTOP is not set
-# CONFIG_SONY_LAPTOP is not set
-# CONFIG_THINKPAD_ACPI is not set
-# CONFIG_INTEL_MENLOW is not set
+# CONFIG_ICS932S401 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
 # CONFIG_SGI_XP is not set
 # CONFIG_HP_ILO is not set
 # CONFIG_SGI_GRU is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_93CX6 is not set
 CONFIG_HAVE_IDE=y
 # CONFIG_IDE is not set
 
@@ -793,7 +837,7 @@ CONFIG_SCSI_WAIT_SCAN=m
 #
 CONFIG_SCSI_SPI_ATTRS=y
 # CONFIG_SCSI_FC_ATTRS is not set
-CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_ISCSI_ATTRS is not set
 # CONFIG_SCSI_SAS_ATTRS is not set
 # CONFIG_SCSI_SAS_LIBSAS is not set
 # CONFIG_SCSI_SRP_ATTRS is not set
@@ -864,6 +908,7 @@ CONFIG_PATA_OLDPIIX=y
 CONFIG_PATA_SCH=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
 # CONFIG_MD_LINEAR is not set
 # CONFIG_MD_RAID0 is not set
 # CONFIG_MD_RAID1 is not set
@@ -919,6 +964,9 @@ CONFIG_PHYLIB=y
 # CONFIG_BROADCOM_PHY is not set
 # CONFIG_ICPLUS_PHY is not set
 # CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
 # CONFIG_FIXED_PHY is not set
 # CONFIG_MDIO_BITBANG is not set
 CONFIG_NET_ETHERNET=y
@@ -942,6 +990,9 @@ CONFIG_NET_TULIP=y
 # CONFIG_IBM_NEW_EMAC_RGMII is not set
 # CONFIG_IBM_NEW_EMAC_TAH is not set
 # CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
 CONFIG_NET_PCI=y
 # CONFIG_PCNET32 is not set
 # CONFIG_AMD8111_ETH is not set
@@ -949,7 +1000,6 @@ CONFIG_NET_PCI=y
 # CONFIG_B44 is not set
 CONFIG_FORCEDETH=y
 # CONFIG_FORCEDETH_NAPI is not set
-# CONFIG_EEPRO100 is not set
 CONFIG_E100=y
 # CONFIG_FEALNX is not set
 # CONFIG_NATSEMI is not set
@@ -963,15 +1013,16 @@ CONFIG_8139TOO_PIO=y
 # CONFIG_R6040 is not set
 # CONFIG_SIS900 is not set
 # CONFIG_EPIC100 is not set
+# CONFIG_SMSC9420 is not set
 # CONFIG_SUNDANCE is not set
 # CONFIG_TLAN is not set
 # CONFIG_VIA_RHINE is not set
 # CONFIG_SC92031 is not set
+# CONFIG_ATL2 is not set
 CONFIG_NETDEV_1000=y
 # CONFIG_ACENIC is not set
 # CONFIG_DL2K is not set
 CONFIG_E1000=y
-# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set
 # CONFIG_E1000E is not set
 # CONFIG_IP1000 is not set
 # CONFIG_IGB is not set
@@ -989,18 +1040,23 @@ CONFIG_TIGON3=y
 # CONFIG_QLA3XXX is not set
 # CONFIG_ATL1 is not set
 # CONFIG_ATL1E is not set
+# CONFIG_JME is not set
 CONFIG_NETDEV_10000=y
 # CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3_DEPENDS=y
 # CONFIG_CHELSIO_T3 is not set
+# CONFIG_ENIC is not set
 # CONFIG_IXGBE is not set
 # CONFIG_IXGB is not set
 # CONFIG_S2IO is not set
 # CONFIG_MYRI10GE is not set
 # CONFIG_NETXEN_NIC is not set
 # CONFIG_NIU is not set
+# CONFIG_MLX4_EN is not set
 # CONFIG_MLX4_CORE is not set
 # CONFIG_TEHUTI is not set
 # CONFIG_BNX2X is not set
+# CONFIG_QLGE is not set
 # CONFIG_SFC is not set
 CONFIG_TR=y
 # CONFIG_IBMOL is not set
@@ -1013,9 +1069,8 @@ CONFIG_TR=y
 # CONFIG_WLAN_PRE80211 is not set
 CONFIG_WLAN_80211=y
 # CONFIG_PCMCIA_RAYCS is not set
-# CONFIG_IPW2100 is not set
-# CONFIG_IPW2200 is not set
 # CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
 # CONFIG_AIRO is not set
 # CONFIG_HERMES is not set
 # CONFIG_ATMEL is not set
@@ -1032,6 +1087,8 @@ CONFIG_WLAN_80211=y
 CONFIG_ATH5K=y
 # CONFIG_ATH5K_DEBUG is not set
 # CONFIG_ATH9K is not set
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
 # CONFIG_IWLCORE is not set
 # CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_IWLAGN is not set
@@ -1042,6 +1099,10 @@ CONFIG_ATH5K=y
 # CONFIG_ZD1211RW is not set
 # CONFIG_RT2X00 is not set
 
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
 #
 # USB Network Adapters
 #
@@ -1050,6 +1111,7 @@ CONFIG_ATH5K=y
 # CONFIG_USB_PEGASUS is not set
 # CONFIG_USB_RTL8150 is not set
 # CONFIG_USB_USBNET is not set
+# CONFIG_USB_HSO is not set
 CONFIG_NET_PCMCIA=y
 # CONFIG_PCMCIA_3C589 is not set
 # CONFIG_PCMCIA_3C574 is not set
@@ -1059,6 +1121,7 @@ CONFIG_NET_PCMCIA=y
 # CONFIG_PCMCIA_SMC91C92 is not set
 # CONFIG_PCMCIA_XIRC2PS is not set
 # CONFIG_PCMCIA_AXNET is not set
+# CONFIG_PCMCIA_IBMTR is not set
 # CONFIG_WAN is not set
 CONFIG_FDDI=y
 # CONFIG_DEFXX is not set
@@ -1110,6 +1173,7 @@ CONFIG_MOUSE_PS2_LOGIPS2PP=y
 CONFIG_MOUSE_PS2_SYNAPTICS=y
 CONFIG_MOUSE_PS2_LIFEBOOK=y
 CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
 # CONFIG_MOUSE_PS2_TOUCHKIT is not set
 # CONFIG_MOUSE_SERIAL is not set
 # CONFIG_MOUSE_APPLETOUCH is not set
@@ -1147,15 +1211,16 @@ CONFIG_INPUT_TOUCHSCREEN=y
 # CONFIG_TOUCHSCREEN_FUJITSU is not set
 # CONFIG_TOUCHSCREEN_GUNZE is not set
 # CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
 # CONFIG_TOUCHSCREEN_MTOUCH is not set
 # CONFIG_TOUCHSCREEN_INEXIO is not set
 # CONFIG_TOUCHSCREEN_MK712 is not set
 # CONFIG_TOUCHSCREEN_PENMOUNT is not set
 # CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
 # CONFIG_TOUCHSCREEN_TOUCHWIN is not set
-# CONFIG_TOUCHSCREEN_UCB1400 is not set
 # CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
 # CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
 CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_PCSPKR is not set
 # CONFIG_INPUT_APANEL is not set
@@ -1165,6 +1230,7 @@ CONFIG_INPUT_MISC=y
 # CONFIG_INPUT_KEYSPAN_REMOTE is not set
 # CONFIG_INPUT_POWERMATE is not set
 # CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
 # CONFIG_INPUT_UINPUT is not set
 
 #
@@ -1231,6 +1297,7 @@ CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_IPMI_HANDLER is not set
 CONFIG_HW_RANDOM=y
@@ -1260,6 +1327,7 @@ CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 # CONFIG_I2C_CHARDEV is not set
 CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
 
 #
 # I2C Hardware Bus support
@@ -1311,8 +1379,6 @@ CONFIG_I2C_I801=y
 # Miscellaneous I2C Chip support
 #
 # CONFIG_DS1682 is not set
-# CONFIG_EEPROM_AT24 is not set
-# CONFIG_EEPROM_LEGACY is not set
 # CONFIG_SENSORS_PCF8574 is not set
 # CONFIG_PCF8575 is not set
 # CONFIG_SENSORS_PCA9539 is not set
@@ -1331,8 +1397,78 @@ CONFIG_POWER_SUPPLY=y
 # CONFIG_POWER_SUPPLY_DEBUG is not set
 # CONFIG_PDA_POWER is not set
 # CONFIG_BATTERY_DS2760 is not set
-# CONFIG_HWMON is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ABITUGURU is not set
+# CONFIG_SENSORS_ABITUGURU3 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_K8TEMP is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_FSCHMD is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_CORETEMP is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_HDAPS is not set
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_SENSORS_APPLESMC is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
 CONFIG_THERMAL=y
+# CONFIG_THERMAL_HWMON is not set
 CONFIG_WATCHDOG=y
 # CONFIG_WATCHDOG_NOWAYOUT is not set
 
@@ -1352,15 +1488,18 @@ CONFIG_WATCHDOG=y
 # CONFIG_I6300ESB_WDT is not set
 # CONFIG_ITCO_WDT is not set
 # CONFIG_IT8712F_WDT is not set
+# CONFIG_IT87_WDT is not set
 # CONFIG_HP_WATCHDOG is not set
 # CONFIG_SC1200_WDT is not set
 # CONFIG_PC87413_WDT is not set
 # CONFIG_60XX_WDT is not set
 # CONFIG_SBC8360_WDT is not set
 # CONFIG_CPU5_WDT is not set
+# CONFIG_SMSC_SCH311X_WDT is not set
 # CONFIG_SMSC37B787_WDT is not set
 # CONFIG_W83627HF_WDT is not set
 # CONFIG_W83697HF_WDT is not set
+# CONFIG_W83697UG_WDT is not set
 # CONFIG_W83877F_WDT is not set
 # CONFIG_W83977F_WDT is not set
 # CONFIG_MACHZ_WDT is not set
@@ -1376,11 +1515,11 @@ CONFIG_WATCHDOG=y
 # USB-based Watchdog Cards
 #
 # CONFIG_USBPCWATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
 
 #
 # Sonics Silicon Backplane
 #
-CONFIG_SSB_POSSIBLE=y
 # CONFIG_SSB is not set
 
 #
@@ -1389,7 +1528,13 @@ CONFIG_SSB_POSSIBLE=y
 # CONFIG_MFD_CORE is not set
 # CONFIG_MFD_SM501 is not set
 # CONFIG_HTC_PASIC3 is not set
+# CONFIG_TWL4030_CORE is not set
 # CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_REGULATOR is not set
 
 #
 # Multimedia devices
@@ -1423,6 +1568,7 @@ CONFIG_DRM=y
 # CONFIG_DRM_I810 is not set
 # CONFIG_DRM_I830 is not set
 CONFIG_DRM_I915=y
+CONFIG_DRM_I915_KMS=y
 # CONFIG_DRM_MGA is not set
 # CONFIG_DRM_SIS is not set
 # CONFIG_DRM_VIA is not set
@@ -1432,6 +1578,7 @@ CONFIG_DRM_I915=y
 CONFIG_FB=y
 # CONFIG_FIRMWARE_EDID is not set
 # CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
 CONFIG_FB_CFB_FILLRECT=y
 CONFIG_FB_CFB_COPYAREA=y
 CONFIG_FB_CFB_IMAGEBLIT=y
@@ -1460,7 +1607,6 @@ CONFIG_FB_TILEBLITTING=y
 # CONFIG_FB_UVESA is not set
 # CONFIG_FB_VESA is not set
 CONFIG_FB_EFI=y
-# CONFIG_FB_IMAC is not set
 # CONFIG_FB_N411 is not set
 # CONFIG_FB_HGA is not set
 # CONFIG_FB_S1D13XXX is not set
@@ -1475,6 +1621,7 @@ CONFIG_FB_EFI=y
 # CONFIG_FB_S3 is not set
 # CONFIG_FB_SAVAGE is not set
 # CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
 # CONFIG_FB_NEOMAGIC is not set
 # CONFIG_FB_KYRO is not set
 # CONFIG_FB_3DFX is not set
@@ -1486,12 +1633,15 @@ CONFIG_FB_EFI=y
 # CONFIG_FB_CARMINE is not set
 # CONFIG_FB_GEODE is not set
 # CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
 # CONFIG_LCD_CLASS_DEVICE is not set
 CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_CORGI is not set
+CONFIG_BACKLIGHT_GENERIC=y
 # CONFIG_BACKLIGHT_PROGEAR is not set
 # CONFIG_BACKLIGHT_MBP_NVIDIA is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
 
 #
 # Display device support
@@ -1511,10 +1661,12 @@ CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_VGA16 is not set
 CONFIG_LOGO_LINUX_CLUT224=y
 CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
 CONFIG_SND=y
 CONFIG_SND_TIMER=y
 CONFIG_SND_PCM=y
 CONFIG_SND_HWDEP=y
+CONFIG_SND_JACK=y
 CONFIG_SND_SEQUENCER=y
 CONFIG_SND_SEQ_DUMMY=y
 CONFIG_SND_OSSEMUL=y
@@ -1522,6 +1674,8 @@ CONFIG_SND_MIXER_OSS=y
 CONFIG_SND_PCM_OSS=y
 CONFIG_SND_PCM_OSS_PLUGINS=y
 CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
 CONFIG_SND_DYNAMIC_MINORS=y
 CONFIG_SND_SUPPORT_OLD_API=y
 CONFIG_SND_VERBOSE_PROCFS=y
@@ -1575,11 +1729,16 @@ CONFIG_SND_PCI=y
 # CONFIG_SND_FM801 is not set
 CONFIG_SND_HDA_INTEL=y
 CONFIG_SND_HDA_HWDEP=y
+# CONFIG_SND_HDA_RECONFIG is not set
+# CONFIG_SND_HDA_INPUT_BEEP is not set
 CONFIG_SND_HDA_CODEC_REALTEK=y
 CONFIG_SND_HDA_CODEC_ANALOG=y
 CONFIG_SND_HDA_CODEC_SIGMATEL=y
 CONFIG_SND_HDA_CODEC_VIA=y
 CONFIG_SND_HDA_CODEC_ATIHDMI=y
+CONFIG_SND_HDA_CODEC_NVHDMI=y
+CONFIG_SND_HDA_CODEC_INTELHDMI=y
+CONFIG_SND_HDA_ELD=y
 CONFIG_SND_HDA_CODEC_CONEXANT=y
 CONFIG_SND_HDA_CODEC_CMEDIA=y
 CONFIG_SND_HDA_CODEC_SI3054=y
@@ -1612,6 +1771,7 @@ CONFIG_SND_USB=y
 # CONFIG_SND_USB_AUDIO is not set
 # CONFIG_SND_USB_USX2Y is not set
 # CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_US122L is not set
 CONFIG_SND_PCMCIA=y
 # CONFIG_SND_VXPOCKET is not set
 # CONFIG_SND_PDAUDIOCF is not set
@@ -1626,15 +1786,37 @@ CONFIG_HIDRAW=y
 # USB Input Devices
 #
 CONFIG_USB_HID=y
-CONFIG_USB_HIDINPUT_POWERBOOK=y
-CONFIG_HID_FF=y
 CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_COMPAT=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
 CONFIG_LOGITECH_FF=y
 # CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
 CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_TOPSEED=y
 CONFIG_THRUSTMASTER_FF=y
 CONFIG_ZEROPLUS_FF=y
-CONFIG_USB_HIDDEV=y
 CONFIG_USB_SUPPORT=y
 CONFIG_USB_ARCH_HAS_HCD=y
 CONFIG_USB_ARCH_HAS_OHCI=y
@@ -1652,6 +1834,8 @@ CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
 # CONFIG_USB_OTG is not set
 CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
 
 #
 # USB Host Controller Drivers
@@ -1660,6 +1844,7 @@ CONFIG_USB_MON=y
 CONFIG_USB_EHCI_HCD=y
 # CONFIG_USB_EHCI_ROOT_HUB_TT is not set
 # CONFIG_USB_EHCI_TT_NEWSCHED is not set
+# CONFIG_USB_OXU210HP_HCD is not set
 # CONFIG_USB_ISP116X_HCD is not set
 # CONFIG_USB_ISP1760_HCD is not set
 CONFIG_USB_OHCI_HCD=y
@@ -1669,6 +1854,8 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
 CONFIG_USB_UHCI_HCD=y
 # CONFIG_USB_SL811_HCD is not set
 # CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_WHCI_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
 
 #
 # USB Device Class drivers
@@ -1676,20 +1863,20 @@ CONFIG_USB_UHCI_HCD=y
 # CONFIG_USB_ACM is not set
 CONFIG_USB_PRINTER=y
 # CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
 
 #
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may also be needed;
 #
 
 #
-# may also be needed; see USB_STORAGE Help for more information
+# see USB_STORAGE Help for more information
 #
 CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_DEBUG is not set
 # CONFIG_USB_STORAGE_DATAFAB is not set
 # CONFIG_USB_STORAGE_FREECOM is not set
 # CONFIG_USB_STORAGE_ISD200 is not set
-# CONFIG_USB_STORAGE_DPCM is not set
 # CONFIG_USB_STORAGE_USBAT is not set
 # CONFIG_USB_STORAGE_SDDR09 is not set
 # CONFIG_USB_STORAGE_SDDR55 is not set
@@ -1697,7 +1884,6 @@ CONFIG_USB_STORAGE=y
 # CONFIG_USB_STORAGE_ALAUDA is not set
 # CONFIG_USB_STORAGE_ONETOUCH is not set
 # CONFIG_USB_STORAGE_KARMA is not set
-# CONFIG_USB_STORAGE_SIERRA is not set
 # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
 CONFIG_USB_LIBUSUAL=y
 
@@ -1718,6 +1904,7 @@ CONFIG_USB_LIBUSUAL=y
 # CONFIG_USB_EMI62 is not set
 # CONFIG_USB_EMI26 is not set
 # CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
 # CONFIG_USB_RIO500 is not set
 # CONFIG_USB_LEGOTOWER is not set
 # CONFIG_USB_LCD is not set
@@ -1735,7 +1922,13 @@ CONFIG_USB_LIBUSUAL=y
 # CONFIG_USB_IOWARRIOR is not set
 # CONFIG_USB_TEST is not set
 # CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
 # CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_UWB is not set
 # CONFIG_MMC is not set
 # CONFIG_MEMSTICK is not set
 CONFIG_NEW_LEDS=y
@@ -1744,6 +1937,7 @@ CONFIG_LEDS_CLASS=y
 #
 # LED drivers
 #
+# CONFIG_LEDS_ALIX2 is not set
 # CONFIG_LEDS_PCA9532 is not set
 # CONFIG_LEDS_CLEVO_MAIL is not set
 # CONFIG_LEDS_PCA955X is not set
@@ -1754,6 +1948,7 @@ CONFIG_LEDS_CLASS=y
 CONFIG_LEDS_TRIGGERS=y
 # CONFIG_LEDS_TRIGGER_TIMER is not set
 # CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
 # CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
 # CONFIG_ACCESSIBILITY is not set
 # CONFIG_INFINIBAND is not set
@@ -1793,6 +1988,7 @@ CONFIG_RTC_INTF_DEV=y
 # CONFIG_RTC_DRV_M41T80 is not set
 # CONFIG_RTC_DRV_S35390A is not set
 # CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
 
 #
 # SPI RTC drivers
@@ -1802,12 +1998,15 @@ CONFIG_RTC_INTF_DEV=y
 # Platform RTC drivers
 #
 CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
 # CONFIG_RTC_DRV_DS1511 is not set
 # CONFIG_RTC_DRV_DS1553 is not set
 # CONFIG_RTC_DRV_DS1742 is not set
 # CONFIG_RTC_DRV_STK17TA8 is not set
 # CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
 # CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
 # CONFIG_RTC_DRV_V3020 is not set
 
 #
@@ -1820,6 +2019,21 @@ CONFIG_DMADEVICES=y
 #
 # CONFIG_INTEL_IOATDMA is not set
 # CONFIG_UIO is not set
+# CONFIG_STAGING is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+# CONFIG_ACER_WMI is not set
+# CONFIG_ASUS_LAPTOP is not set
+# CONFIG_FUJITSU_LAPTOP is not set
+# CONFIG_MSI_LAPTOP is not set
+# CONFIG_PANASONIC_LAPTOP is not set
+# CONFIG_COMPAL_LAPTOP is not set
+# CONFIG_SONY_LAPTOP is not set
+# CONFIG_THINKPAD_ACPI is not set
+# CONFIG_INTEL_MENLOW is not set
+CONFIG_EEEPC_LAPTOP=y
+# CONFIG_ACPI_WMI is not set
+# CONFIG_ACPI_ASUS is not set
+# CONFIG_ACPI_TOSHIBA is not set
 
 #
 # Firmware Drivers
@@ -1830,8 +2044,7 @@ CONFIG_EFI_VARS=y
 # CONFIG_DELL_RBU is not set
 # CONFIG_DCDBAS is not set
 CONFIG_DMIID=y
-CONFIG_ISCSI_IBFT_FIND=y
-CONFIG_ISCSI_IBFT=y
+# CONFIG_ISCSI_IBFT_FIND is not set
 
 #
 # File systems
@@ -1841,22 +2054,25 @@ CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_XATTR=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
+# CONFIG_EXT4_FS is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
+CONFIG_FILE_LOCKING=y
 # CONFIG_XFS_FS is not set
 # CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 CONFIG_QUOTA=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 # CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_QUOTA_TREE=y
 # CONFIG_QFMT_V1 is not set
 CONFIG_QFMT_V2=y
 CONFIG_QUOTACTL=y
@@ -1890,16 +2106,14 @@ CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_PROC_VMCORE=y
 CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_HUGETLBFS=y
 CONFIG_HUGETLB_PAGE=y
 # CONFIG_CONFIGFS_FS is not set
-
-#
-# Miscellaneous filesystems
-#
+CONFIG_MISC_FILESYSTEMS=y
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
 # CONFIG_ECRYPT_FS is not set
@@ -1909,6 +2123,7 @@ CONFIG_HUGETLB_PAGE=y
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
 # CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
 # CONFIG_VXFS_FS is not set
 # CONFIG_MINIX_FS is not set
 # CONFIG_OMFS_FS is not set
@@ -1930,6 +2145,7 @@ CONFIG_NFS_ACL_SUPPORT=y
 CONFIG_NFS_COMMON=y
 CONFIG_SUNRPC=y
 CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_REGISTER_V4 is not set
 CONFIG_RPCSEC_GSS_KRB5=y
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
@@ -2006,7 +2222,7 @@ CONFIG_NLS_UTF8=y
 #
 CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 CONFIG_PRINTK_TIME=y
-CONFIG_ENABLE_WARN_DEPRECATED=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
 CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
@@ -2035,40 +2251,60 @@ CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VIRTUAL is not set
 # CONFIG_DEBUG_WRITECOUNT is not set
 CONFIG_DEBUG_MEMORY_INIT=y
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
 CONFIG_FRAME_POINTER=y
 # CONFIG_BOOT_PRINTK_DELAY is not set
 # CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
 # CONFIG_KPROBES_SANITY_TEST is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
 # CONFIG_LKDTM is not set
 # CONFIG_FAULT_INJECTION is not set
 # CONFIG_LATENCYTOP is not set
 CONFIG_SYSCTL_SYSCALL_CHECK=y
-CONFIG_HAVE_FTRACE=y
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
 CONFIG_HAVE_DYNAMIC_FTRACE=y
-# CONFIG_FTRACE is not set
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_HW_BRANCH_TRACER=y
+
+#
+# Tracers
+#
+# CONFIG_FUNCTION_TRACER is not set
 # CONFIG_IRQSOFF_TRACER is not set
 # CONFIG_SYSPROF_TRACER is not set
 # CONFIG_SCHED_TRACER is not set
 # CONFIG_CONTEXT_SWITCH_TRACER is not set
+# CONFIG_BOOT_TRACER is not set
+# CONFIG_TRACE_BRANCH_PROFILING is not set
+# CONFIG_POWER_TRACER is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_HW_BRANCH_TRACER is not set
 CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+# CONFIG_DYNAMIC_PRINTK_DEBUG is not set
 # CONFIG_SAMPLES is not set
 CONFIG_HAVE_ARCH_KGDB=y
 # CONFIG_KGDB is not set
 # CONFIG_STRICT_DEVMEM is not set
 CONFIG_X86_VERBOSE_BOOTUP=y
 CONFIG_EARLY_PRINTK=y
+CONFIG_EARLY_PRINTK_DBGP=y
 CONFIG_DEBUG_STACKOVERFLOW=y
 CONFIG_DEBUG_STACK_USAGE=y
 # CONFIG_DEBUG_PAGEALLOC is not set
 # CONFIG_DEBUG_PER_CPU_MAPS is not set
 # CONFIG_X86_PTDUMP is not set
 CONFIG_DEBUG_RODATA=y
-# CONFIG_DIRECT_GBPAGES is not set
 # CONFIG_DEBUG_RODATA_TEST is not set
 CONFIG_DEBUG_NX_TEST=m
 # CONFIG_IOMMU_DEBUG is not set
@@ -2092,8 +2328,10 @@ CONFIG_OPTIMIZE_INLINING=y
 CONFIG_KEYS=y
 CONFIG_KEYS_DEBUG_PROC_KEYS=y
 CONFIG_SECURITY=y
+# CONFIG_SECURITYFS is not set
 CONFIG_SECURITY_NETWORK=y
 # CONFIG_SECURITY_NETWORK_XFRM is not set
+# CONFIG_SECURITY_PATH is not set
 CONFIG_SECURITY_FILE_CAPABILITIES=y
 # CONFIG_SECURITY_ROOTPLUG is not set
 CONFIG_SECURITY_DEFAULT_MMAP_MIN_ADDR=65536
@@ -2104,7 +2342,6 @@ CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_SECURITY_SELINUX_DEVELOP=y
 CONFIG_SECURITY_SELINUX_AVC_STATS=y
 CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE=1
-# CONFIG_SECURITY_SELINUX_ENABLE_SECMARK_DEFAULT is not set
 # CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX is not set
 # CONFIG_SECURITY_SMACK is not set
 CONFIG_CRYPTO=y
@@ -2112,11 +2349,18 @@ CONFIG_CRYPTO=y
 #
 # Crypto core or helper
 #
+# CONFIG_CRYPTO_FIPS is not set
 CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
 CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
 CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
 CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
 # CONFIG_CRYPTO_GF128MUL is not set
 # CONFIG_CRYPTO_NULL is not set
 # CONFIG_CRYPTO_CRYPTD is not set
@@ -2151,6 +2395,7 @@ CONFIG_CRYPTO_HMAC=y
 # Digest
 #
 # CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_CRC32C_INTEL is not set
 # CONFIG_CRYPTO_MD4 is not set
 CONFIG_CRYPTO_MD5=y
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
@@ -2191,6 +2436,11 @@ CONFIG_CRYPTO_DES=y
 #
 # CONFIG_CRYPTO_DEFLATE is not set
 # CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_HW=y
 # CONFIG_CRYPTO_DEV_HIFN_795X is not set
 CONFIG_HAVE_KVM=y
@@ -2205,6 +2455,7 @@ CONFIG_VIRTUALIZATION=y
 CONFIG_BITREVERSE=y
 CONFIG_GENERIC_FIND_FIRST_BIT=y
 CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
 CONFIG_CRC_T10DIF=y
index 9dabd00e98055c8d21526e6f86a63deced16a98b..588a7aa937e145f4d0b6e0eb29860b13298ec363 100644 (file)
@@ -33,8 +33,6 @@
 #include <asm/sigframe.h>
 #include <asm/sys_ia32.h>
 
-#define DEBUG_SIG 0
-
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
 #define FIX_EFLAGS     (X86_EFLAGS_AC | X86_EFLAGS_OF | \
@@ -46,78 +44,83 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where);
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
 {
-       int err;
+       int err = 0;
 
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
 
-       /* If you change siginfo_t structure, please make sure that
-          this code is fixed accordingly.
-          It should never copy any pad contained in the structure
-          to avoid security leaks, but must copy the generic
-          3 ints plus the relevant union member.  */
-       err = __put_user(from->si_signo, &to->si_signo);
-       err |= __put_user(from->si_errno, &to->si_errno);
-       err |= __put_user((short)from->si_code, &to->si_code);
-
-       if (from->si_code < 0) {
-               err |= __put_user(from->si_pid, &to->si_pid);
-               err |= __put_user(from->si_uid, &to->si_uid);
-               err |= __put_user(ptr_to_compat(from->si_ptr), &to->si_ptr);
-       } else {
-               /*
-                * First 32bits of unions are always present:
-                * si_pid === si_band === si_tid === si_addr(LS half)
-                */
-               err |= __put_user(from->_sifields._pad[0],
-                                 &to->_sifields._pad[0]);
-               switch (from->si_code >> 16) {
-               case __SI_FAULT >> 16:
-                       break;
-               case __SI_CHLD >> 16:
-                       err |= __put_user(from->si_utime, &to->si_utime);
-                       err |= __put_user(from->si_stime, &to->si_stime);
-                       err |= __put_user(from->si_status, &to->si_status);
-                       /* FALL THROUGH */
-               default:
-               case __SI_KILL >> 16:
-                       err |= __put_user(from->si_uid, &to->si_uid);
-                       break;
-               case __SI_POLL >> 16:
-                       err |= __put_user(from->si_fd, &to->si_fd);
-                       break;
-               case __SI_TIMER >> 16:
-                       err |= __put_user(from->si_overrun, &to->si_overrun);
-                       err |= __put_user(ptr_to_compat(from->si_ptr),
-                                         &to->si_ptr);
-                       break;
-                        /* This is not generated by the kernel as of now.  */
-               case __SI_RT >> 16:
-               case __SI_MESGQ >> 16:
-                       err |= __put_user(from->si_uid, &to->si_uid);
-                       err |= __put_user(from->si_int, &to->si_int);
-                       break;
+       put_user_try {
+               /* If you change the siginfo_t structure, please make sure that
+                  this code is fixed accordingly.
+                  It should never copy any pad contained in the structure
+                  to avoid security leaks, but must copy the generic
+                  3 ints plus the relevant union member.  */
+               put_user_ex(from->si_signo, &to->si_signo);
+               put_user_ex(from->si_errno, &to->si_errno);
+               put_user_ex((short)from->si_code, &to->si_code);
+
+               if (from->si_code < 0) {
+                       put_user_ex(from->si_pid, &to->si_pid);
+                       put_user_ex(from->si_uid, &to->si_uid);
+                       put_user_ex(ptr_to_compat(from->si_ptr), &to->si_ptr);
+               } else {
+                       /*
+                        * First 32bits of unions are always present:
+                        * si_pid === si_band === si_tid === si_addr(LS half)
+                        */
+                       put_user_ex(from->_sifields._pad[0],
+                                         &to->_sifields._pad[0]);
+                       switch (from->si_code >> 16) {
+                       case __SI_FAULT >> 16:
+                               break;
+                       case __SI_CHLD >> 16:
+                               put_user_ex(from->si_utime, &to->si_utime);
+                               put_user_ex(from->si_stime, &to->si_stime);
+                               put_user_ex(from->si_status, &to->si_status);
+                               /* FALL THROUGH */
+                       default:
+                       case __SI_KILL >> 16:
+                               put_user_ex(from->si_uid, &to->si_uid);
+                               break;
+                       case __SI_POLL >> 16:
+                               put_user_ex(from->si_fd, &to->si_fd);
+                               break;
+                       case __SI_TIMER >> 16:
+                               put_user_ex(from->si_overrun, &to->si_overrun);
+                               put_user_ex(ptr_to_compat(from->si_ptr),
+                                           &to->si_ptr);
+                               break;
+                                /* This is not generated by the kernel as of now.  */
+                       case __SI_RT >> 16:
+                       case __SI_MESGQ >> 16:
+                               put_user_ex(from->si_uid, &to->si_uid);
+                               put_user_ex(from->si_int, &to->si_int);
+                               break;
+                       }
                }
-       }
+       } put_user_catch(err);
+
        return err;
 }
 
 int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
 {
-       int err;
+       int err = 0;
        u32 ptr32;
 
        if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
                return -EFAULT;
 
-       err = __get_user(to->si_signo, &from->si_signo);
-       err |= __get_user(to->si_errno, &from->si_errno);
-       err |= __get_user(to->si_code, &from->si_code);
+       get_user_try {
+               get_user_ex(to->si_signo, &from->si_signo);
+               get_user_ex(to->si_errno, &from->si_errno);
+               get_user_ex(to->si_code, &from->si_code);
 
-       err |= __get_user(to->si_pid, &from->si_pid);
-       err |= __get_user(to->si_uid, &from->si_uid);
-       err |= __get_user(ptr32, &from->si_ptr);
-       to->si_ptr = compat_ptr(ptr32);
+               get_user_ex(to->si_pid, &from->si_pid);
+               get_user_ex(to->si_uid, &from->si_uid);
+               get_user_ex(ptr32, &from->si_ptr);
+               to->si_ptr = compat_ptr(ptr32);
+       } get_user_catch(err);
 
        return err;
 }
@@ -142,17 +145,23 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
                                  struct pt_regs *regs)
 {
        stack_t uss, uoss;
-       int ret;
+       int ret, err = 0;
        mm_segment_t seg;
 
        if (uss_ptr) {
                u32 ptr;
 
                memset(&uss, 0, sizeof(stack_t));
-               if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)) ||
-                           __get_user(ptr, &uss_ptr->ss_sp) ||
-                           __get_user(uss.ss_flags, &uss_ptr->ss_flags) ||
-                           __get_user(uss.ss_size, &uss_ptr->ss_size))
+               if (!access_ok(VERIFY_READ, uss_ptr, sizeof(stack_ia32_t)))
+                       return -EFAULT;
+
+               get_user_try {
+                       get_user_ex(ptr, &uss_ptr->ss_sp);
+                       get_user_ex(uss.ss_flags, &uss_ptr->ss_flags);
+                       get_user_ex(uss.ss_size, &uss_ptr->ss_size);
+               } get_user_catch(err);
+
+               if (err)
                        return -EFAULT;
                uss.ss_sp = compat_ptr(ptr);
        }
@@ -161,10 +170,16 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
        ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
        set_fs(seg);
        if (ret >= 0 && uoss_ptr)  {
-               if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)) ||
-                   __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
-                   __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
-                   __put_user(uoss.ss_size, &uoss_ptr->ss_size))
+               if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
+                       return -EFAULT;
+
+               put_user_try {
+                       put_user_ex(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp);
+                       put_user_ex(uoss.ss_flags, &uoss_ptr->ss_flags);
+                       put_user_ex(uoss.ss_size, &uoss_ptr->ss_size);
+               } put_user_catch(err);
+
+               if (err)
                        ret = -EFAULT;
        }
        return ret;
@@ -173,75 +188,78 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
 /*
  * Do a signal return; undo the signal stack.
  */
+#define loadsegment_gs(v)      load_gs_index(v)
+#define loadsegment_fs(v)      loadsegment(fs, v)
+#define loadsegment_ds(v)      loadsegment(ds, v)
+#define loadsegment_es(v)      loadsegment(es, v)
+
+#define get_user_seg(seg)      ({ unsigned int v; savesegment(seg, v); v; })
+#define set_user_seg(seg, v)   loadsegment_##seg(v)
+
 #define COPY(x)                        {               \
-       err |= __get_user(regs->x, &sc->x);     \
+       get_user_ex(regs->x, &sc->x);           \
 }
 
-#define COPY_SEG_CPL3(seg)     {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               regs->seg = tmp | 3;                    \
-}
+#define GET_SEG(seg)           ({                      \
+       unsigned short tmp;                             \
+       get_user_ex(tmp, &sc->seg);                     \
+       tmp;                                            \
+})
+
+#define COPY_SEG_CPL3(seg)     do {                    \
+       regs->seg = GET_SEG(seg) | 3;                   \
+} while (0)
 
 #define RELOAD_SEG(seg)                {               \
-       unsigned int cur, pre;                  \
-       err |= __get_user(pre, &sc->seg);       \
-       savesegment(seg, cur);                  \
+       unsigned int pre = GET_SEG(seg);        \
+       unsigned int cur = get_user_seg(seg);   \
        pre |= 3;                               \
        if (pre != cur)                         \
-               loadsegment(seg, pre);          \
+               set_user_seg(seg, pre);         \
 }
 
 static int ia32_restore_sigcontext(struct pt_regs *regs,
                                   struct sigcontext_ia32 __user *sc,
                                   unsigned int *pax)
 {
-       unsigned int tmpflags, gs, oldgs, err = 0;
+       unsigned int tmpflags, err = 0;
        void __user *buf;
        u32 tmp;
 
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
-#if DEBUG_SIG
-       printk(KERN_DEBUG "SIG restore_sigcontext: "
-              "sc=%p err(%x) eip(%x) cs(%x) flg(%x)\n",
-              sc, sc->err, sc->ip, sc->cs, sc->flags);
-#endif
-
-       /*
-        * Reload fs and gs if they have changed in the signal
-        * handler.  This does not handle long fs/gs base changes in
-        * the handler, but does not clobber them at least in the
-        * normal case.
-        */
-       err |= __get_user(gs, &sc->gs);
-       gs |= 3;
-       savesegment(gs, oldgs);
-       if (gs != oldgs)
-               load_gs_index(gs);
-
-       RELOAD_SEG(fs);
-       RELOAD_SEG(ds);
-       RELOAD_SEG(es);
-
-       COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-       COPY(dx); COPY(cx); COPY(ip);
-       /* Don't touch extended registers */
-
-       COPY_SEG_CPL3(cs);
-       COPY_SEG_CPL3(ss);
-
-       err |= __get_user(tmpflags, &sc->flags);
-       regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-       /* disable syscall checks */
-       regs->orig_ax = -1;
-
-       err |= __get_user(tmp, &sc->fpstate);
-       buf = compat_ptr(tmp);
-       err |= restore_i387_xstate_ia32(buf);
-
-       err |= __get_user(*pax, &sc->ax);
+       get_user_try {
+               /*
+                * Reload fs and gs if they have changed in the signal
+                * handler.  This does not handle long fs/gs base changes in
+                * the handler, but does not clobber them at least in the
+                * normal case.
+                */
+               RELOAD_SEG(gs);
+               RELOAD_SEG(fs);
+               RELOAD_SEG(ds);
+               RELOAD_SEG(es);
+
+               COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+               COPY(dx); COPY(cx); COPY(ip);
+               /* Don't touch extended registers */
+
+               COPY_SEG_CPL3(cs);
+               COPY_SEG_CPL3(ss);
+
+               get_user_ex(tmpflags, &sc->flags);
+               regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               /* disable syscall checks */
+               regs->orig_ax = -1;
+
+               get_user_ex(tmp, &sc->fpstate);
+               buf = compat_ptr(tmp);
+               err |= restore_i387_xstate_ia32(buf);
+
+               get_user_ex(*pax, &sc->ax);
+       } get_user_catch(err);
+
        return err;
 }
 
@@ -317,38 +335,36 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
                                 void __user *fpstate,
                                 struct pt_regs *regs, unsigned int mask)
 {
-       int tmp, err = 0;
-
-       savesegment(gs, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-       savesegment(fs, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->fs);
-       savesegment(ds, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->ds);
-       savesegment(es, tmp);
-       err |= __put_user(tmp, (unsigned int __user *)&sc->es);
-
-       err |= __put_user(regs->di, &sc->di);
-       err |= __put_user(regs->si, &sc->si);
-       err |= __put_user(regs->bp, &sc->bp);
-       err |= __put_user(regs->sp, &sc->sp);
-       err |= __put_user(regs->bx, &sc->bx);
-       err |= __put_user(regs->dx, &sc->dx);
-       err |= __put_user(regs->cx, &sc->cx);
-       err |= __put_user(regs->ax, &sc->ax);
-       err |= __put_user(current->thread.trap_no, &sc->trapno);
-       err |= __put_user(current->thread.error_code, &sc->err);
-       err |= __put_user(regs->ip, &sc->ip);
-       err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->sp, &sc->sp_at_signal);
-       err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
-
-       err |= __put_user(ptr_to_compat(fpstate), &sc->fpstate);
-
-       /* non-iBCS2 extensions.. */
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(current->thread.cr2, &sc->cr2);
+       int err = 0;
+
+       put_user_try {
+               put_user_ex(get_user_seg(gs), (unsigned int __user *)&sc->gs);
+               put_user_ex(get_user_seg(fs), (unsigned int __user *)&sc->fs);
+               put_user_ex(get_user_seg(ds), (unsigned int __user *)&sc->ds);
+               put_user_ex(get_user_seg(es), (unsigned int __user *)&sc->es);
+
+               put_user_ex(regs->di, &sc->di);
+               put_user_ex(regs->si, &sc->si);
+               put_user_ex(regs->bp, &sc->bp);
+               put_user_ex(regs->sp, &sc->sp);
+               put_user_ex(regs->bx, &sc->bx);
+               put_user_ex(regs->dx, &sc->dx);
+               put_user_ex(regs->cx, &sc->cx);
+               put_user_ex(regs->ax, &sc->ax);
+               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.error_code, &sc->err);
+               put_user_ex(regs->ip, &sc->ip);
+               put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->sp, &sc->sp_at_signal);
+               put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
+
+               put_user_ex(ptr_to_compat(fpstate), &sc->fpstate);
+
+               /* non-iBCS2 extensions.. */
+               put_user_ex(mask, &sc->oldmask);
+               put_user_ex(current->thread.cr2, &sc->cr2);
+       } put_user_catch(err);
 
        return err;
 }
@@ -437,13 +453,17 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
                else
                        restorer = &frame->retcode;
        }
-       err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
 
-       /*
-        * These are actually not used anymore, but left because some
-        * gdb versions depend on them as a marker.
-        */
-       err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
+       put_user_try {
+               put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+               /*
+                * These are actually not used anymore, but left because some
+                * gdb versions depend on them as a marker.
+                */
+               put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+       } put_user_catch(err);
+
        if (err)
                return -EFAULT;
 
@@ -462,11 +482,6 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
        regs->cs = __USER32_CS;
        regs->ss = __USER32_DS;
 
-#if DEBUG_SIG
-       printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
-              current->comm, current->pid, frame, regs->ip, frame->pretcode);
-#endif
-
        return 0;
 }
 
@@ -496,41 +511,40 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                return -EFAULT;
 
-       err |= __put_user(sig, &frame->sig);
-       err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
-       err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
-       err |= copy_siginfo_to_user32(&frame->info, info);
-       if (err)
-               return -EFAULT;
+       put_user_try {
+               put_user_ex(sig, &frame->sig);
+               put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
+               put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
+               err |= copy_siginfo_to_user32(&frame->info, info);
 
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-                                    regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               return -EFAULT;
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+                                            regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+               if (ka->sa.sa_flags & SA_RESTORER)
+                       restorer = ka->sa.sa_restorer;
+               else
+                       restorer = VDSO32_SYMBOL(current->mm->context.vdso,
+                                                rt_sigreturn);
+               put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
+
+               /*
+                * Not actually used anymore, but left because some gdb
+                * versions need it.
+                */
+               put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
+       } put_user_catch(err);
 
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-       else
-               restorer = VDSO32_SYMBOL(current->mm->context.vdso,
-                                        rt_sigreturn);
-       err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);
-
-       /*
-        * Not actually used anymore, but left because some gdb
-        * versions need it.
-        */
-       err |= __put_user(*((u64 *)&code), (u64 *)frame->retcode);
        if (err)
                return -EFAULT;
 
@@ -549,10 +563,5 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        regs->cs = __USER32_CS;
        regs->ss = __USER32_DS;
 
-#if DEBUG_SIG
-       printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
-              current->comm, current->pid, frame, regs->ip, frame->pretcode);
-#endif
-
        return 0;
 }
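
The conversion above replaces long chains of "err |= __put_user(...)" /
"err |= __get_user(...)" with put_user_try/put_user_catch blocks (and their
get_user_* counterparts), letting the uaccess exception tables funnel any
fault into a single error slot instead of checking every store. A minimal
sketch of the idiom, using a hypothetical structure to keep it
self-contained:

    struct demo { int a; long b; };

    static int copy_demo_to_user(struct demo __user *u, const struct demo *k)
    {
            int err = 0;

            if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
                    return -EFAULT;

            put_user_try {
                    /* no per-store error handling needed... */
                    put_user_ex(k->a, &u->a);
                    put_user_ex(k->b, &u->b);
            } put_user_catch(err);  /* ...any fault lands in err */

            return err;
    }
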
index 5a0d76dc56a46431690702be8f745d6813db4cfc..097a6b64c24ddfb3c348b0a16d3b6d90242f68a9 100644 (file)
@@ -112,8 +112,8 @@ ENTRY(ia32_sysenter_target)
        CFI_DEF_CFA     rsp,0
        CFI_REGISTER    rsp,rbp
        SWAPGS_UNSAFE_STACK
-       movq    %gs:pda_kernelstack, %rsp
-       addq    $(PDA_STACKOFFSET),%rsp 
+       movq    PER_CPU_VAR(kernel_stack), %rsp
+       addq    $(KERNEL_STACK_OFFSET),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs, here we enable it straight after entry:
@@ -273,13 +273,13 @@ ENDPROC(ia32_sysenter_target)
 ENTRY(ia32_cstar_target)
        CFI_STARTPROC32 simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,PDA_STACKOFFSET
+       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
        movl    %esp,%r8d
        CFI_REGISTER    rsp,r8
-       movq    %gs:pda_kernelstack,%rsp
+       movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs on/off section: the syscall
         * disabled irqs and here we enable it straight after entry:
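
The entry-code hunks above drop the last PDA field accesses: instead of
%gs:pda_kernelstack, the kernel stack top now comes from an ordinary
per-cpu variable via PER_CPU_VAR(kernel_stack). On the C side the same
symbol is reachable through the usual per-cpu accessors; a sketch, assuming
the kernel_stack per-cpu variable introduced by this conversion:

    DECLARE_PER_CPU(unsigned long, kernel_stack);

    static inline unsigned long demo_kernel_stack_top(void)
    {
            /* percpu_read() compiles to a single %gs-relative load,
             * matching the movq PER_CPU_VAR(kernel_stack) in the asm. */
            return percpu_read(kernel_stack);
    }
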
index 3c601f8224bec75642bbb86c0a0b9a369f9c81a1..bb70e397aa84c0e7cfa452c1efa9298b8a58c985 100644 (file)
@@ -55,7 +55,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
        dump->regs.ds = (u16)regs->ds;
        dump->regs.es = (u16)regs->es;
        dump->regs.fs = (u16)regs->fs;
-       savesegment(gs, dump->regs.gs);
+       dump->regs.gs = get_user_gs(regs);
        dump->regs.orig_ax = regs->orig_ax;
        dump->regs.ip = regs->ip;
        dump->regs.cs = (u16)regs->cs;
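
Here the dumped gs value no longer comes from the live segment register but
from get_user_gs(regs), which knows where user %gs actually lives once
32-bit kernels may handle it lazily. Roughly, the two flavours look like
this (illustrative, not the verbatim asm/system.h definitions):

    #ifdef CONFIG_X86_32_LAZY_GS
    /* user gs is left live across kernel entry: read the register */
    # define get_user_gs(regs) \
            (u16)({ unsigned long v; savesegment(gs, v); v; })
    #else
    /* stackprotector kernels save user gs into pt_regs on entry */
    # define get_user_gs(regs)      (u16)((regs)->gs)
    #endif
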
index 9830681446ad4346bacfbbed00efdb95e52eca14..4518dc50090380b6676074b3f2466709ddea5a4b 100644 (file)
@@ -102,9 +102,6 @@ static inline void disable_acpi(void)
        acpi_noirq = 1;
 }
 
-/* Fixmap pages to reserve for ACPI boot-time tables (see fixmap.h) */
-#define FIX_ACPI_PAGES 4
-
 extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq);
 
 static inline void acpi_noirq_set(void) { acpi_noirq = 1; }
index ab1d51a8855e18e80ca4cb24b9ef330e749c13dd..394d177d721b0fdec5917b6cfc9d410ede7cb09a 100644 (file)
@@ -1,15 +1,18 @@
 #ifndef _ASM_X86_APIC_H
 #define _ASM_X86_APIC_H
 
-#include <linux/pm.h>
+#include <linux/cpumask.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
 
 #include <asm/alternative.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
+#include <asm/apicdef.h>
+#include <asm/atomic.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
 #include <asm/system.h>
-#include <asm/cpufeature.h>
 #include <asm/msr.h>
 
 #define ARCH_APICTIMER_STOPS_ON_C3     1
        } while (0)
 
 
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
 extern void generic_apic_probe(void);
+#else
+static inline void generic_apic_probe(void)
+{
+}
+#endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 
@@ -41,6 +50,21 @@ extern unsigned int apic_verbosity;
 extern int local_apic_timer_c2_ok;
 
 extern int disable_apic;
+
+#ifdef CONFIG_SMP
+extern void __inquire_remote_apic(int apicid);
+#else /* CONFIG_SMP */
+static inline void __inquire_remote_apic(int apicid)
+{
+}
+#endif /* CONFIG_SMP */
+
+static inline void default_inquire_remote_apic(int apicid)
+{
+       if (apic_verbosity >= APIC_DEBUG)
+               __inquire_remote_apic(apicid);
+}
+
 /*
  * Basic functions accessing APICs.
  */
@@ -51,7 +75,14 @@ extern int disable_apic;
 #define setup_secondary_clock setup_secondary_APIC_clock
 #endif
 
+#ifdef CONFIG_X86_VSMP
 extern int is_vsmp_box(void);
+#else
+static inline int is_vsmp_box(void)
+{
+       return 0;
+}
+#endif
 extern void xapic_wait_icr_idle(void);
 extern u32 safe_xapic_wait_icr_idle(void);
 extern void xapic_icr_write(u32, u32);
@@ -71,6 +102,12 @@ static inline u32 native_apic_mem_read(u32 reg)
        return *((volatile u32 *)(APIC_BASE + reg));
 }
 
+extern void native_apic_wait_icr_idle(void);
+extern u32 native_safe_apic_wait_icr_idle(void);
+extern void native_apic_icr_write(u32 low, u32 id);
+extern u64 native_apic_icr_read(void);
+
+#ifdef CONFIG_X86_X2APIC
 static inline void native_apic_msr_write(u32 reg, u32 v)
 {
        if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR ||
@@ -91,8 +128,32 @@ static inline u32 native_apic_msr_read(u32 reg)
        return low;
 }
 
-#ifndef CONFIG_X86_32
-extern int x2apic;
+static inline void native_x2apic_wait_icr_idle(void)
+{
+       /* no need to wait for icr idle in x2apic */
+       return;
+}
+
+static inline u32 native_safe_x2apic_wait_icr_idle(void)
+{
+       /* no need to wait for icr idle in x2apic */
+       return 0;
+}
+
+static inline void native_x2apic_icr_write(u32 low, u32 id)
+{
+       wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
+}
+
+static inline u64 native_x2apic_icr_read(void)
+{
+       unsigned long val;
+
+       rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
+       return val;
+}
+
+extern int x2apic, x2apic_phys;
 extern void check_x2apic(void);
 extern void enable_x2apic(void);
 extern void enable_IR_x2apic(void);
@@ -110,30 +171,24 @@ static inline int x2apic_enabled(void)
        return 0;
 }
 #else
-#define x2apic_enabled()       0
+static inline void check_x2apic(void)
+{
+}
+static inline void enable_x2apic(void)
+{
+}
+static inline void enable_IR_x2apic(void)
+{
+}
+static inline int x2apic_enabled(void)
+{
+       return 0;
+}
 #endif
 
-struct apic_ops {
-       u32 (*read)(u32 reg);
-       void (*write)(u32 reg, u32 v);
-       u64 (*icr_read)(void);
-       void (*icr_write)(u32 low, u32 high);
-       void (*wait_icr_idle)(void);
-       u32 (*safe_wait_icr_idle)(void);
-};
-
-extern struct apic_ops *apic_ops;
-
-#define apic_read (apic_ops->read)
-#define apic_write (apic_ops->write)
-#define apic_icr_read (apic_ops->icr_read)
-#define apic_icr_write (apic_ops->icr_write)
-#define apic_wait_icr_idle (apic_ops->wait_icr_idle)
-#define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle)
-
 extern int get_physical_broadcast(void);
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_X2APIC
 static inline void ack_x2APIC_irq(void)
 {
        /* Docs say use 0 for future compatibility */
@@ -141,18 +196,6 @@ static inline void ack_x2APIC_irq(void)
 }
 #endif
 
-
-static inline void ack_APIC_irq(void)
-{
-       /*
-        * ack_APIC_irq() actually gets compiled as a single instruction
-        * ... yummie.
-        */
-
-       /* Docs say use 0 for future compatibility */
-       apic_write(APIC_EOI, 0);
-}
-
 extern int lapic_get_maxlvt(void);
 extern void clear_local_APIC(void);
 extern void connect_bsp_APIC(void);
@@ -196,4 +239,329 @@ static inline void disable_local_APIC(void) { }
 
 #endif /* !CONFIG_X86_LOCAL_APIC */
 
+#ifdef CONFIG_X86_64
+#define        SET_APIC_ID(x)          (apic->set_apic_id(x))
+#endif
+
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch data struct.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+struct apic {
+       char *name;
+
+       int (*probe)(void);
+       int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
+       int (*apic_id_registered)(void);
+
+       u32 irq_delivery_mode;
+       u32 irq_dest_mode;
+
+       const struct cpumask *(*target_cpus)(void);
+
+       int disable_esr;
+
+       int dest_logical;
+       unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+       unsigned long (*check_apicid_present)(int apicid);
+
+       void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
+       void (*init_apic_ldr)(void);
+
+       physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+
+       void (*setup_apic_routing)(void);
+       int (*multi_timer_check)(int apic, int irq);
+       int (*apicid_to_node)(int logical_apicid);
+       int (*cpu_to_logical_apicid)(int cpu);
+       int (*cpu_present_to_apicid)(int mps_cpu);
+       physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+       void (*setup_portio_remap)(void);
+       int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
+       void (*enable_apic_mode)(void);
+       int (*phys_pkg_id)(int cpuid_apic, int index_msb);
+
+       /*
+        * When one of the OEM-check hooks (acpi_madt_oem_check
+        * above, or mps_oem_check below) returns 1, the kernel
+        * switches to this apic driver; essentially they are
+        * additional probe functions:
+        */
+       int (*mps_oem_check)(struct mpc_table *mpc, char *oem, char *productid);
+
+       unsigned int (*get_apic_id)(unsigned long x);
+       unsigned long (*set_apic_id)(unsigned int id);
+       unsigned long apic_id_mask;
+
+       unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+       unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                              const struct cpumask *andmask);
+
+       /* ipi */
+       void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+       void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+                                        int vector);
+       void (*send_IPI_allbutself)(int vector);
+       void (*send_IPI_all)(int vector);
+       void (*send_IPI_self)(int vector);
+
+       /* wakeup_secondary_cpu */
+       int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
+
+       int trampoline_phys_low;
+       int trampoline_phys_high;
+
+       void (*wait_for_init_deassert)(atomic_t *deassert);
+       void (*smp_callin_clear_local_apic)(void);
+       void (*inquire_remote_apic)(int apicid);
+
+       /* apic ops */
+       u32 (*read)(u32 reg);
+       void (*write)(u32 reg, u32 v);
+       u64 (*icr_read)(void);
+       void (*icr_write)(u32 low, u32 high);
+       void (*wait_icr_idle)(void);
+       u32 (*safe_wait_icr_idle)(void);
+};
+
+/*
+ * Pointer to the local APIC driver in use on this system (there's
+ * always just one such driver in use - the kernel decides via an
+ * early probing process which one it picks - and then sticks to it):
+ */
+extern struct apic *apic;
+
+/*
+ * APIC functionality to boot other CPUs - only used on SMP:
+ */
+#ifdef CONFIG_SMP
+extern atomic_t init_deasserted;
+extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
+#endif
+
+static inline u32 apic_read(u32 reg)
+{
+       return apic->read(reg);
+}
+
+static inline void apic_write(u32 reg, u32 val)
+{
+       apic->write(reg, val);
+}
+
+static inline u64 apic_icr_read(void)
+{
+       return apic->icr_read();
+}
+
+static inline void apic_icr_write(u32 low, u32 high)
+{
+       apic->icr_write(low, high);
+}
+
+static inline void apic_wait_icr_idle(void)
+{
+       apic->wait_icr_idle();
+}
+
+static inline u32 safe_apic_wait_icr_idle(void)
+{
+       return apic->safe_wait_icr_idle();
+}
+
+static inline void ack_APIC_irq(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+       /*
+        * ack_APIC_irq() actually gets compiled as a single instruction
+        * ... yummie.
+        */
+
+       /* Docs say use 0 for future compatibility */
+       apic_write(APIC_EOI, 0);
+#endif
+}
+
+static inline unsigned default_get_apic_id(unsigned long x)
+{
+       unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
+
+       if (APIC_XAPIC(ver))
+               return (x >> 24) & 0xFF;
+       else
+               return (x >> 24) & 0x0F;
+}
+
+/*
+ * Warm reset vector default position:
+ */
+#define DEFAULT_TRAMPOLINE_PHYS_LOW            0x467
+#define DEFAULT_TRAMPOLINE_PHYS_HIGH           0x469
+
+#ifdef CONFIG_X86_64
+extern struct apic apic_flat;
+extern struct apic apic_physflat;
+extern struct apic apic_x2apic_cluster;
+extern struct apic apic_x2apic_phys;
+extern int default_acpi_madt_oem_check(char *, char *);
+
+extern void apic_send_IPI_self(int vector);
+
+extern struct apic apic_x2apic_uv_x;
+DECLARE_PER_CPU(int, x2apic_extra_bits);
+
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline void default_wait_for_init_deassert(atomic_t *deassert)
+{
+       while (!atomic_read(deassert))
+               cpu_relax();
+       return;
+}
+
+extern void generic_bigsmp_probe(void);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+
+#include <asm/smp.h>
+
+#define APIC_DFR_VALUE (APIC_DFR_FLAT)
+
+static inline const struct cpumask *default_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+       return cpu_online_mask;
+#else
+       return cpumask_of(0);
+#endif
+}
+
+DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+
+static inline unsigned int read_apic_id(void)
+{
+       unsigned int reg;
+
+       reg = apic_read(APIC_ID);
+
+       return apic->get_apic_id(reg);
+}
+
+extern void default_setup_apic_routing(void);
+
+#ifdef CONFIG_X86_32
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+extern void default_init_apic_ldr(void);
+
+static inline int default_apic_id_registered(void)
+{
+       return physid_isset(read_apic_id(), phys_cpu_present_map);
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       return cpumask_bits(cpumask)[0];
+}
+
+static inline unsigned int
+default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                              const struct cpumask *andmask)
+{
+       unsigned long mask1 = cpumask_bits(cpumask)[0];
+       unsigned long mask2 = cpumask_bits(andmask)[0];
+       unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
+
+       return (unsigned int)(mask1 & mask2 & mask3);
+}
+
+static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+extern int default_apicid_to_node(int logical_apicid);
+
+#endif
+
+static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long default_check_apicid_present(int bit)
+{
+       return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       return phys_map;
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int default_cpu_to_logical_apicid(int cpu)
+{
+       return 1 << cpu;
+}
+
+static inline int __default_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
+               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+       else
+               return BAD_APICID;
+}
+
+static inline int
+__default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
+}
+
+#ifdef CONFIG_X86_32
+static inline int default_cpu_present_to_apicid(int mps_cpu)
+{
+       return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+static inline int
+default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#else
+extern int default_cpu_present_to_apicid(int mps_cpu);
+extern int default_check_phys_apicid_present(int boot_cpu_physical_apicid);
+#endif
+
+static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
+{
+       return physid_mask_of_physid(phys_apicid);
+}
+
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_X86_32
+extern u8 cpu_2_logical_apicid[NR_CPUS];
+#endif
+
 #endif /* _ASM_X86_APIC_H */
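
struct apic above folds the old apic_ops table and the 32-bit subarch hooks
into one driver object, with apic_read()/apic_write()/apic_icr_*() now
dispatching through the single global 'apic' pointer. For illustration, a
heavily stripped driver instance could look like this (hypothetical name;
a real driver such as apic_flat fills in every hook):

    static struct apic apic_demo = {
            .name                   = "demo",

            /* register access, reusing the native MMIO helpers: */
            .read                   = native_apic_mem_read,
            .write                  = native_apic_mem_write,
            .icr_read               = native_apic_icr_read,
            .icr_write              = native_apic_icr_write,
            .wait_icr_idle          = native_apic_wait_icr_idle,
            .safe_wait_icr_idle     = native_safe_apic_wait_icr_idle,

            /* probe, IPI and topology hooks elided in this sketch */
    };

Switching APIC implementations then reduces to pointing 'apic' at a
different instance during early probing.
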
index 63134e31e8b933acb973ee2e66f62a2f815bb326..bc9514fb3b13f70d75b11f037546eca8e43d07fd 100644 (file)
@@ -53,6 +53,7 @@
 #define                APIC_ESR_SENDILL        0x00020
 #define                APIC_ESR_RECVILL        0x00040
 #define                APIC_ESR_ILLREGA        0x00080
+#define        APIC_LVTCMCI    0x2f0
 #define        APIC_ICR        0x300
 #define                APIC_DEST_SELF          0x40000
 #define                APIC_DEST_ALLINC        0x80000
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
new file mode 100644 (file)
index 0000000..82f613c
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _ASM_X86_APICNUM_H
+#define _ASM_X86_APICNUM_H
+
+/* define MAX_IO_APICS */
+#ifdef CONFIG_X86_32
+# define MAX_IO_APICS 64
+#else
+# define MAX_IO_APICS 128
+# define MAX_LOCAL_APIC 32768
+#endif
+
+#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
new file mode 100644 (file)
index 0000000..20370c6
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ *  Machine specific APM BIOS functions for generic.
+ *  Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+
+#ifndef _ASM_X86_MACH_DEFAULT_APM_H
+#define _ASM_X86_MACH_DEFAULT_APM_H
+
+#ifdef APM_ZERO_SEGS
+#      define APM_DO_ZERO_SEGS \
+               "pushl %%ds\n\t" \
+               "pushl %%es\n\t" \
+               "xorl %%edx, %%edx\n\t" \
+               "mov %%dx, %%ds\n\t" \
+               "mov %%dx, %%es\n\t" \
+               "mov %%dx, %%fs\n\t" \
+               "mov %%dx, %%gs\n\t"
+#      define APM_DO_POP_SEGS \
+               "popl %%es\n\t" \
+               "popl %%ds\n\t"
+#else
+#      define APM_DO_ZERO_SEGS
+#      define APM_DO_POP_SEGS
+#endif
+
+static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
+                                       u32 *eax, u32 *ebx, u32 *ecx,
+                                       u32 *edx, u32 *esi)
+{
+       /*
+        * N.B. We do NOT need a cld after the BIOS call
+        * because we always save and restore the flags.
+        */
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+               "lcall *%%cs:apm_bios_entry\n\t"
+               "setc %%al\n\t"
+               "popl %%ebp\n\t"
+               "popl %%edi\n\t"
+               APM_DO_POP_SEGS
+               : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
+                 "=S" (*esi)
+               : "a" (func), "b" (ebx_in), "c" (ecx_in)
+               : "memory", "cc");
+}
+
+static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
+                                               u32 ecx_in, u32 *eax)
+{
+       int     cx, dx, si;
+       u8      error;
+
+       /*
+        * N.B. We do NOT need a cld after the BIOS call
+        * because we always save and restore the flags.
+        */
+       __asm__ __volatile__(APM_DO_ZERO_SEGS
+               "pushl %%edi\n\t"
+               "pushl %%ebp\n\t"
+               "lcall *%%cs:apm_bios_entry\n\t"
+               "setc %%bl\n\t"
+               "popl %%ebp\n\t"
+               "popl %%edi\n\t"
+               APM_DO_POP_SEGS
+               : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
+                 "=S" (si)
+               : "a" (func), "b" (ebx_in), "c" (ecx_in)
+               : "memory", "cc");
+       return error;
+}
+
+#endif /* _ASM_X86_MACH_DEFAULT_APM_H */
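
apm_bios_call_simple_asm() above is the short form of the APM BIOS thunk:
it far-calls apm_bios_entry, converts the carry flag into an error byte via
setc, and (under APM_ZERO_SEGS) parks the data segments on the null
selector around the call. A hedged usage sketch - the 0x530e function code
("get driver version") is taken from the APM specification and is an
assumption here, as is the simplified error handling:

    static int demo_apm_driver_version(u32 *ver)
    {
            u32 eax;

            /* AX=0x530e, BX=0 (APM BIOS), CX=driver version 1.2 */
            if (apm_bios_call_simple_asm(0x530e, 0, 0x0102, &eax))
                    return -EIO;    /* carry set: BIOS error */

            *ver = eax & 0xffff;    /* connection version, BCD */
            return 0;
    }
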
diff --git a/arch/x86/include/asm/arch_hooks.h b/arch/x86/include/asm/arch_hooks.h
deleted file mode 100644 (file)
index cbd4957..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef _ASM_X86_ARCH_HOOKS_H
-#define _ASM_X86_ARCH_HOOKS_H
-
-#include <linux/interrupt.h>
-
-/*
- *     linux/include/asm/arch_hooks.h
- *
- *     define the architecture specific hooks
- */
-
-/* these aren't arch hooks, they are generic routines
- * that can be used by the hooks */
-extern void init_ISA_irqs(void);
-extern irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-/* these are the defined hooks */
-extern void intr_init_hook(void);
-extern void pre_intr_init_hook(void);
-extern void pre_setup_arch_hook(void);
-extern void trap_init_hook(void);
-extern void pre_time_init_hook(void);
-extern void time_init_hook(void);
-extern void mca_nmi_hook(void);
-
-#endif /* _ASM_X86_ARCH_HOOKS_H */
diff --git a/arch/x86/include/asm/bigsmp/apic.h b/arch/x86/include/asm/bigsmp/apic.h
deleted file mode 100644 (file)
index d8dd9f5..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-#ifndef __ASM_MACH_APIC_H
-#define __ASM_MACH_APIC_H
-
-#define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu))
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-       return (1);
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-#ifdef CONFIG_SMP
-       return &cpu_online_map;
-#else
-       return &cpumask_of_cpu(0);
-#endif
-}
-
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL      0
-#define APIC_DFR_VALUE         (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE      (dest_Fixed)
-#define INT_DEST_MODE          (0)    /* phys delivery to target proc */
-#define NO_BALANCE_IRQ         (0)
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return (0);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-       return (1);
-}
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long val, id;
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       id = xapic_phys_to_log_apicid(cpu);
-       val |= SET_APIC_LOGICAL_ID(id);
-       return val;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
-}
-
-static inline void setup_apic_routing(void)
-{
-       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-               "Physflat", nr_ioapics);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return (0);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-       return apicid_2_node[hard_smp_processor_id()];
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < nr_cpu_ids)
-               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-
-       return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-       return physid_mask_of_physid(phys_apicid);
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return cpu_physical_id(cpu);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       /* For clustered we don't have a good way to do this yet - hack */
-       return physids_promote(0xFFL);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return (1);
-}
-
-/* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       int cpu;
-       int apicid;     
-
-       cpu = first_cpu(*cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       for_each_cpu_and(cpu, cpumask, andmask)
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       if (cpu < nr_cpu_ids)
-               return cpu_to_logical_apicid(cpu);
-
-       return BAD_APICID;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_MACH_APIC_H */
diff --git a/arch/x86/include/asm/bigsmp/apicdef.h b/arch/x86/include/asm/bigsmp/apicdef.h
deleted file mode 100644 (file)
index 392c3f5..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_MACH_APICDEF_H
-#define __ASM_MACH_APICDEF_H
-
-#define                APIC_ID_MASK            (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-       return (((x)>>24)&0xFF);
-}
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/bigsmp/ipi.h b/arch/x86/include/asm/bigsmp/ipi.h
deleted file mode 100644 (file)
index 27fcd01..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_MACH_IPI_H
-#define __ASM_MACH_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_MACH_IPI_H */
index dd61616cb73d1d3aab1bb7d5c2dfd8923ec6e672..6ba23dd9fc9216de96b9cb52d1954b9bb7ae2b99 100644 (file)
@@ -1,26 +1,36 @@
 #ifndef _ASM_X86_BOOT_H
 #define _ASM_X86_BOOT_H
 
-/* Don't touch these, unless you really know what you're doing. */
-#define DEF_SYSSEG     0x1000
-#define DEF_SYSSIZE    0x7F00
-
 /* Internal svga startup constants */
 #define NORMAL_VGA     0xffff          /* 80x25 mode */
 #define EXTENDED_VGA   0xfffe          /* 80x50 mode */
 #define ASK_VGA                0xfffd          /* ask for it at bootup */
 
+#ifdef __KERNEL__
+
 /* Physical address where kernel should be loaded. */
 #define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
                                + (CONFIG_PHYSICAL_ALIGN - 1)) \
                                & ~(CONFIG_PHYSICAL_ALIGN - 1))
 
+#ifdef CONFIG_KERNEL_BZIP2
+#define BOOT_HEAP_SIZE             0x400000
+#else /* !CONFIG_KERNEL_BZIP2 */
+
 #ifdef CONFIG_X86_64
 #define BOOT_HEAP_SIZE 0x7000
-#define BOOT_STACK_SIZE        0x4000
 #else
 #define BOOT_HEAP_SIZE 0x4000
+#endif
+
+#endif /* !CONFIG_KERNEL_BZIP2 */
+
+#ifdef CONFIG_X86_64
+#define BOOT_STACK_SIZE        0x4000
+#else
 #define BOOT_STACK_SIZE        0x1000
 #endif
 
+#endif /* __KERNEL__ */
+
 #endif /* _ASM_X86_BOOT_H */
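
For reference, the nested #ifdefs above resolve to the following
decompressor sizes; bzip2 needs a far larger scratch heap than the other
compressors, while the stack size depends only on the word size:

    compressor / arch        BOOT_HEAP_SIZE   BOOT_STACK_SIZE
    bzip2,  64-bit           0x400000         0x4000
    bzip2,  32-bit           0x400000         0x1000
    other,  64-bit           0x7000           0x4000
    other,  32-bit           0x4000           0x1000
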
index 2f8466540fb52237c71373513304ddbf5627b873..5b301b7ff5f4bcac4854caaed6dc4877a389814b 100644 (file)
@@ -5,24 +5,43 @@
 #include <linux/mm.h>
 
 /* Caches aren't brain-dead on the intel. */
-#define flush_cache_all()                      do { } while (0)
-#define flush_cache_mm(mm)                     do { } while (0)
-#define flush_cache_dup_mm(mm)                 do { } while (0)
-#define flush_cache_range(vma, start, end)     do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
-#define flush_dcache_page(page)                        do { } while (0)
-#define flush_dcache_mmap_lock(mapping)                do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
-#define flush_icache_range(start, end)         do { } while (0)
-#define flush_icache_page(vma, pg)             do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len)     do { } while (0)
-#define flush_cache_vmap(start, end)           do { } while (0)
-#define flush_cache_vunmap(start, end)         do { } while (0)
+static inline void flush_cache_all(void) { }
+static inline void flush_cache_mm(struct mm_struct *mm) { }
+static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
+static inline void flush_cache_range(struct vm_area_struct *vma,
+                                    unsigned long start, unsigned long end) { }
+static inline void flush_cache_page(struct vm_area_struct *vma,
+                                   unsigned long vmaddr, unsigned long pfn) { }
+static inline void flush_dcache_page(struct page *page) { }
+static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
+static inline void flush_icache_range(unsigned long start,
+                                     unsigned long end) { }
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                    struct page *page) { }
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+                                          struct page *page,
+                                          unsigned long addr,
+                                          unsigned long len) { }
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
+static inline void flush_cache_vunmap(unsigned long start,
+                                     unsigned long end) { }
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)     \
-       memcpy((dst), (src), (len))
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)   \
-       memcpy((dst), (src), (len))
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+                                    struct page *page, unsigned long vaddr,
+                                    void *dst, const void *src,
+                                    unsigned long len)
+{
+       memcpy(dst, src, len);
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+                                      struct page *page, unsigned long vaddr,
+                                      void *dst, const void *src,
+                                      unsigned long len)
+{
+       memcpy(dst, src, len);
+}
 
 #define PG_non_WB                              PG_arch_1
 PAGEFLAG(NonWB, non_WB)
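
Turning the do { } while (0) macros into empty static inlines keeps them
free at run time (they still compile to nothing) while restoring the
argument type checking that the macro forms silently skipped. A
self-contained illustration of the difference:

    struct page;

    #define old_flush_dcache_page(page)     do { } while (0)
    static inline void new_flush_dcache_page(struct page *page) { }

    static void demo(struct page *pg, int bogus)
    {
            old_flush_dcache_page(bogus);   /* bad arg, silently accepted */
            new_flush_dcache_page(pg);      /* checked, still zero code */
            /* new_flush_dcache_page(bogus); would now be diagnosed */
    }
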
index 2bc162e0ec6eb3bd02c685d80e99c24762a002b7..0e63c9a2a8d0d722d2eba8afafc175f48096698d 100644 (file)
@@ -1,5 +1,55 @@
 /*
- * Some macros to handle stack frames in assembly.
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+  arguments           |  callee-saved      | extra caller-saved | return
+ [callee-clobbered]   |                    | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+   functions when it sees tail-call optimization possibilities) rflags is
+   clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*]  In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+      bit more complex: up to 128 bits width we return small structures
+      straight in rax, rdx. For structures larger than that (3 words or
+      larger) the caller puts a pointer to an on-stack return struct
+      [allocated in the caller's stack frame] into the first argument - i.e.
+      into rdi. All other arguments shift up by one in this case.
+      Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+  arguments         | callee-saved        | extra caller-saved | return
+ [callee-clobbered] |                     | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+   is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*]  In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+      semantics as on 64-bit: edx can be used for a second return value
+      (i.e. covering integer and structure sizes up to 64 bits) - after that
+      it gets more complex and more expensive: 3-word or larger struct returns
+      get done in the caller's frame and the pointer to the return struct goes
+      into regparm0, i.e. eax - the other arguments shift up and the
+      function's register parameters degenerate to regparm=2 in essence.
+
+*/
+
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
  */
 
 #define R15              0
@@ -9,7 +59,7 @@
 #define RBP             32
 #define RBX             40
 
-/* arguments: interrupts/non tracing syscalls only save upto here*/
+/* arguments: interrupts/non tracing syscalls only save up to here: */
 #define R11             48
 #define R10             56
 #define R9              64
@@ -22,7 +72,7 @@
 #define ORIG_RAX       120       /* + error_code */
 /* end of arguments */
 
-/* cpu exception frame or undefined in case of fast syscall. */
+/* cpu exception frame or undefined in case of fast syscall: */
 #define RIP            128
 #define CS             136
 #define EFLAGS         144
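
To make the convention tables above concrete, here is a brief hedged illustration in C (the function names are hypothetical, not part of this patch); it shows how the 64-bit argument registers are assigned and how the small-vs-large struct return cases differ:

    /* Illustration only -- hypothetical functions, not from this patch. */
    struct small { long a, b; };        /* 128 bits: returned in rax:rdx      */
    struct big   { long a, b, c; };     /* 3 words: returned via hidden ptr   */

    long add3(long x, long y, long z)   /* x=rdi, y=rsi, z=rdx; result in rax */
    {
            return x + y + z;
    }

    struct small make_small(long a, long b)  /* returned straight in rax, rdx */
    {
            struct small s = { a, b };
            return s;
    }

    struct big make_big(long a)         /* caller passes &result in rdi, so   */
    {                                   /* 'a' shifts up into rsi             */
            struct big s = { a, a, a };
            return s;
    }
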
index bae482df6039a5122617bbc781db52fbf8ae56e2..b185091bf19ce39f67a325ecd6aafb7c13f8cd23 100644 (file)
@@ -7,6 +7,20 @@
 #include <linux/nodemask.h>
 #include <linux/percpu.h>
 
+#ifdef CONFIG_SMP
+
+extern void prefill_possible_map(void);
+
+#else /* CONFIG_SMP */
+
+static inline void prefill_possible_map(void) {}
+
+#define cpu_physical_id(cpu)                   boot_cpu_physical_apicid
+#define safe_smp_processor_id()                        0
+#define stack_smp_processor_id()               0
+
+#endif /* CONFIG_SMP */
+
 struct x86_cpu {
        struct cpu cpu;
 };
@@ -17,4 +31,7 @@ extern void arch_unregister_cpu(int);
 #endif
 
 DECLARE_PER_CPU(int, cpu_state);
+
+extern unsigned int boot_cpu_id;
+
 #endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/cpu_debug.h b/arch/x86/include/asm/cpu_debug.h
new file mode 100755 (executable)
index 0000000..56f1635
--- /dev/null
@@ -0,0 +1,199 @@
+#ifndef _ASM_X86_CPU_DEBUG_H
+#define _ASM_X86_CPU_DEBUG_H
+
+/*
+ * CPU x86 architecture debug
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ */
+
+/* Register flags */
+enum cpu_debug_bit {
+/* Model Specific Registers (MSRs)                                     */
+       CPU_MC_BIT,                             /* Machine Check        */
+       CPU_MONITOR_BIT,                        /* Monitor              */
+       CPU_TIME_BIT,                           /* Time                 */
+       CPU_PMC_BIT,                            /* Performance Monitor  */
+       CPU_PLATFORM_BIT,                       /* Platform             */
+       CPU_APIC_BIT,                           /* APIC                 */
+       CPU_POWERON_BIT,                        /* Power-on             */
+       CPU_CONTROL_BIT,                        /* Control              */
+       CPU_FEATURES_BIT,                       /* Features control     */
+       CPU_LBRANCH_BIT,                        /* Last Branch          */
+       CPU_BIOS_BIT,                           /* BIOS                 */
+       CPU_FREQ_BIT,                           /* Frequency            */
+       CPU_MTTR_BIT,                           /* MTRR                 */
+       CPU_PERF_BIT,                           /* Performance          */
+       CPU_CACHE_BIT,                          /* Cache                */
+       CPU_SYSENTER_BIT,                       /* Sysenter             */
+       CPU_THERM_BIT,                          /* Thermal              */
+       CPU_MISC_BIT,                           /* Miscellaneous        */
+       CPU_DEBUG_BIT,                          /* Debug                */
+       CPU_PAT_BIT,                            /* PAT                  */
+       CPU_VMX_BIT,                            /* VMX                  */
+       CPU_CALL_BIT,                           /* System Call          */
+       CPU_BASE_BIT,                           /* BASE Address         */
+       CPU_SMM_BIT,                            /* System mgmt mode     */
+       CPU_SVM_BIT,                            /* Secure Virtual Machine */
+       CPU_OSVM_BIT,                           /* OS-Visible Workaround */
+/* Standard Registers                                                  */
+       CPU_TSS_BIT,                            /* Task State Segment   */
+       CPU_CR_BIT,                             /* Control Registers    */
+       CPU_DT_BIT,                             /* Descriptor Table     */
+/* End of Registers flags                                              */
+       CPU_REG_ALL_BIT,                        /* Select all Registers */
+};
+
+#define        CPU_REG_ALL             (~0)            /* Select all Registers */
+
+#define        CPU_MC                  (1 << CPU_MC_BIT)
+#define        CPU_MONITOR             (1 << CPU_MONITOR_BIT)
+#define        CPU_TIME                (1 << CPU_TIME_BIT)
+#define        CPU_PMC                 (1 << CPU_PMC_BIT)
+#define        CPU_PLATFORM            (1 << CPU_PLATFORM_BIT)
+#define        CPU_APIC                (1 << CPU_APIC_BIT)
+#define        CPU_POWERON             (1 << CPU_POWERON_BIT)
+#define        CPU_CONTROL             (1 << CPU_CONTROL_BIT)
+#define        CPU_FEATURES            (1 << CPU_FEATURES_BIT)
+#define        CPU_LBRANCH             (1 << CPU_LBRANCH_BIT)
+#define        CPU_BIOS                (1 << CPU_BIOS_BIT)
+#define        CPU_FREQ                (1 << CPU_FREQ_BIT)
+#define        CPU_MTRR                (1 << CPU_MTTR_BIT)
+#define        CPU_PERF                (1 << CPU_PERF_BIT)
+#define        CPU_CACHE               (1 << CPU_CACHE_BIT)
+#define        CPU_SYSENTER            (1 << CPU_SYSENTER_BIT)
+#define        CPU_THERM               (1 << CPU_THERM_BIT)
+#define        CPU_MISC                (1 << CPU_MISC_BIT)
+#define        CPU_DEBUG               (1 << CPU_DEBUG_BIT)
+#define        CPU_PAT                 (1 << CPU_PAT_BIT)
+#define        CPU_VMX                 (1 << CPU_VMX_BIT)
+#define        CPU_CALL                (1 << CPU_CALL_BIT)
+#define        CPU_BASE                (1 << CPU_BASE_BIT)
+#define        CPU_SMM                 (1 << CPU_SMM_BIT)
+#define        CPU_SVM                 (1 << CPU_SVM_BIT)
+#define        CPU_OSVM                (1 << CPU_OSVM_BIT)
+#define        CPU_TSS                 (1 << CPU_TSS_BIT)
+#define        CPU_CR                  (1 << CPU_CR_BIT)
+#define        CPU_DT                  (1 << CPU_DT_BIT)
+
+/* Register file flags */
+enum cpu_file_bit {
+       CPU_INDEX_BIT,                          /* index                */
+       CPU_VALUE_BIT,                          /* value                */
+};
+
+#define        CPU_FILE_VALUE                  (1 << CPU_VALUE_BIT)
+
+/*
+ * DisplayFamily_DisplayModel  Processor Families/Processor Number Series
+ * --------------------------  ------------------------------------------
+ * 05_01, 05_02, 05_04         Pentium, Pentium with MMX
+ *
+ * 06_01                       Pentium Pro
+ * 06_03, 06_05                        Pentium II Xeon, Pentium II
+ * 06_07, 06_08, 06_0A, 06_0B  Pentium III Xeon, Pentium III
+ *
+ * 06_09, 06_0D                 Pentium M
+ *
+ * 06_0E                       Core Duo, Core Solo
+ *
+ * 06_0F                       Xeon 3000, 3200, 5100, 5300, 7300 series,
+ *                             Core 2 Quad, Core 2 Extreme, Core 2 Duo,
+ *                             Pentium dual-core
+ * 06_17                       Xeon 5200, 5400 series, Core 2 Quad Q9650
+ *
+ * 06_1C                       Atom
+ *
+ * 0F_00, 0F_01, 0F_02         Xeon, Xeon MP, Pentium 4
+ * 0F_03, 0F_04                        Xeon, Xeon MP, Pentium 4, Pentium D
+ *
+ * 0F_06                       Xeon 7100, 5000 Series, Xeon MP,
+ *                             Pentium 4, Pentium D
+ */
+
+/* Register processors bits */
+enum cpu_processor_bit {
+       CPU_NONE,
+/* Intel */
+       CPU_INTEL_PENTIUM_BIT,
+       CPU_INTEL_P6_BIT,
+       CPU_INTEL_PENTIUM_M_BIT,
+       CPU_INTEL_CORE_BIT,
+       CPU_INTEL_CORE2_BIT,
+       CPU_INTEL_ATOM_BIT,
+       CPU_INTEL_XEON_P4_BIT,
+       CPU_INTEL_XEON_MP_BIT,
+};
+
+#define        CPU_ALL                 (~0)            /* Select all CPUs      */
+
+#define        CPU_INTEL_PENTIUM       (1 << CPU_INTEL_PENTIUM_BIT)
+#define        CPU_INTEL_P6            (1 << CPU_INTEL_P6_BIT)
+#define        CPU_INTEL_PENTIUM_M     (1 << CPU_INTEL_PENTIUM_M_BIT)
+#define        CPU_INTEL_CORE          (1 << CPU_INTEL_CORE_BIT)
+#define        CPU_INTEL_CORE2         (1 << CPU_INTEL_CORE2_BIT)
+#define        CPU_INTEL_ATOM          (1 << CPU_INTEL_ATOM_BIT)
+#define        CPU_INTEL_XEON_P4       (1 << CPU_INTEL_XEON_P4_BIT)
+#define        CPU_INTEL_XEON_MP       (1 << CPU_INTEL_XEON_MP_BIT)
+
+#define        CPU_INTEL_PX            (CPU_INTEL_P6 | CPU_INTEL_PENTIUM_M)
+#define        CPU_INTEL_COREX         (CPU_INTEL_CORE | CPU_INTEL_CORE2)
+#define        CPU_INTEL_XEON          (CPU_INTEL_XEON_P4 | CPU_INTEL_XEON_MP)
+#define        CPU_CO_AT               (CPU_INTEL_CORE | CPU_INTEL_ATOM)
+#define        CPU_C2_AT               (CPU_INTEL_CORE2 | CPU_INTEL_ATOM)
+#define        CPU_CX_AT               (CPU_INTEL_COREX | CPU_INTEL_ATOM)
+#define        CPU_CX_XE               (CPU_INTEL_COREX | CPU_INTEL_XEON)
+#define        CPU_P6_XE               (CPU_INTEL_P6 | CPU_INTEL_XEON)
+#define        CPU_PM_CO_AT            (CPU_INTEL_PENTIUM_M | CPU_CO_AT)
+#define        CPU_C2_AT_XE            (CPU_C2_AT | CPU_INTEL_XEON)
+#define        CPU_CX_AT_XE            (CPU_CX_AT | CPU_INTEL_XEON)
+#define        CPU_P6_CX_AT            (CPU_INTEL_P6 | CPU_CX_AT)
+#define        CPU_P6_CX_XE            (CPU_P6_XE | CPU_INTEL_COREX)
+#define        CPU_P6_CX_AT_XE         (CPU_INTEL_P6 | CPU_CX_AT_XE)
+#define        CPU_PM_CX_AT_XE         (CPU_INTEL_PENTIUM_M | CPU_CX_AT_XE)
+#define        CPU_PM_CX_AT            (CPU_INTEL_PENTIUM_M | CPU_CX_AT)
+#define        CPU_PM_CX_XE            (CPU_INTEL_PENTIUM_M | CPU_CX_XE)
+#define        CPU_PX_CX_AT            (CPU_INTEL_PX | CPU_CX_AT)
+#define        CPU_PX_CX_AT_XE         (CPU_INTEL_PX | CPU_CX_AT_XE)
+
+/* Select all Intel CPUs */
+#define        CPU_INTEL_ALL           (CPU_INTEL_PENTIUM | CPU_PX_CX_AT_XE)
+
+#define MAX_CPU_FILES          512
+
+struct cpu_private {
+       unsigned                cpu;
+       unsigned                type;
+       unsigned                reg;
+       unsigned                file;
+};
+
+struct cpu_debug_base {
+       char                    *name;          /* Register name        */
+       unsigned                flag;           /* Register flag        */
+       unsigned                write;          /* Register write flag  */
+};
+
+/*
+ * Currently this looks similar to cpu_debug_base, but once we add more
+ * files cpu_file_base will go in a different direction.
+ */
+struct cpu_file_base {
+       char                    *name;          /* Register file name   */
+       unsigned                flag;           /* Register file flag   */
+       unsigned                write;          /* Register write flag  */
+};
+
+struct cpu_cpuX_base {
+       struct dentry           *dentry;        /* Register dentry      */
+       int                     init;           /* Register index file  */
+};
+
+struct cpu_debug_range {
+       unsigned                min;            /* Register range min   */
+       unsigned                max;            /* Register range max   */
+       unsigned                flag;           /* Supported flags      */
+       unsigned                model;          /* Supported models     */
+};
+
+#endif /* _ASM_X86_CPU_DEBUG_H */
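
As a hedged sketch of how these flags are meant to compose (the table entries and helper below are hypothetical; the real consumer of this header lives in arch/x86/kernel/cpu/cpu_debug.c, which is not part of this hunk), a debug table entry pairs an MSR range with the register-type and CPU-model masks defined above:

    /* Illustration only -- assumed usage, not from this patch. */
    static struct cpu_debug_range sample_ranges[] = {
            { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },    /* TSC       */
            { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },  /* APIC_BASE */
    };

    /* Does range 'r' cover register type 'flag' on CPU-model mask 'model'? */
    static int range_applies(const struct cpu_debug_range *r,
                             unsigned flag, unsigned model)
    {
            return (r->flag & flag) && (r->model & model);
    }
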
diff --git a/arch/x86/include/asm/cpumask.h b/arch/x86/include/asm/cpumask.h
new file mode 100644 (file)
index 0000000..a7f3c75
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef _ASM_X86_CPUMASK_H
+#define _ASM_X86_CPUMASK_H
+#ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_X86_64
+
+extern cpumask_var_t cpu_callin_mask;
+extern cpumask_var_t cpu_callout_mask;
+extern cpumask_var_t cpu_initialized_mask;
+extern cpumask_var_t cpu_sibling_setup_mask;
+
+extern void setup_cpu_local_masks(void);
+
+#else /* CONFIG_X86_32 */
+
+extern cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callout_map;
+extern cpumask_t cpu_initialized;
+extern cpumask_t cpu_sibling_setup_map;
+
+#define cpu_callin_mask                ((struct cpumask *)&cpu_callin_map)
+#define cpu_callout_mask       ((struct cpumask *)&cpu_callout_map)
+#define cpu_initialized_mask   ((struct cpumask *)&cpu_initialized)
+#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
+
+static inline void setup_cpu_local_masks(void) { }
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_X86_CPUMASK_H */
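
The point of the 32-bit cast macros above is that common code can take a struct cpumask * on both configurations; a minimal hedged sketch (the helper name is hypothetical):

    /* Illustration only: works unchanged on 32-bit (static map + cast)
     * and on 64-bit (cpumask_var_t set up by setup_cpu_local_masks()). */
    static void example_mark_callin(int cpu)
    {
            cpumask_set_cpu(cpu, cpu_callin_mask);
    }
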
index 0930b4f8d672249145ea249cbc0d272471e822de..c68c361697e144649f9d0b40e69921d40ca2cbb8 100644 (file)
@@ -1,39 +1,21 @@
 #ifndef _ASM_X86_CURRENT_H
 #define _ASM_X86_CURRENT_H
 
-#ifdef CONFIG_X86_32
 #include <linux/compiler.h>
 #include <asm/percpu.h>
 
+#ifndef __ASSEMBLY__
 struct task_struct;
 
 DECLARE_PER_CPU(struct task_struct *, current_task);
-static __always_inline struct task_struct *get_current(void)
-{
-       return x86_read_percpu(current_task);
-}
-
-#else /* X86_32 */
-
-#ifndef __ASSEMBLY__
-#include <asm/pda.h>
-
-struct task_struct;
 
 static __always_inline struct task_struct *get_current(void)
 {
-       return read_pda(pcurrent);
+       return percpu_read(current_task);
 }
 
-#else /* __ASSEMBLY__ */
-
-#include <asm/asm-offsets.h>
-#define GET_CURRENT(reg) movq %gs:(pda_pcurrent),reg
+#define current get_current()
 
 #endif /* __ASSEMBLY__ */
 
-#endif /* X86_32 */
-
-#define current get_current()
-
 #endif /* _ASM_X86_CURRENT_H */
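
With the unification above, users of 'current' compile down to a single percpu load on both word sizes (%fs-based on 32-bit, %gs-based on 64-bit); a hedged sketch with a hypothetical function:

    /* Illustration only -- not from this patch. */
    static void example_show_comm(void)
    {
            /* 'current' expands to percpu_read(current_task) on both builds */
            printk(KERN_DEBUG "running task: %s\n", current->comm);
    }
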
index dc27705f5443268cc69857590fde7e1d369f97a2..5623c50d67b268a00c10dc60f8a5013f04bb28aa 100644 (file)
@@ -91,7 +91,6 @@ static inline int desc_empty(const void *ptr)
 #define store_gdt(dtr) native_store_gdt(dtr)
 #define store_idt(dtr) native_store_idt(dtr)
 #define store_tr(tr) (tr = native_store_tr())
-#define store_ldt(ldt) asm("sldt %0":"=m" (ldt))
 
 #define load_TLS(t, cpu) native_load_tls(t, cpu)
 #define set_ldt native_set_ldt
@@ -112,6 +111,8 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
 }
 #endif /* CONFIG_PARAVIRT */
 
+#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))
+
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
                                          const gate_desc *gate)
 {
diff --git a/arch/x86/include/asm/do_timer.h b/arch/x86/include/asm/do_timer.h
new file mode 100644 (file)
index 0000000..23ecda0
--- /dev/null
@@ -0,0 +1,16 @@
+/* defines for inline arch setup functions */
+#include <linux/clockchips.h>
+
+#include <asm/i8259.h>
+#include <asm/i8253.h>
+
+/**
+ * do_timer_interrupt_hook - hook into timer tick
+ *
+ * Call the PIT clock event handler; see asm/i8253.h.
+ **/
+
+static inline void do_timer_interrupt_hook(void)
+{
+       global_clock_event->event_handler(global_clock_event);
+}
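
A hedged sketch of the assumed caller (the real one is the timer IRQ handler in arch/x86/kernel/time_32.c, not shown in this section): the hook simply forwards each tick to the PIT clock event device:

    /* Illustration only -- assumed caller, not part of this header. */
    static irqreturn_t timer_interrupt(int irq, void *dev_id)
    {
            do_timer_interrupt_hook();      /* run global_clock_event handler */
            return IRQ_HANDLED;
    }
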
index f51a3ddde01a753c5931e4d352c165a6e3d0ebd7..83c1bc8d2e8a7ba071cdd591050bbd45e7cf678a 100644 (file)
@@ -112,7 +112,7 @@ extern unsigned int vdso_enabled;
  * now struct_user_regs, they are different)
  */
 
-#define ELF_CORE_COPY_REGS(pr_reg, regs)       \
+#define ELF_CORE_COPY_REGS_COMMON(pr_reg, regs)        \
 do {                                           \
        pr_reg[0] = regs->bx;                   \
        pr_reg[1] = regs->cx;                   \
@@ -124,7 +124,6 @@ do {                                                \
        pr_reg[7] = regs->ds & 0xffff;          \
        pr_reg[8] = regs->es & 0xffff;          \
        pr_reg[9] = regs->fs & 0xffff;          \
-       savesegment(gs, pr_reg[10]);            \
        pr_reg[11] = regs->orig_ax;             \
        pr_reg[12] = regs->ip;                  \
        pr_reg[13] = regs->cs & 0xffff;         \
@@ -133,6 +132,18 @@ do {                                               \
        pr_reg[16] = regs->ss & 0xffff;         \
 } while (0);
 
+#define ELF_CORE_COPY_REGS(pr_reg, regs)       \
+do {                                           \
+       ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+       pr_reg[10] = get_user_gs(regs);         \
+} while (0);
+
+#define ELF_CORE_COPY_KERNEL_REGS(pr_reg, regs)        \
+do {                                           \
+       ELF_CORE_COPY_REGS_COMMON(pr_reg, regs);\
+       savesegment(gs, pr_reg[10]);            \
+} while (0);
+
 #define ELF_PLATFORM   (utsname()->machine)
 #define set_personality_64bit()        do { } while (0)
 
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
new file mode 100644 (file)
index 0000000..c2e6bed
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * This file is designed to contain the BUILD_INTERRUPT specifications for
+ * all of the extra named interrupt vectors used by the architecture.
+ * Usually these are the Inter-Processor Interrupts (IPIs).
+ */
+
+/*
+ * The following vectors are part of the Linux architecture; there is no
+ * hardware IRQ pin equivalent for them. They are triggered through the
+ * ICC by us (IPIs).
+ */
+#ifdef CONFIG_SMP
+BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
+BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
+BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
+BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
+
+BUILD_INTERRUPT3(invalidate_interrupt0,INVALIDATE_TLB_VECTOR_START+0,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt1,INVALIDATE_TLB_VECTOR_START+1,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt2,INVALIDATE_TLB_VECTOR_START+2,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt3,INVALIDATE_TLB_VECTOR_START+3,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt4,INVALIDATE_TLB_VECTOR_START+4,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt5,INVALIDATE_TLB_VECTOR_START+5,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt6,INVALIDATE_TLB_VECTOR_START+6,
+                smp_invalidate_interrupt)
+BUILD_INTERRUPT3(invalidate_interrupt7,INVALIDATE_TLB_VECTOR_START+7,
+                smp_invalidate_interrupt)
+#endif
+
+BUILD_INTERRUPT(generic_interrupt, GENERIC_INTERRUPT_VECTOR)
+
+/*
+ * Every Pentium local APIC has two 'local interrupts', with a
+ * soft-definable vector attached to each: one is a timer interrupt,
+ * the other an error counter overflow. Linux uses the local APIC
+ * timer interrupt to get a much simpler SMP time architecture:
+ */
+#ifdef CONFIG_X86_LOCAL_APIC
+
+BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
+BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
+BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+
+#ifdef CONFIG_PERF_COUNTERS
+BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
+#endif
+
+#ifdef CONFIG_X86_MCE_P4THERMAL
+BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
+#endif
+
+#endif
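
Each BUILD_INTERRUPT(name, vector) line above generates an assembly entry stub named 'name' (the macro itself is defined in the entry assembly, not in this file); its C-level handler is the matching smp_<name>() function declared elsewhere. A hedged illustration of that pairing:

    /* Illustration only: the stub built by
     *   BUILD_INTERRUPT(reschedule_interrupt, RESCHEDULE_VECTOR)
     * ends up calling this C handler (declared in asm/hw_irq.h): */
    void smp_reschedule_interrupt(struct pt_regs *regs);
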
diff --git a/arch/x86/include/asm/es7000/apic.h b/arch/x86/include/asm/es7000/apic.h
deleted file mode 100644 (file)
index c58b9cc..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-#ifndef __ASM_ES7000_APIC_H
-#define __ASM_ES7000_APIC_H
-
-#include <linux/gfp.h>
-
-#define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu)
-#define esr_disable (1)
-
-static inline int apic_id_registered(void)
-{
-               return (1);
-}
-
-static inline const cpumask_t *target_cpus_cluster(void)
-{
-       return &CPU_MASK_ALL;
-}
-
-static inline const cpumask_t *target_cpus(void)
-{
-       return &cpumask_of_cpu(smp_processor_id());
-}
-
-#define APIC_DFR_VALUE_CLUSTER         (APIC_DFR_CLUSTER)
-#define INT_DELIVERY_MODE_CLUSTER      (dest_LowestPrio)
-#define INT_DEST_MODE_CLUSTER          (1) /* logical delivery broadcast to all procs */
-#define NO_BALANCE_IRQ_CLUSTER         (1)
-
-#define APIC_DFR_VALUE         (APIC_DFR_FLAT)
-#define INT_DELIVERY_MODE      (dest_Fixed)
-#define INT_DEST_MODE          (0)    /* phys delivery to target procs */
-#define NO_BALANCE_IRQ         (0)
-#undef  APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL      0x0
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return 0;
-}
-static inline unsigned long check_apicid_present(int bit)
-{
-       return physid_isset(bit, phys_cpu_present_map);
-}
-
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline unsigned long calculate_ldr(int cpu)
-{
-       unsigned long id;
-       id = xapic_phys_to_log_apicid(cpu);
-       return (SET_APIC_LOGICAL_ID(id));
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LdR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr_cluster(void)
-{
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE_CLUSTER);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
-}
-
-static inline void init_apic_ldr(void)
-{
-       unsigned long val;
-       int cpu = smp_processor_id();
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = calculate_ldr(cpu);
-       apic_write(APIC_LDR, val);
-}
-
-extern int apic_version [MAX_APICS];
-static inline void setup_apic_routing(void)
-{
-       int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
-       printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
-               (apic_version[apic] == 0x14) ?
-                       "Physical Cluster" : "Logical Cluster",
-                       nr_ioapics, cpus_addr(*target_cpus())[0]);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return 0;
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-       return 0;
-}
-
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (!mps_cpu)
-               return boot_cpu_physical_apicid;
-       else if (mps_cpu < nr_cpu_ids)
-               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
-       else
-               return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-       static int id = 0;
-       physid_mask_t mask;
-       mask = physid_mask_of_physid(id);
-       ++id;
-       return mask;
-}
-
-extern u8 cpu_2_logical_apicid[];
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return (int)cpu_2_logical_apicid[cpu];
-#else
-       return logical_smp_processor_id();
-#endif
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       /* For clustered we don't have a good way to do this yet - hack */
-       return physids_promote(0xff);
-}
-
-
-static inline void setup_portio_remap(void)
-{
-}
-
-extern unsigned int boot_cpu_physical_apicid;
-static inline int check_phys_apicid_present(int cpu_physical_apicid)
-{
-       boot_cpu_physical_apicid = read_apic_id();
-       return (1);
-}
-
-static inline unsigned int
-cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
-{
-       int num_bits_set;
-       int cpus_found = 0;
-       int cpu;
-       int apicid;
-
-       num_bits_set = cpumask_weight(cpumask);
-       /* Return id to all */
-       if (num_bits_set == nr_cpu_ids)
-               return 0xFF;
-       /*
-        * The cpus in the mask must all be on the apic cluster.  If are not
-        * on the same apicid cluster return default value of TARGET_CPUS.
-        */
-       cpu = cpumask_first(cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpumask_test_cpu(cpu, cpumask)) {
-                       int new_apicid = cpu_to_logical_apicid(cpu);
-                       if (apicid_cluster(apicid) !=
-                                       apicid_cluster(new_apicid)){
-                               printk ("%s: Not a valid mask!\n", __func__);
-                               return 0xFF;
-                       }
-                       apicid = new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       int num_bits_set;
-       int cpus_found = 0;
-       int cpu;
-       int apicid;
-
-       num_bits_set = cpus_weight(*cpumask);
-       /* Return id to all */
-       if (num_bits_set == nr_cpu_ids)
-               return cpu_to_logical_apicid(0);
-       /*
-        * The cpus in the mask must all be on the apic cluster.  If are not
-        * on the same apicid cluster return default value of TARGET_CPUS.
-        */
-       cpu = first_cpu(*cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpu_isset(cpu, *cpumask)) {
-                       int new_apicid = cpu_to_logical_apicid(cpu);
-                       if (apicid_cluster(apicid) !=
-                                       apicid_cluster(new_apicid)){
-                               printk ("%s: Not a valid mask!\n", __func__);
-                               return cpu_to_logical_apicid(0);
-                       }
-                       apicid = new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
-                                                 const struct cpumask *andmask)
-{
-       int apicid = cpu_to_logical_apicid(0);
-       cpumask_var_t cpumask;
-
-       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-               return apicid;
-
-       cpumask_and(cpumask, inmask, andmask);
-       cpumask_and(cpumask, cpumask, cpu_online_mask);
-       apicid = cpu_mask_to_apicid(cpumask);
-
-       free_cpumask_var(cpumask);
-       return apicid;
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_ES7000_APIC_H */
diff --git a/arch/x86/include/asm/es7000/apicdef.h b/arch/x86/include/asm/es7000/apicdef.h
deleted file mode 100644 (file)
index 8b234a3..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_ES7000_APICDEF_H
-#define __ASM_ES7000_APICDEF_H
-
-#define                APIC_ID_MASK            (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-       return (((x)>>24)&0xFF);
-}
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/es7000/ipi.h b/arch/x86/include/asm/es7000/ipi.h
deleted file mode 100644 (file)
index 7e8ed24..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_ES7000_IPI_H
-#define __ASM_ES7000_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_ES7000_IPI_H */
diff --git a/arch/x86/include/asm/es7000/mpparse.h b/arch/x86/include/asm/es7000/mpparse.h
deleted file mode 100644 (file)
index c1629b0..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __ASM_ES7000_MPPARSE_H
-#define __ASM_ES7000_MPPARSE_H
-
-#include <linux/acpi.h>
-
-extern int parse_unisys_oem (char *oemptr);
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
-extern void setup_unisys(void);
-
-#ifndef CONFIG_X86_GENERICARCH
-extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
-extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
-#endif
-
-#ifdef CONFIG_ACPI
-
-static inline int es7000_check_dsdt(void)
-{
-       struct acpi_table_header header;
-
-       if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
-           !strncmp(header.oem_id, "UNISYS", 6))
-               return 1;
-       return 0;
-}
-#endif
-
-#endif /* __ASM_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/es7000/wakecpu.h b/arch/x86/include/asm/es7000/wakecpu.h
deleted file mode 100644 (file)
index 78f0daa..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#ifndef __ASM_ES7000_WAKECPU_H
-#define __ASM_ES7000_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW    0x467
-#define TRAMPOLINE_PHYS_HIGH   0x469
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-#ifndef CONFIG_ES7000_CLUSTERED_APIC
-       while (!atomic_read(deassert))
-               cpu_relax();
-#endif
-       return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-extern void __inquire_remote_apic(int apicid);
-
-static inline void inquire_remote_apic(int apicid)
-{
-       if (apic_verbosity >= APIC_DEBUG)
-               __inquire_remote_apic(apicid);
-}
-
-#endif /* __ASM_MACH_WAKECPU_H */
index 23696d44a0af85fb2d3bf407d8b44def4874bf81..63a79c77d220058f74eb81a70e70e3f3721be76d 100644 (file)
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ */
+
 #ifndef _ASM_X86_FIXMAP_H
 #define _ASM_X86_FIXMAP_H
 
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/acpi.h>
+#include <asm/apicdef.h>
+#include <asm/page.h>
+#ifdef CONFIG_X86_32
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#else
+#include <asm/vsyscall.h>
+#endif
+
+/*
+ * We can't declare FIXADDR_TOP as a variable for x86_64 because the
+ * vsyscall code uses fixmaps that rely on FIXADDR_TOP for proper address
+ * calculation. Because of this, FIXADDR_TOP x86 integration was left for
+ * later work.
+ */
+#ifdef CONFIG_X86_32
+/* Used by vmalloc.c and vsyscall.lds.S.
+ *
+ * Leave one empty page between vmalloc'ed areas and
+ * the start of the fixmap.
+ */
+extern unsigned long __FIXADDR_TOP;
+#define FIXADDR_TOP    ((unsigned long)__FIXADDR_TOP)
+
+#define FIXADDR_USER_START     __fix_to_virt(FIX_VDSO)
+#define FIXADDR_USER_END       __fix_to_virt(FIX_VDSO - 1)
+#else
+#define FIXADDR_TOP    (VSYSCALL_END-PAGE_SIZE)
+
+/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
+#define FIXADDR_USER_START     ((unsigned long)VSYSCALL32_VSYSCALL)
+#define FIXADDR_USER_END       (FIXADDR_USER_START + PAGE_SIZE)
+#endif
+
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ * For x86_32 we allocate these special addresses
+ * from the end of virtual memory (0xfffff000) backwards.
+ * This also lets us do fail-safe vmalloc(): we
+ * can guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ * TLB entries of such buffers will not be flushed across
+ * task switches.
+ */
+enum fixed_addresses {
 #ifdef CONFIG_X86_32
-# include "fixmap_32.h"
+       FIX_HOLE,
+       FIX_VDSO,
 #else
-# include "fixmap_64.h"
+       VSYSCALL_LAST_PAGE,
+       VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
+                           + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
+       VSYSCALL_HPET,
 #endif
+       FIX_DBGP_BASE,
+       FIX_EARLYCON_MEM_BASE,
+#ifdef CONFIG_X86_LOCAL_APIC
+       FIX_APIC_BASE,  /* local (CPU) APIC -- required for SMP or not */
+#endif
+#ifdef CONFIG_X86_IO_APIC
+       FIX_IO_APIC_BASE_0,
+       FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
+#endif
+#ifdef CONFIG_X86_VISWS_APIC
+       FIX_CO_CPU,     /* Cobalt timer */
+       FIX_CO_APIC,    /* Cobalt APIC Redirection Table */
+       FIX_LI_PCIA,    /* Lithium PCI Bridge A */
+       FIX_LI_PCIB,    /* Lithium PCI Bridge B */
+#endif
+#ifdef CONFIG_X86_F00F_BUG
+       FIX_F00F_IDT,   /* Virtual mapping for IDT */
+#endif
+#ifdef CONFIG_X86_CYCLONE_TIMER
+       FIX_CYCLONE_TIMER, /* Cyclone timer register */
+#endif
+#ifdef CONFIG_X86_32
+       FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+#ifdef CONFIG_PCI_MMCONFIG
+       FIX_PCIE_MCFG,
+#endif
+#endif
+#ifdef CONFIG_PARAVIRT
+       FIX_PARAVIRT_BOOTMAP,
+#endif
+       __end_of_permanent_fixed_addresses,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+       FIX_OHCI1394_BASE,
+#endif
+       /*
+        * 256 temporary boot-time mappings, used by early_ioremap(),
+        * before ioremap() is functional.
+        *
+        * We round it up to the next 256 pages boundary so that we
+        * can have a single pgd entry and a single pte table:
+        */
+#define NR_FIX_BTMAPS          64
+#define FIX_BTMAPS_SLOTS       4
+       FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
+                       (__end_of_permanent_fixed_addresses & 255),
+       FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
+#ifdef CONFIG_X86_32
+       FIX_WP_TEST,
+#endif
+       __end_of_fixed_addresses
+};
+
+
+extern void reserve_top_address(unsigned long reserve);
+
+#define FIXADDR_SIZE   (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_BOOT_SIZE      (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START          (FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_BOOT_START     (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
 
 extern int fixmaps_set;
 
@@ -69,4 +203,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
        BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
        return __virt_to_fix(vaddr);
 }
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
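
A hedged usage sketch for the unified fixmap API (the helper below is hypothetical; set_fixmap_nocache() and fix_to_virt() are the standard accessors): associate a physical address with a compile-time slot, then use its constant virtual address:

    /* Illustration only -- not from this patch. */
    static void __iomem *example_map_local_apic(unsigned long phys)
    {
            set_fixmap_nocache(FIX_APIC_BASE, phys);
            return (void __iomem *)fix_to_virt(FIX_APIC_BASE);
    }
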
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
deleted file mode 100644 (file)
index c7115c1..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_X86_FIXMAP_32_H
-#define _ASM_X86_FIXMAP_32_H
-
-
-/* used by vmalloc.c, vsyscall.lds.S.
- *
- * Leave one empty page between vmalloc'ed areas and
- * the start of the fixmap.
- */
-extern unsigned long __FIXADDR_TOP;
-#define FIXADDR_USER_START     __fix_to_virt(FIX_VDSO)
-#define FIXADDR_USER_END       __fix_to_virt(FIX_VDSO - 1)
-
-#ifndef __ASSEMBLY__
-#include <linux/kernel.h>
-#include <asm/acpi.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the end of virtual memory (0xfffff000) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-enum fixed_addresses {
-       FIX_HOLE,
-       FIX_VDSO,
-       FIX_DBGP_BASE,
-       FIX_EARLYCON_MEM_BASE,
-#ifdef CONFIG_X86_LOCAL_APIC
-       FIX_APIC_BASE,  /* local (CPU) APIC) -- required for SMP or not */
-#endif
-#ifdef CONFIG_X86_IO_APIC
-       FIX_IO_APIC_BASE_0,
-       FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
-#endif
-#ifdef CONFIG_X86_VISWS_APIC
-       FIX_CO_CPU,     /* Cobalt timer */
-       FIX_CO_APIC,    /* Cobalt APIC Redirection Table */
-       FIX_LI_PCIA,    /* Lithium PCI Bridge A */
-       FIX_LI_PCIB,    /* Lithium PCI Bridge B */
-#endif
-#ifdef CONFIG_X86_F00F_BUG
-       FIX_F00F_IDT,   /* Virtual mapping for IDT */
-#endif
-#ifdef CONFIG_X86_CYCLONE_TIMER
-       FIX_CYCLONE_TIMER, /*cyclone timer register*/
-#endif
-       FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
-       FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#ifdef CONFIG_PCI_MMCONFIG
-       FIX_PCIE_MCFG,
-#endif
-#ifdef CONFIG_PARAVIRT
-       FIX_PARAVIRT_BOOTMAP,
-#endif
-       __end_of_permanent_fixed_addresses,
-       /*
-        * 256 temporary boot-time mappings, used by early_ioremap(),
-        * before ioremap() is functional.
-        *
-        * We round it up to the next 256 pages boundary so that we
-        * can have a single pgd entry and a single pte table:
-        */
-#define NR_FIX_BTMAPS          64
-#define FIX_BTMAPS_SLOTS       4
-       FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
-                       (__end_of_permanent_fixed_addresses & 255),
-       FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
-       FIX_WP_TEST,
-#ifdef CONFIG_ACPI
-       FIX_ACPI_BEGIN,
-       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-       FIX_OHCI1394_BASE,
-#endif
-       __end_of_fixed_addresses
-};
-
-extern void reserve_top_address(unsigned long reserve);
-
-
-#define FIXADDR_TOP    ((unsigned long)__FIXADDR_TOP)
-
-#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define __FIXADDR_BOOT_SIZE    (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START          (FIXADDR_TOP - __FIXADDR_SIZE)
-#define FIXADDR_BOOT_START     (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
-
-#endif /* !__ASSEMBLY__ */
-#endif /* _ASM_X86_FIXMAP_32_H */
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
deleted file mode 100644 (file)
index 8be7409..0000000
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- */
-
-#ifndef _ASM_X86_FIXMAP_64_H
-#define _ASM_X86_FIXMAP_64_H
-
-#include <linux/kernel.h>
-#include <asm/acpi.h>
-#include <asm/apicdef.h>
-#include <asm/page.h>
-#include <asm/vsyscall.h>
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process.
- *
- * These 'compile-time allocated' memory buffers are
- * fixed-size 4k pages (or larger if used with an increment
- * higher than 1). Use set_fixmap(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-enum fixed_addresses {
-       VSYSCALL_LAST_PAGE,
-       VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
-                           + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
-       VSYSCALL_HPET,
-       FIX_DBGP_BASE,
-       FIX_EARLYCON_MEM_BASE,
-       FIX_APIC_BASE,  /* local (CPU) APIC) -- required for SMP or not */
-       FIX_IO_APIC_BASE_0,
-       FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
-#ifdef CONFIG_PARAVIRT
-       FIX_PARAVIRT_BOOTMAP,
-#endif
-       __end_of_permanent_fixed_addresses,
-#ifdef CONFIG_ACPI
-       FIX_ACPI_BEGIN,
-       FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
-#endif
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
-       FIX_OHCI1394_BASE,
-#endif
-       /*
-        * 256 temporary boot-time mappings, used by early_ioremap(),
-        * before ioremap() is functional.
-        *
-        * We round it up to the next 256 pages boundary so that we
-        * can have a single pgd entry and a single pte table:
-        */
-#define NR_FIX_BTMAPS          64
-#define FIX_BTMAPS_SLOTS       4
-       FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
-                       (__end_of_permanent_fixed_addresses & 255),
-       FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
-       __end_of_fixed_addresses
-};
-
-#define FIXADDR_TOP    (VSYSCALL_END-PAGE_SIZE)
-#define FIXADDR_SIZE   (__end_of_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_START  (FIXADDR_TOP - FIXADDR_SIZE)
-
-/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
-#define FIXADDR_USER_START     ((unsigned long)VSYSCALL32_VSYSCALL)
-#define FIXADDR_USER_END       (FIXADDR_USER_START + PAGE_SIZE)
-
-#endif /* _ASM_X86_FIXMAP_64_H */
index d48bee663a6fadb11ff12578ec4dc7cc3112d954..4b8b98fa7f256e54078669a9800d5f79d977acae 100644 (file)
@@ -1,5 +1 @@
-#ifdef CONFIG_X86_32
-# include "genapic_32.h"
-#else
-# include "genapic_64.h"
-#endif
+#include <asm/apic.h>
diff --git a/arch/x86/include/asm/genapic_32.h b/arch/x86/include/asm/genapic_32.h
deleted file mode 100644 (file)
index 2c05b73..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_32_H
-#define _ASM_X86_GENAPIC_32_H
-
-#include <asm/mpspec.h>
-#include <asm/atomic.h>
-
-/*
- * Generic APIC driver interface.
- *
- * An straight forward mapping of the APIC related parts of the
- * x86 subarchitecture interface to a dynamic object.
- *
- * This is used by the "generic" x86 subarchitecture.
- *
- * Copyright 2003 Andi Kleen, SuSE Labs.
- */
-
-struct mpc_bus;
-struct mpc_table;
-struct mpc_cpu;
-
-struct genapic {
-       char *name;
-       int (*probe)(void);
-
-       int (*apic_id_registered)(void);
-       const struct cpumask *(*target_cpus)(void);
-       int int_delivery_mode;
-       int int_dest_mode;
-       int ESR_DISABLE;
-       int apic_destination_logical;
-       unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
-       unsigned long (*check_apicid_present)(int apicid);
-       int no_balance_irq;
-       int no_ioapic_check;
-       void (*init_apic_ldr)(void);
-       physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
-
-       void (*setup_apic_routing)(void);
-       int (*multi_timer_check)(int apic, int irq);
-       int (*apicid_to_node)(int logical_apicid);
-       int (*cpu_to_logical_apicid)(int cpu);
-       int (*cpu_present_to_apicid)(int mps_cpu);
-       physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
-       void (*setup_portio_remap)(void);
-       int (*check_phys_apicid_present)(int boot_cpu_physical_apicid);
-       void (*enable_apic_mode)(void);
-       u32 (*phys_pkg_id)(u32 cpuid_apic, int index_msb);
-
-       /* mpparse */
-       /* When one of the next two hooks returns 1 the genapic
-          is switched to this. Essentially they are additional probe
-          functions. */
-       int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
-                            char *productid);
-       int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-
-       unsigned (*get_apic_id)(unsigned long x);
-       unsigned long apic_id_mask;
-       unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-       unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-                                              const struct cpumask *andmask);
-       void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-
-#ifdef CONFIG_SMP
-       /* ipi */
-       void (*send_IPI_mask)(const struct cpumask *mask, int vector);
-       void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
-                                        int vector);
-       void (*send_IPI_allbutself)(int vector);
-       void (*send_IPI_all)(int vector);
-#endif
-       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-       int trampoline_phys_low;
-       int trampoline_phys_high;
-       void (*wait_for_init_deassert)(atomic_t *deassert);
-       void (*smp_callin_clear_local_apic)(void);
-       void (*store_NMI_vector)(unsigned short *high, unsigned short *low);
-       void (*restore_NMI_vector)(unsigned short *high, unsigned short *low);
-       void (*inquire_remote_apic)(int apicid);
-};
-
-#define APICFUNC(x) .x = x,
-
-/* More functions could be probably marked IPIFUNC and save some space
-   in UP GENERICARCH kernels, but I don't have the nerve right now
-   to untangle this mess. -AK  */
-#ifdef CONFIG_SMP
-#define IPIFUNC(x) APICFUNC(x)
-#else
-#define IPIFUNC(x)
-#endif
-
-#define APIC_INIT(aname, aprobe)                       \
-{                                                      \
-       .name = aname,                                  \
-       .probe = aprobe,                                \
-       .int_delivery_mode = INT_DELIVERY_MODE,         \
-       .int_dest_mode = INT_DEST_MODE,                 \
-       .no_balance_irq = NO_BALANCE_IRQ,               \
-       .ESR_DISABLE = esr_disable,                     \
-       .apic_destination_logical = APIC_DEST_LOGICAL,  \
-       APICFUNC(apic_id_registered)                    \
-       APICFUNC(target_cpus)                           \
-       APICFUNC(check_apicid_used)                     \
-       APICFUNC(check_apicid_present)                  \
-       APICFUNC(init_apic_ldr)                         \
-       APICFUNC(ioapic_phys_id_map)                    \
-       APICFUNC(setup_apic_routing)                    \
-       APICFUNC(multi_timer_check)                     \
-       APICFUNC(apicid_to_node)                        \
-       APICFUNC(cpu_to_logical_apicid)                 \
-       APICFUNC(cpu_present_to_apicid)                 \
-       APICFUNC(apicid_to_cpu_present)                 \
-       APICFUNC(setup_portio_remap)                    \
-       APICFUNC(check_phys_apicid_present)             \
-       APICFUNC(mps_oem_check)                         \
-       APICFUNC(get_apic_id)                           \
-       .apic_id_mask = APIC_ID_MASK,                   \
-       APICFUNC(cpu_mask_to_apicid)                    \
-       APICFUNC(cpu_mask_to_apicid_and)                \
-       APICFUNC(vector_allocation_domain)              \
-       APICFUNC(acpi_madt_oem_check)                   \
-       IPIFUNC(send_IPI_mask)                          \
-       IPIFUNC(send_IPI_allbutself)                    \
-       IPIFUNC(send_IPI_all)                           \
-       APICFUNC(enable_apic_mode)                      \
-       APICFUNC(phys_pkg_id)                           \
-       .trampoline_phys_low = TRAMPOLINE_PHYS_LOW,             \
-       .trampoline_phys_high = TRAMPOLINE_PHYS_HIGH,           \
-       APICFUNC(wait_for_init_deassert)                \
-       APICFUNC(smp_callin_clear_local_apic)           \
-       APICFUNC(store_NMI_vector)                      \
-       APICFUNC(restore_NMI_vector)                    \
-       APICFUNC(inquire_remote_apic)                   \
-}
-
-extern struct genapic *genapic;
-extern void es7000_update_genapic_to_cluster(void);
-
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-#define get_uv_system_type()           UV_NONE
-#define is_uv_system()                 0
-#define uv_wakeup_secondary(a, b)      1
-#define uv_system_init()               do {} while (0)
-
-
-#endif /* _ASM_X86_GENAPIC_32_H */
diff --git a/arch/x86/include/asm/genapic_64.h b/arch/x86/include/asm/genapic_64.h
deleted file mode 100644 (file)
index adf32fb..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-#ifndef _ASM_X86_GENAPIC_64_H
-#define _ASM_X86_GENAPIC_64_H
-
-#include <linux/cpumask.h>
-
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch data struct.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-
-struct genapic {
-       char *name;
-       int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-       u32 int_delivery_mode;
-       u32 int_dest_mode;
-       int (*apic_id_registered)(void);
-       const struct cpumask *(*target_cpus)(void);
-       void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
-       void (*init_apic_ldr)(void);
-       /* ipi */
-       void (*send_IPI_mask)(const struct cpumask *mask, int vector);
-       void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
-                                        int vector);
-       void (*send_IPI_allbutself)(int vector);
-       void (*send_IPI_all)(int vector);
-       void (*send_IPI_self)(int vector);
-       /* */
-       unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
-       unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
-                                              const struct cpumask *andmask);
-       unsigned int (*phys_pkg_id)(int index_msb);
-       unsigned int (*get_apic_id)(unsigned long x);
-       unsigned long (*set_apic_id)(unsigned int id);
-       unsigned long apic_id_mask;
-       /* wakeup_secondary_cpu */
-       int (*wakeup_cpu)(int apicid, unsigned long start_eip);
-};
-
-extern struct genapic *genapic;
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-extern struct genapic apic_x2apic_cluster;
-extern struct genapic apic_x2apic_phys;
-extern int acpi_madt_oem_check(char *, char *);
-
-extern void apic_send_IPI_self(int vector);
-enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
-extern enum uv_system_type get_uv_system_type(void);
-extern int is_uv_system(void);
-
-extern struct genapic apic_x2apic_uv_x;
-DECLARE_PER_CPU(int, x2apic_extra_bits);
-extern void uv_cpu_init(void);
-extern void uv_system_init(void);
-extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
-
-extern void setup_apic_routing(void);
-
-#endif /* _ASM_X86_GENAPIC_64_H */
index 000787df66e6340a0e81b4a705e511692137a53f..039db6aa8e0271951706ee20bd9b819098f8002a 100644 (file)
@@ -1,11 +1,53 @@
-#ifdef CONFIG_X86_32
-# include "hardirq_32.h"
-#else
-# include "hardirq_64.h"
+#ifndef _ASM_X86_HARDIRQ_H
+#define _ASM_X86_HARDIRQ_H
+
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+       unsigned int __softirq_pending;
+       unsigned int __nmi_count;       /* arch dependent */
+       unsigned int irq0_irqs;
+#ifdef CONFIG_X86_LOCAL_APIC
+       unsigned int apic_timer_irqs;   /* arch dependent */
+       unsigned int irq_spurious_count;
+#endif
+       unsigned int generic_irqs;      /* arch dependent */
+#ifdef CONFIG_SMP
+       unsigned int irq_resched_count;
+       unsigned int irq_call_count;
+       unsigned int irq_tlb_count;
+#endif
+#ifdef CONFIG_X86_MCE
+       unsigned int irq_thermal_count;
+# ifdef CONFIG_X86_64
+       unsigned int irq_threshold_count;
+# endif
 #endif
+} ____cacheline_aligned irq_cpustat_t;
+
+DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
+
+/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
+#define MAX_HARDIRQS_PER_CPU NR_VECTORS
+
+#define __ARCH_IRQ_STAT
+
+#define inc_irq_stat(member)   percpu_add(irq_stat.member, 1)
+
+#define local_softirq_pending()        percpu_read(irq_stat.__softirq_pending)
+
+#define __ARCH_SET_SOFTIRQ_PENDING
+
+#define set_softirq_pending(x) percpu_write(irq_stat.__softirq_pending, (x))
+#define or_softirq_pending(x)  percpu_or(irq_stat.__softirq_pending, (x))
+
+extern void ack_bad_irq(unsigned int irq);
 
 extern u64 arch_irq_stat_cpu(unsigned int cpu);
 #define arch_irq_stat_cpu      arch_irq_stat_cpu
 
 extern u64 arch_irq_stat(void);
 #define arch_irq_stat          arch_irq_stat
+
+#endif /* _ASM_X86_HARDIRQ_H */
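
With the unified irq_cpustat_t, interrupt handlers bump their statistics with a single percpu add on both word sizes; a hedged sketch (hypothetical function name, modeled on the APIC timer path):

    /* Illustration only -- not from this patch. */
    static void example_timer_tick_account(void)
    {
            inc_irq_stat(apic_timer_irqs);  /* percpu_add(), no lock needed */
    }
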
diff --git a/arch/x86/include/asm/hardirq_32.h b/arch/x86/include/asm/hardirq_32.h
deleted file mode 100644 (file)
index cf7954d..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_32_H
-#define _ASM_X86_HARDIRQ_32_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-
-typedef struct {
-       unsigned int __softirq_pending;
-       unsigned long idle_timestamp;
-       unsigned int __nmi_count;       /* arch dependent */
-       unsigned int apic_timer_irqs;   /* arch dependent */
-       unsigned int irq0_irqs;
-       unsigned int irq_resched_count;
-       unsigned int irq_call_count;
-       unsigned int irq_tlb_count;
-       unsigned int irq_thermal_count;
-       unsigned int irq_spurious_count;
-} ____cacheline_aligned irq_cpustat_t;
-
-DECLARE_PER_CPU(irq_cpustat_t, irq_stat);
-
-#define __ARCH_IRQ_STAT
-#define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
-
-#define inc_irq_stat(member)   (__get_cpu_var(irq_stat).member++)
-
-void ack_bad_irq(unsigned int irq);
-#include <linux/irq_cpustat.h>
-
-#endif /* _ASM_X86_HARDIRQ_32_H */
diff --git a/arch/x86/include/asm/hardirq_64.h b/arch/x86/include/asm/hardirq_64.h
deleted file mode 100644 (file)
index b5a6b5d..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef _ASM_X86_HARDIRQ_64_H
-#define _ASM_X86_HARDIRQ_64_H
-
-#include <linux/threads.h>
-#include <linux/irq.h>
-#include <asm/pda.h>
-#include <asm/apic.h>
-
-/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
-#define MAX_HARDIRQS_PER_CPU NR_VECTORS
-
-#define __ARCH_IRQ_STAT 1
-
-#define inc_irq_stat(member)   add_pda(member, 1)
-
-#define local_softirq_pending() read_pda(__softirq_pending)
-
-#define __ARCH_SET_SOFTIRQ_PENDING 1
-
-#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
-#define or_softirq_pending(x)  or_pda(__softirq_pending, (x))
-
-extern void ack_bad_irq(unsigned int irq);
-
-#endif /* _ASM_X86_HARDIRQ_64_H */
index bf9276bea6602fca7bf2b93e6aa640a083e57f99..014c2b85ae45eae61201dcf19da9e04f2940e721 100644 (file)
@@ -63,6 +63,7 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot);
 void *kmap_atomic(struct page *page, enum km_type type);
 void kunmap_atomic(void *kvaddr, enum km_type type);
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
 struct page *kmap_atomic_to_page(void *ptr);
 
 #ifndef CONFIG_PARAVIRT
index 8de644b6b95992deb500f0e2325c77db87e105aa..b762ea49bd703ab3b28958cf83b6908e825e57a4 100644 (file)
 #include <asm/irq.h>
 #include <asm/sections.h>
 
-#define platform_legacy_irq(irq)       ((irq) < 16)
-
 /* Interrupt handlers registered during init_IRQ */
 extern void apic_timer_interrupt(void);
+extern void generic_interrupt(void);
 extern void error_interrupt(void);
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
@@ -58,7 +57,7 @@ extern void make_8259A_irq(unsigned int irq);
 extern void init_8259A(int aeoi);
 
 /* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= 16) || ((1<<(x)) & io_apic_irqs))
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
 extern unsigned long io_apic_irqs;
 
 extern void init_VISWS_APIC_irqs(void);
@@ -67,15 +66,7 @@ extern void disable_IO_APIC(void);
 extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);
 extern void setup_ioapic_dest(void);
 
-#ifdef CONFIG_X86_64
 extern void enable_IO_APIC(void);
-#endif
-
-/* IPI functions */
-#ifdef CONFIG_X86_32
-extern void send_IPI_self(int vector);
-#endif
-extern void send_IPI(int dest, int vector);
 
 /* Statistics */
 extern atomic_t irq_err_count;
@@ -84,21 +75,11 @@ extern atomic_t irq_mis_count;
 /* EISA */
 extern void eisa_set_level_irq(unsigned int irq);
 
-/* Voyager functions */
-extern asmlinkage void vic_cpi_interrupt(void);
-extern asmlinkage void vic_sys_interrupt(void);
-extern asmlinkage void vic_cmn_interrupt(void);
-extern asmlinkage void qic_timer_interrupt(void);
-extern asmlinkage void qic_invalidate_interrupt(void);
-extern asmlinkage void qic_reschedule_interrupt(void);
-extern asmlinkage void qic_enable_irq_interrupt(void);
-extern asmlinkage void qic_call_function_interrupt(void);
-
 /* SMP */
 extern void smp_apic_timer_interrupt(struct pt_regs *);
 extern void smp_spurious_interrupt(struct pt_regs *);
 extern void smp_error_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
 extern void smp_reschedule_interrupt(struct pt_regs *);
 extern void smp_call_function_interrupt(struct pt_regs *);
 extern void smp_call_function_single_interrupt(struct pt_regs *);
index 58d7091eeb1fc1e8955c87246121060fbb851d31..1a99e6c092afcfaaad1a73b95ee4ecc7249f231b 100644 (file)
@@ -60,4 +60,8 @@ extern struct irq_chip i8259A_chip;
 extern void mask_8259A(void);
 extern void unmask_8259A(void);
 
+#ifdef CONFIG_X86_32
+extern void init_ISA_irqs(void);
+#endif
+
 #endif /* _ASM_X86_I8259_H */
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
new file mode 100644 (file)
index 0000000..36fb1a6
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _ASM_X86_INIT_32_H
+#define _ASM_X86_INIT_32_H
+
+#ifdef CONFIG_X86_32
+extern void __init early_ioremap_page_table_range_init(void);
+#endif
+
+extern unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask);
+
+
+extern unsigned long __initdata e820_table_start;
+extern unsigned long __meminitdata e820_table_end;
+extern unsigned long __meminitdata e820_table_top;
+
+#endif /* _ASM_X86_INIT_32_H */
index 1dbbdf4be9b466ba2d77abedbe9e4f364576ec78..e5383e3d2f8c2dcc84f8e1a10a53fc8125800c60 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/compiler.h>
 #include <asm-generic/int-ll64.h>
+#include <asm/page.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -80,6 +81,98 @@ static inline void writeq(__u64 val, volatile void __iomem *addr)
 #define readq                  readq
 #define writeq                 writeq
 
+/**
+ *     virt_to_phys    -       map virtual addresses to physical
+ *     @address: address to remap
+ *
+ *     The returned physical address is the physical (CPU) mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses directly mapped or allocated via kmalloc.
+ *
+ *     This function does not give bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+
+static inline phys_addr_t virt_to_phys(volatile void *address)
+{
+       return __pa(address);
+}
+
+/**
+ *     phys_to_virt    -       map physical address to virtual
+ *     @address: address to remap
+ *
+ *     The returned virtual address is a current CPU mapping for
+ *     the memory address given. It is only valid to use this function on
+ *     addresses that have a kernel mapping
+ *
+ *     This function does not handle bus mappings for DMA transfers. In
+ *     almost all conceivable cases a device driver should not be using
+ *     this function
+ */
+
+static inline void *phys_to_virt(phys_addr_t address)
+{
+       return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+/*
+ * ISA I/O bus memory addresses are 1:1 with the physical address.
+ * However, we truncate the address to unsigned int to avoid undesirable
+ * promotions in legacy drivers.
+ */
+static inline unsigned int isa_virt_to_bus(volatile void *address)
+{
+       return (unsigned int)virt_to_phys(address);
+}
+#define isa_page_to_bus(page)  ((unsigned int)page_to_phys(page))
+#define isa_bus_to_virt                phys_to_virt
+
+/*
+ * However PCI ones are not necessarily 1:1 and therefore these interfaces
+ * are forbidden in portable PCI drivers.
+ *
+ * Allow them on x86 for legacy drivers, though.
+ */
+#define virt_to_bus virt_to_phys
+#define bus_to_virt phys_to_virt
+
+/**
+ * ioremap     -   map bus memory into CPU space
+ * @offset:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
+ */
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
+                               unsigned long prot_val);
+
+/*
+ * The default ioremap() behavior is non-cached:
+ */
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
+{
+       return ioremap_nocache(offset, size);
+}
+
+extern void iounmap(volatile void __iomem *addr);
+
+
 #ifdef CONFIG_X86_32
 # include "io_32.h"
 #else
@@ -91,7 +184,7 @@ extern void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
 
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                                unsigned long prot_val);
-extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
 
 /*
  * early_ioremap() and early_iounmap() are for temporary early boot-time
@@ -103,7 +196,7 @@ extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
+#define IO_SPACE_LIMIT 0xffff
 
 #endif /* _ASM_X86_IO_H */
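With the 32-bit and 64-bit copies folded into this one header, and with
virt_to_phys() now returning phys_addr_t, the usual pattern reads the same on
both halves of the architecture. A short sketch; the bus address and sizes
are invented for illustration:

/*
 * Sketch: direct-mapped memory vs. MMIO through the unified <asm/io.h>.
 * 0xfed00000/0x1000 is an assumed, illustrative register window.
 */
static int __init io_example_init(void)
{
        void __iomem *regs;
        void *buf = kmalloc(64, GFP_KERNEL);    /* direct-mapped: __pa() is valid */

        if (!buf)
                return -ENOMEM;
        printk(KERN_INFO "buf phys: %llx\n",
               (unsigned long long)virt_to_phys(buf));

        regs = ioremap(0xfed00000, 0x1000);     /* default: non-cached mapping */
        if (regs) {
                printk(KERN_INFO "reg0: %08x\n", readl(regs));
                iounmap(regs);
        }
        kfree(buf);
        return 0;
}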
index d8e242e1b396a0dedeb3810127628d2adbe5b949..a299900f5920898977a4b0935382a898f673082e 100644 (file)
@@ -37,8 +37,6 @@
   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
   */
 
-#define IO_SPACE_LIMIT 0xffff
-
 #define XQUAD_PORTIO_BASE 0xfe400000
 #define XQUAD_PORTIO_QUAD 0x40000  /* 256k per quad. */
 
  */
 #define xlate_dev_kmem_ptr(p)  p
 
-/**
- *     virt_to_phys    -       map virtual addresses to physical
- *     @address: address to remap
- *
- *     The returned physical address is the physical (CPU) mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses directly mapped or allocated via kmalloc.
- *
- *     This function does not give bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-       return __pa(address);
-}
-
-/**
- *     phys_to_virt    -       map physical address to virtual
- *     @address: address to remap
- *
- *     The returned virtual address is a current CPU mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses that have a kernel mapping
- *
- *     This function does not handle bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-
-static inline void *phys_to_virt(unsigned long address)
-{
-       return __va(address);
-}
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
-/**
- * ioremap     -   map bus memory into CPU space
- * @offset:    bus address of the memory
- * @size:      size of the resource to map
- *
- * ioremap performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * If the area you are trying to map is a PCI BAR you should have a
- * look at pci_iomap().
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-                               unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-       return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
 static inline void
 memset_io(volatile void __iomem *addr, unsigned char val, int count)
 {
index 563c16270ba6d3fee1bda648f873109b7c1d2b3f..244067893af42ac6c40a65126896b2604cf5cd7f 100644 (file)
@@ -136,73 +136,12 @@ __OUTS(b)
 __OUTS(w)
 __OUTS(l)
 
-#define IO_SPACE_LIMIT 0xffff
-
 #if defined(__KERNEL__) && defined(__x86_64__)
 
 #include <linux/vmalloc.h>
 
-#ifndef __i386__
-/*
- * Change virtual addresses to physical addresses and vv.
- * These are pretty trivial
- */
-static inline unsigned long virt_to_phys(volatile void *address)
-{
-       return __pa(address);
-}
-
-static inline void *phys_to_virt(unsigned long address)
-{
-       return __va(address);
-}
-#endif
-
-/*
- * Change "struct page" to physical address.
- */
-#define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-
 #include <asm-generic/iomap.h>
 
-/*
- * This one maps high address device memory and turns off caching for that area.
- * it's useful if some control registers are in such an area and write combining
- * or read caching is not desirable:
- */
-extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
-extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
-                               unsigned long prot_val);
-
-/*
- * The default ioremap() behavior is non-cached:
- */
-static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
-{
-       return ioremap_nocache(offset, size);
-}
-
-extern void iounmap(volatile void __iomem *addr);
-
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-
-/*
- * ISA I/O bus memory addresses are 1:1 with the physical address.
- */
-#define isa_virt_to_bus virt_to_phys
-#define isa_page_to_bus page_to_phys
-#define isa_bus_to_virt phys_to_virt
-
-/*
- * However PCI ones are not necessarily 1:1 and therefore these interfaces
- * are forbidden in portable PCI drivers.
- *
- * Allow them on x86 for legacy drivers, though.
- */
-#define virt_to_bus virt_to_phys
-#define bus_to_virt phys_to_virt
-
 void __memcpy_fromio(void *, unsigned long, unsigned);
 void __memcpy_toio(unsigned long, const void *, unsigned);
 
index 7a1f44ac1f17e127e054852d2ccc8baefb27b528..59cb4a1317b774b0f89a87890355bf70157a016b 100644 (file)
@@ -114,38 +114,16 @@ struct IR_IO_APIC_route_entry {
 extern int nr_ioapics;
 extern int nr_ioapic_registers[MAX_IO_APICS];
 
-/*
- * MP-BIOS irq configuration table structures:
- */
-
 #define MP_MAX_IOAPIC_PIN 127
 
-struct mp_config_ioapic {
-       unsigned long mp_apicaddr;
-       unsigned int mp_apicid;
-       unsigned char mp_type;
-       unsigned char mp_apicver;
-       unsigned char mp_flags;
-};
-
-struct mp_config_intsrc {
-       unsigned int mp_dstapic;
-       unsigned char mp_type;
-       unsigned char mp_irqtype;
-       unsigned short mp_irqflag;
-       unsigned char mp_srcbus;
-       unsigned char mp_srcbusirq;
-       unsigned char mp_dstirq;
-};
-
 /* I/O APIC entries */
-extern struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
+extern struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
 
 /* # of MP IRQ source entries */
 extern int mp_irq_entries;
 
 /* MP IRQ source entries */
-extern struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
+extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
 /* non-0 if default (table-less) MP configuration */
 extern int mpc_default_type;
@@ -165,15 +143,6 @@ extern int noioapicreroute;
 /* 1 if the timer IRQ uses the '8259A Virtual Wire' mode */
 extern int timer_through_8259;
 
-static inline void disable_ioapic_setup(void)
-{
-#ifdef CONFIG_PCI
-       noioapicquirk = 1;
-       noioapicreroute = -1;
-#endif
-       skip_ioapic_setup = 1;
-}
-
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -200,6 +169,12 @@ extern void reinit_intr_remapped_IO_APIC(int);
 
 extern void probe_nr_irqs_gsi(void);
 
+extern int setup_ioapic_entry(int apic, int irq,
+                             struct IO_APIC_route_entry *entry,
+                             unsigned int destination, int trigger,
+                             int polarity, int vector);
+extern void ioapic_write_entry(int apic, int pin,
+                              struct IO_APIC_route_entry e);
 #else  /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
 static const int timer_through_8259 = 0;
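Dropping the mp_config_ioapic/mp_config_intsrc wrappers means mp_ioapics[]
and mp_irqs[] now hold the raw MP-table record types, so the field names lose
their mp_ prefix. A hedged sketch of walking the table under the new layout
(field names follow struct mpc_ioapic in <asm/mpspec_def.h>; the printk
itself is illustrative):

/* Sketch: enumerate registered IO-APICs using the raw mpc_ioapic records. */
static void __init dump_ioapics(void)
{
        int i;

        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "IOAPIC[%d]: apic_id %d at %#x\n",
                       i, mp_ioapics[i].apicid, mp_ioapics[i].apicaddr);
}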
index c745a306f7d3572be1e5baa29a6cba7eca5b03c7..0b7228268a63e1d9bfacc13aca04c664e8297b1d 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_IPI_H
 #define _ASM_X86_IPI_H
 
+#ifdef CONFIG_X86_LOCAL_APIC
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -55,8 +57,8 @@ static inline void __xapic_wait_icr_idle(void)
                cpu_relax();
 }
 
-static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
-                                      unsigned int dest)
+static inline void
+__default_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
 {
        /*
         * Subtle. In the case of the 'never do double writes' workaround
@@ -87,8 +89,8 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector,
  * This is used to send an IPI with no shorthand notation (the destination is
  * specified in bits 56 to 63 of the ICR).
  */
-static inline void __send_IPI_dest_field(unsigned int mask, int vector,
-                                      unsigned int dest)
+static inline void
+__default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
        unsigned long cfg;
 
@@ -117,41 +119,44 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
        native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(const struct cpumask *mask,
-                                         int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
+extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
+                                                int vector);
+extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+                                                        int vector);
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                        int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                        int vector);
 
-       /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send
-        * to an arbitrary mask, so I do a unicast to each CPU instead.
-        * - mbligh
-        */
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
-                                     vector, APIC_DEST_PHYSICAL);
-       }
-       local_irq_restore(flags);
+/* Avoid include hell */
+#define NMI_VECTOR 0x02
+
+extern int no_broadcast;
+
+static inline void __default_local_send_IPI_allbutself(int vector)
+{
+       if (no_broadcast || vector == NMI_VECTOR)
+               apic->send_IPI_mask_allbutself(cpu_online_mask, vector);
+       else
+               __default_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, apic->dest_logical);
 }
 
-static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
-                                           int vector)
+static inline void __default_local_send_IPI_all(int vector)
 {
-       unsigned long flags;
-       unsigned int query_cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       /* See Hack comment above */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               if (query_cpu != this_cpu)
-                       __send_IPI_dest_field(
-                               per_cpu(x86_cpu_to_apicid, query_cpu),
-                               vector, APIC_DEST_PHYSICAL);
-       local_irq_restore(flags);
+       if (no_broadcast || vector == NMI_VECTOR)
+               apic->send_IPI_mask(cpu_online_mask, vector);
+       else
+               __default_send_IPI_shortcut(APIC_DEST_ALLINC, vector, apic->dest_logical);
 }
 
+#ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_logical(const struct cpumask *mask,
+                                                int vector);
+extern void default_send_IPI_allbutself(int vector);
+extern void default_send_IPI_all(int vector);
+extern void default_send_IPI_self(int vector);
+#endif
+
+#endif
+
 #endif /* _ASM_X86_IPI_H */
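The split here is deliberate: the inline __default_local_send_IPI_*() helpers
keep the "shorthand unless unsafe" policy visible, while the mask-walking
paths move out of line behind the apic driver. A sketch of what the
out-of-line default_send_IPI_allbutself() body amounts to, reconstructed from
the helpers above rather than quoted from ipi.c:

/* Sketch: plausible out-of-line wrapper over the inline helper above. */
void default_send_IPI_allbutself(int vector)
{
        /*
         * A single online CPU would take an APIC send error on a
         * broadcast, so skip the IPI entirely in that case.
         */
        if (num_online_cpus() <= 1)
                return;

        __default_local_send_IPI_allbutself(vector);
}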
index 592688ed04d33462d172936d56dff55fc6e52eac..f38481bcd45538f26d640a2c460a3510246510d2 100644 (file)
@@ -36,9 +36,12 @@ static inline int irq_canonicalize(int irq)
 extern void fixup_irqs(void);
 #endif
 
-extern unsigned int do_IRQ(struct pt_regs *regs);
+extern void (*generic_interrupt_extension)(void);
 extern void init_IRQ(void);
 extern void native_init_IRQ(void);
+extern bool handle_irq(unsigned irq, struct pt_regs *regs);
+
+extern unsigned int do_IRQ(struct pt_regs *regs);
 
 /* Interrupt vector management */
 extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
index 89c898ab298b398ee204c7c64122e3922841a133..77843225b7eac71e06331383106d7d962f332043 100644 (file)
@@ -1,5 +1,31 @@
-#ifdef CONFIG_X86_32
-# include "irq_regs_32.h"
-#else
-# include "irq_regs_64.h"
-#endif
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack, stored in the per-cpu area.
+ *
+ * Jeremy Fitzhardinge <jeremy@goop.org>
+ */
+#ifndef _ASM_X86_IRQ_REGS_H
+#define _ASM_X86_IRQ_REGS_H
+
+#include <asm/percpu.h>
+
+#define ARCH_HAS_OWN_IRQ_REGS
+
+DECLARE_PER_CPU(struct pt_regs *, irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+       return percpu_read(irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+       struct pt_regs *old_regs;
+
+       old_regs = get_irq_regs();
+       percpu_write(irq_regs, new_regs);
+
+       return old_regs;
+}
+
+#endif /* _ASM_X86_IRQ_REGS_H */
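The per-cpu irq_regs slot is driven from the interrupt entry path: a handler
publishes the interrupted context on entry and restores the previous pointer
on exit, so nesting works. A sketch of the expected caller shape, simplified
from the do_IRQ() pattern (the real body also handles lookup failures):

/* Sketch: canonical set_irq_regs()/get_irq_regs() usage on entry/exit. */
unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned vector = ~regs->orig_ax;       /* vector pushed by the entry stub */

        irq_enter();
        /* ... dispatch the handler registered for this vector ... */
        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}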
diff --git a/arch/x86/include/asm/irq_regs_32.h b/arch/x86/include/asm/irq_regs_32.h
deleted file mode 100644 (file)
index 86afd74..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Per-cpu current frame pointer - the location of the last exception frame on
- * the stack, stored in the per-cpu area.
- *
- * Jeremy Fitzhardinge <jeremy@goop.org>
- */
-#ifndef _ASM_X86_IRQ_REGS_32_H
-#define _ASM_X86_IRQ_REGS_32_H
-
-#include <asm/percpu.h>
-
-#define ARCH_HAS_OWN_IRQ_REGS
-
-DECLARE_PER_CPU(struct pt_regs *, irq_regs);
-
-static inline struct pt_regs *get_irq_regs(void)
-{
-       return x86_read_percpu(irq_regs);
-}
-
-static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
-{
-       struct pt_regs *old_regs;
-
-       old_regs = get_irq_regs();
-       x86_write_percpu(irq_regs, new_regs);
-
-       return old_regs;
-}
-
-#endif /* _ASM_X86_IRQ_REGS_32_H */
diff --git a/arch/x86/include/asm/irq_regs_64.h b/arch/x86/include/asm/irq_regs_64.h
deleted file mode 100644 (file)
index 3dd9c0b..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
index f7ff65032b9d66aee077fcc3ddbcddd40bd4f75d..3cbd79bbb47c82613341fca01ee6e174319342fe 100644 (file)
@@ -1,47 +1,69 @@
 #ifndef _ASM_X86_IRQ_VECTORS_H
 #define _ASM_X86_IRQ_VECTORS_H
 
-#include <linux/threads.h>
+/*
+ * Linux IRQ vector layout.
+ *
+ * There are 256 IDT entries (per CPU - each entry is 8 bytes) which can
+ * be defined by Linux. They are used as a jump table by the CPU when a
+ * given vector is triggered - by a CPU-external, CPU-internal or
+ * software-triggered event.
+ *
+ * Linux sets the kernel code address each entry jumps to early during
+ * bootup, and never changes them. This is the general layout of the
+ * IDT entries:
+ *
+ *  Vectors   0 ...  31 : system traps and exceptions - hardcoded events
+ *  Vectors  32 ... 127 : device interrupts
+ *  Vector  128         : legacy int80 syscall interface
+ *  Vectors 129 ... 237 : device interrupts
+ *  Vectors 238 ... 255 : special interrupts
+ *
+ * 64-bit x86 has per CPU IDT tables, 32-bit has one shared IDT table.
+ *
+ * This file enumerates the exact layout of them:
+ */
 
-#define NMI_VECTOR             0x02
+#define NMI_VECTOR                     0x02
 
 /*
  * IDT vectors usable for external interrupt sources start
  * at 0x20:
  */
-#define FIRST_EXTERNAL_VECTOR  0x20
+#define FIRST_EXTERNAL_VECTOR          0x20
 
 #ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR                0x80
+# define SYSCALL_VECTOR                        0x80
 #else
-# define IA32_SYSCALL_VECTOR   0x80
+# define IA32_SYSCALL_VECTOR           0x80
 #endif
 
 /*
  * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
  * cleanup after irq migration.
  */
-#define IRQ_MOVE_CLEANUP_VECTOR        FIRST_EXTERNAL_VECTOR
+#define IRQ_MOVE_CLEANUP_VECTOR                FIRST_EXTERNAL_VECTOR
 
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
  */
-#define IRQ0_VECTOR            (FIRST_EXTERNAL_VECTOR + 0x10)
-#define IRQ1_VECTOR            (IRQ0_VECTOR + 1)
-#define IRQ2_VECTOR            (IRQ0_VECTOR + 2)
-#define IRQ3_VECTOR            (IRQ0_VECTOR + 3)
-#define IRQ4_VECTOR            (IRQ0_VECTOR + 4)
-#define IRQ5_VECTOR            (IRQ0_VECTOR + 5)
-#define IRQ6_VECTOR            (IRQ0_VECTOR + 6)
-#define IRQ7_VECTOR            (IRQ0_VECTOR + 7)
-#define IRQ8_VECTOR            (IRQ0_VECTOR + 8)
-#define IRQ9_VECTOR            (IRQ0_VECTOR + 9)
-#define IRQ10_VECTOR           (IRQ0_VECTOR + 10)
-#define IRQ11_VECTOR           (IRQ0_VECTOR + 11)
-#define IRQ12_VECTOR           (IRQ0_VECTOR + 12)
-#define IRQ13_VECTOR           (IRQ0_VECTOR + 13)
-#define IRQ14_VECTOR           (IRQ0_VECTOR + 14)
-#define IRQ15_VECTOR           (IRQ0_VECTOR + 15)
+#define IRQ0_VECTOR                    (FIRST_EXTERNAL_VECTOR + 0x10)
+
+#define IRQ1_VECTOR                    (IRQ0_VECTOR +  1)
+#define IRQ2_VECTOR                    (IRQ0_VECTOR +  2)
+#define IRQ3_VECTOR                    (IRQ0_VECTOR +  3)
+#define IRQ4_VECTOR                    (IRQ0_VECTOR +  4)
+#define IRQ5_VECTOR                    (IRQ0_VECTOR +  5)
+#define IRQ6_VECTOR                    (IRQ0_VECTOR +  6)
+#define IRQ7_VECTOR                    (IRQ0_VECTOR +  7)
+#define IRQ8_VECTOR                    (IRQ0_VECTOR +  8)
+#define IRQ9_VECTOR                    (IRQ0_VECTOR +  9)
+#define IRQ10_VECTOR                   (IRQ0_VECTOR + 10)
+#define IRQ11_VECTOR                   (IRQ0_VECTOR + 11)
+#define IRQ12_VECTOR                   (IRQ0_VECTOR + 12)
+#define IRQ13_VECTOR                   (IRQ0_VECTOR + 13)
+#define IRQ14_VECTOR                   (IRQ0_VECTOR + 14)
+#define IRQ15_VECTOR                   (IRQ0_VECTOR + 15)
 
 /*
  * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
  *  some of the following vectors are 'rare', they are merged
  *  into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
  *  TLB, reschedule and local APIC vectors are performance-critical.
- *
- *  Vectors 0xf0-0xfa are free (reserved for future Linux use).
  */
-#ifdef CONFIG_X86_32
-
-# define SPURIOUS_APIC_VECTOR          0xff
-# define ERROR_APIC_VECTOR             0xfe
-# define INVALIDATE_TLB_VECTOR         0xfd
-# define RESCHEDULE_VECTOR             0xfc
-# define CALL_FUNCTION_VECTOR          0xfb
-# define CALL_FUNCTION_SINGLE_VECTOR   0xfa
-# define THERMAL_APIC_VECTOR           0xf0
-
-#else
 
 #define SPURIOUS_APIC_VECTOR           0xff
+/*
+ * Sanity check
+ */
+#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
+# error SPURIOUS_APIC_VECTOR definition error
+#endif
+
 #define ERROR_APIC_VECTOR              0xfe
 #define RESCHEDULE_VECTOR              0xfd
 #define CALL_FUNCTION_VECTOR           0xfc
 #define CALL_FUNCTION_SINGLE_VECTOR    0xfb
 #define THERMAL_APIC_VECTOR            0xfa
-#define THRESHOLD_APIC_VECTOR          0xf9
-#define UV_BAU_MESSAGE                 0xf8
-#define INVALIDATE_TLB_VECTOR_END      0xf7
-#define INVALIDATE_TLB_VECTOR_START    0xf0    /* f0-f7 used for TLB flush */
-
-#define NUM_INVALIDATE_TLB_VECTORS     8
 
+#ifdef CONFIG_X86_32
+/* 0xf8 - 0xf9 : free */
+#else
+# define THRESHOLD_APIC_VECTOR         0xf9
+# define UV_BAU_MESSAGE                        0xf8
 #endif
 
+/* f0-f7 used for spreading out TLB flushes: */
+#define INVALIDATE_TLB_VECTOR_END      0xf7
+#define INVALIDATE_TLB_VECTOR_START    0xf0
+#define NUM_INVALIDATE_TLB_VECTORS        8
+
 /*
  * Local APIC timer IRQ vector is on a different priority level,
  * to work around the 'lost local interrupt if more than 2 IRQ
  * sources per level' errata.
  */
-#define LOCAL_TIMER_VECTOR     0xef
+#define LOCAL_TIMER_VECTOR             0xef
+
+/*
+ * Performance monitoring interrupt vector:
+ */
+#define LOCAL_PERF_VECTOR              0xee
+
+/*
+ * Generic system vector for platform specific use
+ */
+#define GENERIC_INTERRUPT_VECTOR       0xed
 
 /*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
  * start at 0x31(0x41) to spread out vectors evenly between priority
  * levels. (0x80 is the syscall vector)
  */
-#define FIRST_DEVICE_VECTOR    (IRQ15_VECTOR + 2)
+#define FIRST_DEVICE_VECTOR            (IRQ15_VECTOR + 2)
 
-#define NR_VECTORS             256
+#define NR_VECTORS                      256
 
-#define FPU_IRQ                        13
+#define FPU_IRQ                                  13
 
-#define        FIRST_VM86_IRQ          3
-#define LAST_VM86_IRQ          15
-#define invalid_vm86_irq(irq)  ((irq) < 3 || (irq) > 15)
+#define        FIRST_VM86_IRQ                     3
+#define LAST_VM86_IRQ                    15
 
-#define NR_IRQS_LEGACY         16
-
-#if defined(CONFIG_X86_IO_APIC) && !defined(CONFIG_X86_VOYAGER)
-
-#ifndef CONFIG_SPARSE_IRQ
-# if NR_CPUS < MAX_IO_APICS
-#  define NR_IRQS (NR_VECTORS + (32 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
-#else
-# if (8 * NR_CPUS) > (32 * MAX_IO_APICS)
-#  define NR_IRQS (NR_VECTORS + (8 * NR_CPUS))
-# else
-#  define NR_IRQS (NR_VECTORS + (32 * MAX_IO_APICS))
-# endif
+#ifndef __ASSEMBLY__
+static inline int invalid_vm86_irq(int irq)
+{
+       return irq < FIRST_VM86_IRQ || irq > LAST_VM86_IRQ;
+}
 #endif
 
-#elif defined(CONFIG_X86_VOYAGER)
-
-# define NR_IRQS               224
+/*
+ * Size the maximum number of interrupts.
+ *
+ * If the irq_desc[] array has a sparse layout, we can size things
+ * generously - it scales up linearly with the maximum number of CPUs,
+ * and the maximum number of IO-APICs, whichever is higher.
+ *
+ * In other cases we size more conservatively, to not create too large
+ * static arrays.
+ */
 
-#else /* IO_APIC || VOYAGER */
+#define NR_IRQS_LEGACY                   16
 
-# define NR_IRQS               16
+#define CPU_VECTOR_LIMIT               (  8 * NR_CPUS      )
+#define IO_APIC_VECTOR_LIMIT           ( 32 * MAX_IO_APICS )
 
+#ifdef CONFIG_X86_IO_APIC
+# ifdef CONFIG_SPARSE_IRQ
+#  define NR_IRQS                                      \
+       (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?      \
+               (NR_VECTORS + CPU_VECTOR_LIMIT)  :      \
+               (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
+# else
+#  if NR_CPUS < MAX_IO_APICS
+#   define NR_IRQS                     (NR_VECTORS + 4*CPU_VECTOR_LIMIT)
+#  else
+#   define NR_IRQS                     (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+#  endif
+# endif
+#else /* !CONFIG_X86_IO_APIC: */
+# define NR_IRQS                       NR_IRQS_LEGACY
 #endif
 
-/* Voyager specific defines */
-/* These define the CPIs we use in linux */
-#define VIC_CPI_LEVEL0                 0
-#define VIC_CPI_LEVEL1                 1
-/* now the fake CPIs */
-#define VIC_TIMER_CPI                  2
-#define VIC_INVALIDATE_CPI             3
-#define VIC_RESCHEDULE_CPI             4
-#define VIC_ENABLE_IRQ_CPI             5
-#define VIC_CALL_FUNCTION_CPI          6
-#define VIC_CALL_FUNCTION_SINGLE_CPI   7
-
-/* Now the QIC CPIs:  Since we don't need the two initial levels,
- * these are 2 less than the VIC CPIs */
-#define QIC_CPI_OFFSET                 1
-#define QIC_TIMER_CPI                  (VIC_TIMER_CPI - QIC_CPI_OFFSET)
-#define QIC_INVALIDATE_CPI             (VIC_INVALIDATE_CPI - QIC_CPI_OFFSET)
-#define QIC_RESCHEDULE_CPI             (VIC_RESCHEDULE_CPI - QIC_CPI_OFFSET)
-#define QIC_ENABLE_IRQ_CPI             (VIC_ENABLE_IRQ_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_CPI          (VIC_CALL_FUNCTION_CPI - QIC_CPI_OFFSET)
-#define QIC_CALL_FUNCTION_SINGLE_CPI   (VIC_CALL_FUNCTION_SINGLE_CPI - QIC_CPI_OFFSET)
-
-#define VIC_START_FAKE_CPI             VIC_TIMER_CPI
-#define VIC_END_FAKE_CPI               VIC_CALL_FUNCTION_SINGLE_CPI
-
-/* this is the SYS_INT CPI. */
-#define VIC_SYS_INT                    8
-#define VIC_CMN_INT                    15
-
-/* This is the boot CPI for alternate processors.  It gets overwritten
- * by the above once the system has activated all available processors */
-#define VIC_CPU_BOOT_CPI               VIC_CPI_LEVEL0
-#define VIC_CPU_BOOT_ERRATA_CPI                (VIC_CPI_LEVEL0 + 8)
-
-
 #endif /* _ASM_X86_IRQ_VECTORS_H */
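The NR_IRQS sizing above is easier to check with numbers plugged in. Under
CONFIG_SPARSE_IRQ the array scales with whichever limit is larger: 8 vectors
per possible CPU or 32 per IO-APIC. A worked instance, assuming an
illustrative NR_CPUS=64, MAX_IO_APICS=128 configuration (not mandated by this
header):

/* Sketch: the sparse-IRQ arithmetic with example config values. */
#define EX_CPU_VECTOR_LIMIT     (8 * 64)        /*  512 */
#define EX_IO_APIC_VECTOR_LIMIT (32 * 128)      /* 4096 */

/* 512 < 4096, so the IO-APIC term wins: NR_IRQS = 256 + 4096 = 4352. */
#if EX_CPU_VECTOR_LIMIT > EX_IO_APIC_VECTOR_LIMIT
# define EX_NR_IRQS     (256 + EX_CPU_VECTOR_LIMIT)
#else
# define EX_NR_IRQS     (256 + EX_IO_APIC_VECTOR_LIMIT)
#endif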
index c61d8b2ab8b9d8ac00e8c514ee26028fe601eb86..317ff1703d0b0a5e7eaf17b94fd93640b8e62997 100644 (file)
 #else
 # define PA_CONTROL_PAGE       0
 # define VA_CONTROL_PAGE       1
-# define PA_PGD                        2
-# define VA_PGD                        3
-# define PA_PUD_0              4
-# define VA_PUD_0              5
-# define PA_PMD_0              6
-# define VA_PMD_0              7
-# define PA_PTE_0              8
-# define VA_PTE_0              9
-# define PA_PUD_1              10
-# define VA_PUD_1              11
-# define PA_PMD_1              12
-# define VA_PMD_1              13
-# define PA_PTE_1              14
-# define VA_PTE_1              15
-# define PA_TABLE_PAGE         16
-# define PAGES_NR              17
+# define PA_TABLE_PAGE         2
+# define PA_SWAP_PAGE          3
+# define PAGES_NR              4
 #endif
 
-#ifdef CONFIG_X86_32
 # define KEXEC_CONTROL_CODE_MAX_SIZE   2048
-#endif
 
 #ifndef __ASSEMBLY__
 
@@ -151,15 +136,16 @@ relocate_kernel(unsigned long indirection_page,
                unsigned int has_pae,
                unsigned int preserve_context);
 #else
-NORET_TYPE void
+unsigned long
 relocate_kernel(unsigned long indirection_page,
                unsigned long page_list,
-               unsigned long start_address) ATTRIB_NORET;
+               unsigned long start_address,
+               unsigned int preserve_context);
 #endif
 
-#ifdef CONFIG_X86_32
 #define ARCH_HAS_KIMAGE_ARCH
 
+#ifdef CONFIG_X86_32
 struct kimage_arch {
        pgd_t *pgd;
 #ifdef CONFIG_X86_PAE
@@ -169,6 +155,12 @@ struct kimage_arch {
        pte_t *pte0;
        pte_t *pte1;
 };
+#else
+struct kimage_arch {
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+};
 #endif
 
 #endif /* __ASSEMBLY__ */
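On 64-bit, relocate_kernel() now returns (it no longer carries NORET_TYPE)
and takes a preserve_context flag, matching 32-bit, while the scratch page
tables move into struct kimage_arch; that is why the PA_/VA_ page-list
indices collapse from 17 entries to 4. A sketch of the adjusted call site,
reconstructed from the prototype above rather than quoted from
machine_kexec_64.c:

/* Sketch: calling the new four-argument relocate_kernel() on 64-bit. */
image->start = relocate_kernel((unsigned long)image->head,
                               (unsigned long)page_list,
                               image->start,
                               image->preserve_context);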
index 5d98d0b68ffc8e08e07cf829ed15103f6fc09796..12d55e773eb605ff85c67c8cbf8d453697e01871 100644 (file)
@@ -1,14 +1,11 @@
 #ifndef _ASM_X86_LINKAGE_H
 #define _ASM_X86_LINKAGE_H
 
+#include <linux/stringify.h>
+
 #undef notrace
 #define notrace __attribute__((no_instrument_function))
 
-#ifdef CONFIG_X86_64
-#define __ALIGN .p2align 4,,15
-#define __ALIGN_STR ".p2align 4,,15"
-#endif
-
 #ifdef CONFIG_X86_32
 #define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
 /*
        __asmlinkage_protect_n(ret, "g" (arg1), "g" (arg2), "g" (arg3), \
                              "g" (arg4), "g" (arg5), "g" (arg6))
 
-#endif
-
-#ifdef CONFIG_X86_ALIGNMENT_16
-#define __ALIGN .align 16,0x90
-#define __ALIGN_STR ".align 16,0x90"
-#endif
-
-/*
- * to check ENTRY_X86/END_X86 and
- * KPROBE_ENTRY_X86/KPROBE_END_X86
- * unbalanced-missed-mixed appearance
- */
-#define __set_entry_x86                .set ENTRY_X86_IN, 0
-#define __unset_entry_x86      .set ENTRY_X86_IN, 1
-#define __set_kprobe_x86       .set KPROBE_X86_IN, 0
-#define __unset_kprobe_x86     .set KPROBE_X86_IN, 1
-
-#define __macro_err_x86 .error "ENTRY_X86/KPROBE_X86 unbalanced,missed,mixed"
-
-#define __check_entry_x86      \
-       .ifdef ENTRY_X86_IN;    \
-       .ifeq ENTRY_X86_IN;     \
-       __macro_err_x86;        \
-       .abort;                 \
-       .endif;                 \
-       .endif
-
-#define __check_kprobe_x86     \
-       .ifdef KPROBE_X86_IN;   \
-       .ifeq KPROBE_X86_IN;    \
-       __macro_err_x86;        \
-       .abort;                 \
-       .endif;                 \
-       .endif
+#endif /* CONFIG_X86_32 */
 
-#define __check_entry_kprobe_x86       \
-       __check_entry_x86;              \
-       __check_kprobe_x86
+#ifdef __ASSEMBLY__
 
-#define ENTRY_KPROBE_FINAL_X86 __check_entry_kprobe_x86
-
-#define ENTRY_X86(name)                        \
-       __check_entry_kprobe_x86;       \
-       __set_entry_x86;                \
-       .globl name;                    \
-       __ALIGN;                        \
+#define GLOBAL(name)   \
+       .globl name;    \
        name:
 
-#define END_X86(name)                  \
-       __unset_entry_x86;              \
-       __check_entry_kprobe_x86;       \
-       .size name, .-name
-
-#define KPROBE_ENTRY_X86(name)         \
-       __check_entry_kprobe_x86;       \
-       __set_kprobe_x86;               \
-       .pushsection .kprobes.text, "ax"; \
-       .globl name;                    \
-       __ALIGN;                        \
-       name:
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
+#define __ALIGN                .p2align 4, 0x90
+#define __ALIGN_STR    __stringify(__ALIGN)
+#endif
 
-#define KPROBE_END_X86(name)           \
-       __unset_kprobe_x86;             \
-       __check_entry_kprobe_x86;       \
-       .size name, .-name;             \
-       .popsection
+#endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_X86_LINKAGE_H */
 
diff --git a/arch/x86/include/asm/mach-default/apm.h b/arch/x86/include/asm/mach-default/apm.h
deleted file mode 100644 (file)
index 20370c6..0000000
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *  Machine specific APM BIOS functions for generic.
- *  Split out from apm.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-
-#ifndef _ASM_X86_MACH_DEFAULT_APM_H
-#define _ASM_X86_MACH_DEFAULT_APM_H
-
-#ifdef APM_ZERO_SEGS
-#      define APM_DO_ZERO_SEGS \
-               "pushl %%ds\n\t" \
-               "pushl %%es\n\t" \
-               "xorl %%edx, %%edx\n\t" \
-               "mov %%dx, %%ds\n\t" \
-               "mov %%dx, %%es\n\t" \
-               "mov %%dx, %%fs\n\t" \
-               "mov %%dx, %%gs\n\t"
-#      define APM_DO_POP_SEGS \
-               "popl %%es\n\t" \
-               "popl %%ds\n\t"
-#else
-#      define APM_DO_ZERO_SEGS
-#      define APM_DO_POP_SEGS
-#endif
-
-static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
-                                       u32 *eax, u32 *ebx, u32 *ecx,
-                                       u32 *edx, u32 *esi)
-{
-       /*
-        * N.B. We do NOT need a cld after the BIOS call
-        * because we always save and restore the flags.
-        */
-       __asm__ __volatile__(APM_DO_ZERO_SEGS
-               "pushl %%edi\n\t"
-               "pushl %%ebp\n\t"
-               "lcall *%%cs:apm_bios_entry\n\t"
-               "setc %%al\n\t"
-               "popl %%ebp\n\t"
-               "popl %%edi\n\t"
-               APM_DO_POP_SEGS
-               : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx),
-                 "=S" (*esi)
-               : "a" (func), "b" (ebx_in), "c" (ecx_in)
-               : "memory", "cc");
-}
-
-static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
-                                               u32 ecx_in, u32 *eax)
-{
-       int     cx, dx, si;
-       u8      error;
-
-       /*
-        * N.B. We do NOT need a cld after the BIOS call
-        * because we always save and restore the flags.
-        */
-       __asm__ __volatile__(APM_DO_ZERO_SEGS
-               "pushl %%edi\n\t"
-               "pushl %%ebp\n\t"
-               "lcall *%%cs:apm_bios_entry\n\t"
-               "setc %%bl\n\t"
-               "popl %%ebp\n\t"
-               "popl %%edi\n\t"
-               APM_DO_POP_SEGS
-               : "=a" (*eax), "=b" (error), "=c" (cx), "=d" (dx),
-                 "=S" (si)
-               : "a" (func), "b" (ebx_in), "c" (ecx_in)
-               : "memory", "cc");
-       return error;
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_APM_H */
diff --git a/arch/x86/include/asm/mach-default/do_timer.h b/arch/x86/include/asm/mach-default/do_timer.h
deleted file mode 100644 (file)
index 23ecda0..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/* defines for inline arch setup functions */
-#include <linux/clockchips.h>
-
-#include <asm/i8259.h>
-#include <asm/i8253.h>
-
-/**
- * do_timer_interrupt_hook - hook into timer tick
- *
- * Call the pit clock event handler. see asm/i8253.h
- **/
-
-static inline void do_timer_interrupt_hook(void)
-{
-       global_clock_event->event_handler(global_clock_event);
-}
diff --git a/arch/x86/include/asm/mach-default/entry_arch.h b/arch/x86/include/asm/mach-default/entry_arch.h
deleted file mode 100644 (file)
index 6b1add8..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This file is designed to contain the BUILD_INTERRUPT specifications for
- * all of the extra named interrupt vectors used by the architecture.
- * Usually this is the Inter Process Interrupts (IPIs)
- */
-
-/*
- * The following vectors are part of the Linux architecture, there
- * is no hardware IRQ pin equivalent for them, they are triggered
- * through the ICC by us (IPIs)
- */
-#ifdef CONFIG_X86_SMP
-BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
-BUILD_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
-BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
-BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
-#endif
-
-/*
- * every pentium local APIC has two 'local interrupts', with a
- * soft-definable vector attached to both interrupts, one of
- * which is a timer interrupt, the other one is error counter
- * overflow. Linux uses the local APIC timer interrupt to get
- * a much simpler SMP time architecture:
- */
-#ifdef CONFIG_X86_LOCAL_APIC
-BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
-BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
-BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
-
-#ifdef CONFIG_X86_MCE_P4THERMAL
-BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
-#endif
-
-#endif
diff --git a/arch/x86/include/asm/mach-default/mach_apic.h b/arch/x86/include/asm/mach-default/mach_apic.h
deleted file mode 100644 (file)
index cc09cbb..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APIC_H
-
-#ifdef CONFIG_X86_LOCAL_APIC
-
-#include <mach_apicdef.h>
-#include <asm/smp.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_FLAT)
-
-static inline const struct cpumask *target_cpus(void)
-{ 
-#ifdef CONFIG_SMP
-       return cpu_online_mask;
-#else
-       return cpumask_of(0);
-#endif
-} 
-
-#define NO_BALANCE_IRQ (0)
-#define esr_disable (0)
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#define TARGET_CPUS      (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define phys_pkg_id    (genapic->phys_pkg_id)
-#define vector_allocation_domain    (genapic->vector_allocation_domain)
-#define read_apic_id()  (GET_APIC_ID(apic_read(APIC_ID)))
-#define send_IPI_self (genapic->send_IPI_self)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-extern void setup_apic_routing(void);
-#else
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
-#define TARGET_CPUS (target_cpus())
-#define wakeup_secondary_cpu wakeup_secondary_cpu_via_init
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static inline void init_apic_ldr(void)
-{
-       unsigned long val;
-
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
-       apic_write(APIC_LDR, val);
-}
-
-static inline int apic_id_registered(void)
-{
-       return physid_isset(read_apic_id(), phys_cpu_present_map);
-}
-
-static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       return cpumask_bits(cpumask)[0];
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       unsigned long mask1 = cpumask_bits(cpumask)[0];
-       unsigned long mask2 = cpumask_bits(andmask)[0];
-       unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
-
-       return (unsigned int)(mask1 & mask2 & mask3);
-}
-
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-static inline void setup_apic_routing(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-                                       "Flat", nr_ioapics);
-#endif
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-       return apicid_2_node[hard_smp_processor_id()];
-#else
-       return 0;
-#endif
-}
-
-static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-        /* Careful. Some cpus do not strictly honor the set of cpus
-         * specified in the interrupt destination when using lowest
-         * priority interrupt delivery mode.
-         *
-         * In particular there was a hyperthreading cpu observed to
-         * deliver interrupts to the wrong hyperthread when only one
-         * hyperthread was specified in the interrupt destination.
-         */
-       *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
-}
-#endif
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return physid_isset(apicid, bitmap);
-}
-
-static inline unsigned long check_apicid_present(int bit)
-{
-       return physid_isset(bit, phys_cpu_present_map);
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       return phys_map;
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return 0;
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-       return 1 << cpu;
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
-               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-       else
-               return BAD_APICID;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int phys_apicid)
-{
-       return physid_mask_of_physid(phys_apicid);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return physid_isset(boot_cpu_physical_apicid, phys_cpu_present_map);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-#endif /* CONFIG_X86_LOCAL_APIC */
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_apicdef.h b/arch/x86/include/asm/mach-default/mach_apicdef.h
deleted file mode 100644 (file)
index 5317993..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-#define _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H
-
-#include <asm/apic.h>
-
-#ifdef CONFIG_X86_64
-#define        APIC_ID_MASK            (genapic->apic_id_mask)
-#define GET_APIC_ID(x)         (genapic->get_apic_id(x))
-#define        SET_APIC_ID(x)          (genapic->set_apic_id(x))
-#else
-#define                APIC_ID_MASK            (0xF<<24)
-static inline unsigned get_apic_id(unsigned long x) 
-{
-       unsigned int ver = GET_APIC_VERSION(apic_read(APIC_LVR));
-       if (APIC_XAPIC(ver))
-               return (((x)>>24)&0xFF);
-       else
-               return (((x)>>24)&0xF);
-} 
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-default/mach_ipi.h b/arch/x86/include/asm/mach-default/mach_ipi.h
deleted file mode 100644 (file)
index 191312d..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-#define _ASM_X86_MACH_DEFAULT_MACH_IPI_H
-
-/* Avoid include hell */
-#define NMI_VECTOR 0x02
-
-void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-void __send_IPI_shortcut(unsigned int shortcut, int vector);
-
-extern int no_broadcast;
-
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
-#else
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_bitmask(mask, vector);
-}
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-#endif
-
-static inline void __local_send_IPI_allbutself(int vector)
-{
-       if (no_broadcast || vector == NMI_VECTOR)
-               send_IPI_mask_allbutself(cpu_online_mask, vector);
-       else
-               __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
-}
-
-static inline void __local_send_IPI_all(int vector)
-{
-       if (no_broadcast || vector == NMI_VECTOR)
-               send_IPI_mask(cpu_online_mask, vector);
-       else
-               __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
-}
-
-#ifdef CONFIG_X86_64
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-#else
-static inline void send_IPI_allbutself(int vector)
-{
-       /*
-        * if there are no other CPUs in the system then we get an APIC send 
-        * error if we try to broadcast, thus avoid sending IPIs in this case.
-        */
-       if (!(num_online_cpus() > 1))
-               return;
-
-       __local_send_IPI_allbutself(vector);
-       return;
-}
-
-static inline void send_IPI_all(int vector)
-{
-       __local_send_IPI_all(vector);
-}
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpparse.h b/arch/x86/include/asm/mach-default/mach_mpparse.h
deleted file mode 100644 (file)
index c70a263..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
-
-static inline int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 0;
-}
-
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-default/mach_mpspec.h b/arch/x86/include/asm/mach-default/mach_mpspec.h
deleted file mode 100644 (file)
index e85ede6..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-#define _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-#if CONFIG_BASE_SMALL == 0
-#define MAX_MP_BUSSES 256
-#else
-#define MAX_MP_BUSSES 32
-#endif
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-default/mach_timer.h b/arch/x86/include/asm/mach-default/mach_timer.h
deleted file mode 100644 (file)
index 8537285..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- *  Machine specific calibrate_tsc() for generic.
- *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-/* ------ Calibrate the TSC ------- 
- * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
- * Too much 64-bit arithmetic here to do this cleanly in C, and for
- * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
- * output busy loop as low as possible. We avoid reading the CTC registers
- * directly because of the awkward 8-bit access mechanism of the 82C54
- * device.
- */
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_TIMER_H
-#define _ASM_X86_MACH_DEFAULT_MACH_TIMER_H
-
-#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
-#define CALIBRATE_LATCH        \
-       ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
-
-static inline void mach_prepare_counter(void)
-{
-       /* Set the Gate high, disable speaker */
-       outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-
-       /*
-        * Now let's take care of CTC channel 2
-        *
-        * Set the Gate high, program CTC channel 2 for mode 0,
-        * (interrupt on terminal count mode), binary count,
-        * load 5 * LATCH count, (LSB and MSB) to begin countdown.
-        *
-        * Some devices need a delay here.
-        */
-       outb(0xb0, 0x43);                       /* binary, mode 0, LSB/MSB, Ch 2 */
-       outb_p(CALIBRATE_LATCH & 0xff, 0x42);   /* LSB of count */
-       outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
-}
-
-static inline void mach_countup(unsigned long *count_p)
-{
-       unsigned long count = 0;
-       do {
-               count++;
-       } while ((inb_p(0x61) & 0x20) == 0);
-       *count_p = count;
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_TIMER_H */
diff --git a/arch/x86/include/asm/mach-default/mach_traps.h b/arch/x86/include/asm/mach-default/mach_traps.h
deleted file mode 100644 (file)
index f792060..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- *  Machine specific NMI handling for generic.
- *  Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
- */
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H
-#define _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H
-
-#include <asm/mc146818rtc.h>
-
-static inline unsigned char get_nmi_reason(void)
-{
-       return inb(0x61);
-}
-
-static inline void reassert_nmi(void)
-{
-       int old_reg = -1;
-
-       if (do_i_have_lock_cmos())
-               old_reg = current_lock_cmos_reg();
-       else
-               lock_cmos(0); /* register doesn't matter here */
-       outb(0x8f, 0x70);
-       inb(0x71);              /* dummy */
-       outb(0x0f, 0x70);
-       inb(0x71);              /* dummy */
-       if (old_reg >= 0)
-               outb(old_reg, 0x70);
-       else
-               unlock_cmos();
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H */
diff --git a/arch/x86/include/asm/mach-default/mach_wakecpu.h b/arch/x86/include/asm/mach-default/mach_wakecpu.h
deleted file mode 100644 (file)
index 89897a6..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-#ifndef _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-#define _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (0x467)
-#define TRAMPOLINE_PHYS_HIGH (0x469)
-
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-       while (!atomic_read(deassert))
-               cpu_relax();
-       return;
-}
-
-/* Nothing to do for most platforms, since cleared by the INIT cycle */
-static inline void smp_callin_clear_local_apic(void)
-{
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-}
-
-#ifdef CONFIG_SMP
-extern void __inquire_remote_apic(int apicid);
-#else /* CONFIG_SMP */
-static inline void __inquire_remote_apic(int apicid)
-{
-}
-#endif /* CONFIG_SMP */
-
-static inline void inquire_remote_apic(int apicid)
-{
-       if (apic_verbosity >= APIC_DEBUG)
-               __inquire_remote_apic(apicid);
-}
-
-#endif /* _ASM_X86_MACH_DEFAULT_MACH_WAKECPU_H */
diff --git a/arch/x86/include/asm/mach-default/pci-functions.h b/arch/x86/include/asm/mach-default/pci-functions.h
deleted file mode 100644 (file)
index ed0bab4..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- *     PCI BIOS function numbering for conventional PCI BIOS 
- *     systems
- */
-
-#define PCIBIOS_PCI_FUNCTION_ID        0xb1XX
-#define PCIBIOS_PCI_BIOS_PRESENT       0xb101
-#define PCIBIOS_FIND_PCI_DEVICE                0xb102
-#define PCIBIOS_FIND_PCI_CLASS_CODE    0xb103
-#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
-#define PCIBIOS_READ_CONFIG_BYTE       0xb108
-#define PCIBIOS_READ_CONFIG_WORD       0xb109
-#define PCIBIOS_READ_CONFIG_DWORD      0xb10a
-#define PCIBIOS_WRITE_CONFIG_BYTE      0xb10b
-#define PCIBIOS_WRITE_CONFIG_WORD      0xb10c
-#define PCIBIOS_WRITE_CONFIG_DWORD     0xb10d
-#define PCIBIOS_GET_ROUTING_OPTIONS    0xb10e
-#define PCIBIOS_SET_PCI_HW_INT         0xb10f
-
diff --git a/arch/x86/include/asm/mach-default/setup_arch.h b/arch/x86/include/asm/mach-default/setup_arch.h
deleted file mode 100644 (file)
index 3884620..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-/* Hook to call BIOS initialisation function */
-
-/* no action for generic */
diff --git a/arch/x86/include/asm/mach-default/smpboot_hooks.h b/arch/x86/include/asm/mach-default/smpboot_hooks.h
deleted file mode 100644 (file)
index 23bf521..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/* two abstractions specific to kernel/smpboot.c, mainly to cater to visws
- * which needs to alter them. */
-
-static inline void smpboot_clear_io_apic_irqs(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       io_apic_irqs = 0;
-#endif
-}
-
-static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
-{
-       CMOS_WRITE(0xa, 0xf);
-       local_flush_tlb();
-       pr_debug("1.\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
-                                                                start_eip >> 4;
-       pr_debug("2.\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
-                                                        start_eip & 0xf;
-       pr_debug("3.\n");
-}
-
-static inline void smpboot_restore_warm_reset_vector(void)
-{
-       /*
-        * Install writable page 0 entry to set BIOS data area.
-        */
-       local_flush_tlb();
-
-       /*
-        * Paranoid:  Set warm reset code and vector here back
-        * to default values.
-        */
-       CMOS_WRITE(0, 0xf);
-
-       *((volatile long *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
-}
-
-static inline void __init smpboot_setup_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       /*
-        * Here we can be sure that there is an IO-APIC in the system. Let's
-        * go and set it up:
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-       else {
-               nr_ioapics = 0;
-               localise_nmi_watchdog();
-       }
-#endif
-}
-
-static inline void smpboot_clear_io_apic(void)
-{
-#ifdef CONFIG_X86_IO_APIC
-       nr_ioapics = 0;
-#endif
-}
diff --git a/arch/x86/include/asm/mach-generic/gpio.h b/arch/x86/include/asm/mach-generic/gpio.h
deleted file mode 100644 (file)
index 995c45e..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_GPIO_H
-#define _ASM_X86_MACH_GENERIC_GPIO_H
-
-int gpio_request(unsigned gpio, const char *label);
-void gpio_free(unsigned gpio);
-int gpio_direction_input(unsigned gpio);
-int gpio_direction_output(unsigned gpio, int value);
-int gpio_get_value(unsigned gpio);
-void gpio_set_value(unsigned gpio, int value);
-int gpio_to_irq(unsigned gpio);
-int irq_to_gpio(unsigned irq);
-
-#include <asm-generic/gpio.h>           /* cansleep wrappers */
-
-#endif /* _ASM_X86_MACH_GENERIC_GPIO_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apic.h b/arch/x86/include/asm/mach-generic/mach_apic.h
deleted file mode 100644 (file)
index 48553e9..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APIC_H
-#define _ASM_X86_MACH_GENERIC_MACH_APIC_H
-
-#include <asm/genapic.h>
-
-#define esr_disable (genapic->ESR_DISABLE)
-#define NO_BALANCE_IRQ (genapic->no_balance_irq)
-#define INT_DELIVERY_MODE (genapic->int_delivery_mode)
-#define INT_DEST_MODE (genapic->int_dest_mode)
-#undef APIC_DEST_LOGICAL
-#define APIC_DEST_LOGICAL (genapic->apic_destination_logical)
-#define TARGET_CPUS      (genapic->target_cpus())
-#define apic_id_registered (genapic->apic_id_registered)
-#define init_apic_ldr (genapic->init_apic_ldr)
-#define ioapic_phys_id_map (genapic->ioapic_phys_id_map)
-#define setup_apic_routing (genapic->setup_apic_routing)
-#define multi_timer_check (genapic->multi_timer_check)
-#define apicid_to_node (genapic->apicid_to_node)
-#define cpu_to_logical_apicid (genapic->cpu_to_logical_apicid) 
-#define cpu_present_to_apicid (genapic->cpu_present_to_apicid)
-#define apicid_to_cpu_present (genapic->apicid_to_cpu_present)
-#define setup_portio_remap (genapic->setup_portio_remap)
-#define check_apicid_present (genapic->check_apicid_present)
-#define check_phys_apicid_present (genapic->check_phys_apicid_present)
-#define check_apicid_used (genapic->check_apicid_used)
-#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
-#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
-#define vector_allocation_domain (genapic->vector_allocation_domain)
-#define enable_apic_mode (genapic->enable_apic_mode)
-#define phys_pkg_id (genapic->phys_pkg_id)
-#define wakeup_secondary_cpu (genapic->wakeup_cpu)
-
-extern void generic_bigsmp_probe(void);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_apicdef.h b/arch/x86/include/asm/mach-generic/mach_apicdef.h
deleted file mode 100644 (file)
index 68041f3..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-#define _ASM_X86_MACH_GENERIC_MACH_APICDEF_H
-
-#ifndef APIC_DEFINITION
-#include <asm/genapic.h>
-
-#define GET_APIC_ID (genapic->get_apic_id)
-#define APIC_ID_MASK (genapic->apic_id_mask)
-#endif
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APICDEF_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_ipi.h b/arch/x86/include/asm/mach-generic/mach_ipi.h
deleted file mode 100644 (file)
index ffd637e..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_IPI_H
-#define _ASM_X86_MACH_GENERIC_MACH_IPI_H
-
-#include <asm/genapic.h>
-
-#define send_IPI_mask (genapic->send_IPI_mask)
-#define send_IPI_allbutself (genapic->send_IPI_allbutself)
-#define send_IPI_all (genapic->send_IPI_all)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_IPI_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpparse.h b/arch/x86/include/asm/mach-generic/mach_mpparse.h
deleted file mode 100644 (file)
index 9444ab8..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H
-
-
-extern int mps_oem_check(struct mpc_table *, char *, char *);
-
-extern int acpi_madt_oem_check(char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_mpspec.h b/arch/x86/include/asm/mach-generic/mach_mpspec.h
deleted file mode 100644 (file)
index 3bc4072..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-#define _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H
-
-#define MAX_IRQ_SOURCES 256
-
-/* Summit or generic (i.e. installer) kernels need lots of bus entries. */
-/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
-#define MAX_MP_BUSSES 260
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */
diff --git a/arch/x86/include/asm/mach-generic/mach_wakecpu.h b/arch/x86/include/asm/mach-generic/mach_wakecpu.h
deleted file mode 100644 (file)
index 1ab16b1..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-#define _ASM_X86_MACH_GENERIC_MACH_WAKECPU_H
-
-#define TRAMPOLINE_PHYS_LOW (genapic->trampoline_phys_low)
-#define TRAMPOLINE_PHYS_HIGH (genapic->trampoline_phys_high)
-#define wait_for_init_deassert (genapic->wait_for_init_deassert)
-#define smp_callin_clear_local_apic (genapic->smp_callin_clear_local_apic)
-#define store_NMI_vector (genapic->store_NMI_vector)
-#define restore_NMI_vector (genapic->restore_NMI_vector)
-#define inquire_remote_apic (genapic->inquire_remote_apic)
-
-#endif /* _ASM_X86_MACH_GENERIC_MACH_APIC_H */
diff --git a/arch/x86/include/asm/mach-rdc321x/gpio.h b/arch/x86/include/asm/mach-rdc321x/gpio.h
deleted file mode 100644 (file)
index c210ab5..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-#ifndef _ASM_X86_MACH_RDC321X_GPIO_H
-#define _ASM_X86_MACH_RDC321X_GPIO_H
-
-#include <linux/kernel.h>
-
-extern int rdc_gpio_get_value(unsigned gpio);
-extern void rdc_gpio_set_value(unsigned gpio, int value);
-extern int rdc_gpio_direction_input(unsigned gpio);
-extern int rdc_gpio_direction_output(unsigned gpio, int value);
-extern int rdc_gpio_request(unsigned gpio, const char *label);
-extern void rdc_gpio_free(unsigned gpio);
-extern void __init rdc321x_gpio_setup(void);
-
-/* Wrappers for the arch-neutral GPIO API */
-
-static inline int gpio_request(unsigned gpio, const char *label)
-{
-       return rdc_gpio_request(gpio, label);
-}
-
-static inline void gpio_free(unsigned gpio)
-{
-       might_sleep();
-       rdc_gpio_free(gpio);
-}
-
-static inline int gpio_direction_input(unsigned gpio)
-{
-       return rdc_gpio_direction_input(gpio);
-}
-
-static inline int gpio_direction_output(unsigned gpio, int value)
-{
-       return rdc_gpio_direction_output(gpio, value);
-}
-
-static inline int gpio_get_value(unsigned gpio)
-{
-       return rdc_gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned gpio, int value)
-{
-       rdc_gpio_set_value(gpio, value);
-}
-
-static inline int gpio_to_irq(unsigned gpio)
-{
-       return gpio;
-}
-
-static inline int irq_to_gpio(unsigned irq)
-{
-       return irq;
-}
-
-/* For cansleep */
-#include <asm-generic/gpio.h>
-
-#endif /* _ASM_X86_MACH_RDC321X_GPIO_H */
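
The block of trivial wrappers above exists so drivers can stay with the arch-neutral gpio_*() calls and never see the rdc_ prefix. A consumer-side sketch against the generic API (the pin number and function name are invented for illustration):

    /* Illustrative GPIO consumer; uses only the arch-neutral calls. */
    static int demo_led_on(void)
    {
            int err;

            err = gpio_request(7, "demo-led");      /* pin 7 is made up */
            if (err)
                    return err;
            err = gpio_direction_output(7, 0);      /* output, initially low */
            if (err) {
                    gpio_free(7);
                    return err;
            }
            gpio_set_value(7, 1);                   /* drive the LED on */
            return 0;
    }
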
diff --git a/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h b/arch/x86/include/asm/mach-rdc321x/rdc321x_defs.h
deleted file mode 100644 (file)
index c8e9c8b..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#define PFX    "rdc321x: "
-
-/* General purpose configuration and data registers */
-#define RDC3210_CFGREG_ADDR     0x0CF8
-#define RDC3210_CFGREG_DATA     0x0CFC
-
-#define RDC321X_GPIO_CTRL_REG1 0x48
-#define RDC321X_GPIO_CTRL_REG2 0x84
-#define RDC321X_GPIO_DATA_REG1 0x4c
-#define RDC321X_GPIO_DATA_REG2 0x88
-
-#define RDC321X_MAX_GPIO       58
diff --git a/arch/x86/include/asm/mach-voyager/do_timer.h b/arch/x86/include/asm/mach-voyager/do_timer.h
deleted file mode 100644 (file)
index 9e5a459..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* defines for inline arch setup functions */
-#include <linux/clockchips.h>
-
-#include <asm/voyager.h>
-#include <asm/i8253.h>
-
-/**
- * do_timer_interrupt_hook - hook into timer tick
- *
- * Call the pit clock event handler. see asm/i8253.h
- **/
-static inline void do_timer_interrupt_hook(void)
-{
-       global_clock_event->event_handler(global_clock_event);
-       voyager_timer_interrupt();
-}
-
diff --git a/arch/x86/include/asm/mach-voyager/entry_arch.h b/arch/x86/include/asm/mach-voyager/entry_arch.h
deleted file mode 100644 (file)
index ae52624..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Copyright (C) 2002
- *
- * Author: James.Bottomley@HansenPartnership.com
- *
- * linux/arch/i386/voyager/entry_arch.h
- *
- * This file builds the VIC and QIC CPI gates
- */
-
-/* initialise the voyager interrupt gates 
- *
- * This uses the macros in irq.h to set up assembly jump gates.  The
- * calls are then redirected to the same routine with smp_ prefixed */
-BUILD_INTERRUPT(vic_sys_interrupt, VIC_SYS_INT)
-BUILD_INTERRUPT(vic_cmn_interrupt, VIC_CMN_INT)
-BUILD_INTERRUPT(vic_cpi_interrupt, VIC_CPI_LEVEL0);
-
-/* do all the QIC interrupts */
-BUILD_INTERRUPT(qic_timer_interrupt, QIC_TIMER_CPI);
-BUILD_INTERRUPT(qic_invalidate_interrupt, QIC_INVALIDATE_CPI);
-BUILD_INTERRUPT(qic_reschedule_interrupt, QIC_RESCHEDULE_CPI);
-BUILD_INTERRUPT(qic_enable_irq_interrupt, QIC_ENABLE_IRQ_CPI);
-BUILD_INTERRUPT(qic_call_function_interrupt, QIC_CALL_FUNCTION_CPI);
-BUILD_INTERRUPT(qic_call_function_single_interrupt, QIC_CALL_FUNCTION_SINGLE_CPI);
diff --git a/arch/x86/include/asm/mach-voyager/setup_arch.h b/arch/x86/include/asm/mach-voyager/setup_arch.h
deleted file mode 100644 (file)
index 71729ca..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-#include <asm/voyager.h>
-#include <asm/setup.h>
-#define VOYAGER_BIOS_INFO ((struct voyager_bios_info *) \
-                       (&boot_params.apm_bios_info))
-
-/* Hook to call BIOS initialisation function */
-
-/* for voyager, pass the voyager BIOS/SUS info area to the detection
- * routines */
-
-#define ARCH_SETUP     voyager_detect(VOYAGER_BIOS_INFO);
-
diff --git a/arch/x86/include/asm/mach_timer.h b/arch/x86/include/asm/mach_timer.h
new file mode 100644 (file)
index 0000000..8537285
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ *  Machine specific calibrate_tsc() for generic.
+ *  Split out from timer_tsc.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+/* ------ Calibrate the TSC ------- 
+ * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
+ * Too much 64-bit arithmetic here to do this cleanly in C, and for
+ * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
+ * output busy loop as low as possible. We avoid reading the CTC registers
+ * directly because of the awkward 8-bit access mechanism of the 82C54
+ * device.
+ */
+#ifndef _ASM_X86_MACH_DEFAULT_MACH_TIMER_H
+#define _ASM_X86_MACH_DEFAULT_MACH_TIMER_H
+
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH        \
+       ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
+
+static inline void mach_prepare_counter(void)
+{
+       /* Set the Gate high, disable speaker */
+       outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+       /*
+        * Now let's take care of CTC channel 2
+        *
+        * Set the Gate high, program CTC channel 2 for mode 0
+        * (interrupt on terminal count), binary count, and load
+        * the CALIBRATE_LATCH count (LSB, then MSB) to begin the countdown.
+        *
+        * Some devices need a delay here.
+        */
+       outb(0xb0, 0x43);                       /* binary, mode 0, LSB/MSB, Ch 2 */
+       outb_p(CALIBRATE_LATCH & 0xff, 0x42);   /* LSB of count */
+       outb_p(CALIBRATE_LATCH >> 8, 0x42);       /* MSB of count */
+}
+
+static inline void mach_countup(unsigned long *count_p)
+{
+       unsigned long count = 0;
+       do {
+               count++;
+       } while ((inb_p(0x61) & 0x20) == 0);
+       *count_p = count;
+}
+
+#endif /* _ASM_X86_MACH_DEFAULT_MACH_TIMER_H */
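
The two helpers above are meant to be bracketed by TSC reads: mach_prepare_counter() starts a one-shot countdown on PIT channel 2, mach_countup() spins until OUT2 (port 0x61 bit 5) goes high after CALIBRATE_TIME_MSEC, and the TSC delta across that window gives the clock rate. A rough sketch of the calling pattern, assuming rdtscll() from <asm/msr.h> (the demo_ function is illustrative, not the kernel's calibration code):

    /* Sketch: TSC ticks per millisecond, i.e. the TSC frequency in kHz. */
    static unsigned long demo_calibrate_tsc_khz(void)
    {
            unsigned long long start, end;
            unsigned long count;

            mach_prepare_counter();       /* load CALIBRATE_LATCH, start countdown */
            rdtscll(start);
            mach_countup(&count);         /* busy-wait until OUT2 goes high */
            rdtscll(end);

            return (unsigned long)(end - start) / CALIBRATE_TIME_MSEC;
    }
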
diff --git a/arch/x86/include/asm/mach_traps.h b/arch/x86/include/asm/mach_traps.h
new file mode 100644 (file)
index 0000000..f792060
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *  Machine specific NMI handling for generic.
+ *  Split out from traps.c by Osamu Tomita <tomita@cinet.co.jp>
+ */
+#ifndef _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H
+#define _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H
+
+#include <asm/mc146818rtc.h>
+
+static inline unsigned char get_nmi_reason(void)
+{
+       return inb(0x61);
+}
+
+static inline void reassert_nmi(void)
+{
+       int old_reg = -1;
+
+       if (do_i_have_lock_cmos())
+               old_reg = current_lock_cmos_reg();
+       else
+               lock_cmos(0); /* register doesn't matter here */
+       outb(0x8f, 0x70);
+       inb(0x71);              /* dummy */
+       outb(0x0f, 0x70);
+       inb(0x71);              /* dummy */
+       if (old_reg >= 0)
+               outb(old_reg, 0x70);
+       else
+               unlock_cmos();
+}
+
+#endif /* _ASM_X86_MACH_DEFAULT_MACH_TRAPS_H */
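
get_nmi_reason() reads system control port B (0x61), where bit 7 reports a memory parity/SERR error and bit 6 an I/O channel check; reassert_nmi() re-enables NMI delivery by writing the CMOS index port with bit 7 set and then clear, restoring whatever register a concurrent CMOS user had selected. A sketch of how a default-platform handler might use them (demo_ function is illustrative):

    /* Sketch: decode the NMI reason bits, then unmask NMIs again. */
    static void demo_handle_nmi(void)
    {
            unsigned char reason = get_nmi_reason();

            if (reason & 0x80)
                    printk(KERN_EMERG "NMI: memory parity error\n");
            if (reason & 0x40)
                    printk(KERN_EMERG "NMI: I/O channel check\n");

            reassert_nmi();         /* re-enable NMI delivery via port 0x70 */
    }
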
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 32c6e17b960b7aed994078dc1d5c1fb4e547b2a4..563933e06a35fa48f6c828d3884f9b35886ab4eb 100644 (file)
@@ -11,6 +11,8 @@
  */
 
 #define MCG_CTL_P       (1UL<<8)   /* MCG_CAP register available */
+#define MCG_EXT_P       (1ULL<<9)   /* Extended registers available */
+#define MCG_CMCI_P      (1ULL<<10)  /* CMCI supported */
 
 #define MCG_STATUS_RIPV  (1UL<<0)   /* restart ip valid */
 #define MCG_STATUS_EIPV  (1UL<<1)   /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;
 
 #include <asm/atomic.h>
 
+void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
 extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
+/*
+ * Supporting more than 128 banks would require escaping the
+ * predefined Linux extended bank numbers first.
+ */
+#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
+
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void cmci_clear(void);
+void cmci_reenable(void);
+void cmci_rediscover(int dying);
+void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void cmci_clear(void) {}
+static inline void cmci_reenable(void) {}
+static inline void cmci_rediscover(int dying) {}
+static inline void cmci_recheck(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
 static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 #endif
 
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
+extern int mce_available(struct cpuinfo_x86 *c);
+
+void mce_log_therm_throt_event(__u64 status);
 
 extern atomic_t mce_entry;
 
 extern void do_machine_check(struct pt_regs *, long);
+
+typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
+DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
+
+enum mcp_flags {
+       MCP_TIMESTAMP = (1 << 0),       /* log time stamp */
+       MCP_UC = (1 << 1),              /* log uncorrected errors */
+};
+extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
 extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
 #define mcheck_init(c) do { } while (0)
 #endif
-extern void stop_mce(void);
-extern void restart_mce(void);
+
+extern void (*mce_threshold_vector)(void);
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
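
The new machine_check_poll() interface takes a bitmap of banks to inspect, so the timer-driven poller and the CMCI handler can own disjoint per-CPU sets of banks. A sketch of the polling call as the periodic path might issue it (the demo_ wrapper is illustrative; the call itself matches the prototype above):

    /* Sketch: scan this CPU's share of the MCE banks for corrected
     * events, timestamping anything that gets logged. */
    static void demo_mce_poll(void)
    {
            machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks));
    }
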
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8aeeb3fd73db8a90369b60adb747719cdeb8f58d..f923203dc39a68f04e6c0ea05fe7915c25b0b969 100644 (file)
@@ -21,11 +21,54 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
-#ifdef CONFIG_X86_32
-# include "mmu_context_32.h"
-#else
-# include "mmu_context_64.h"
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+#ifdef CONFIG_SMP
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+#endif
+}
+
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+                            struct task_struct *tsk)
+{
+       unsigned cpu = smp_processor_id();
+
+       if (likely(prev != next)) {
+               /* stop flush ipis for the previous mm */
+               cpu_clear(cpu, prev->cpu_vm_mask);
+#ifdef CONFIG_SMP
+               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               percpu_write(cpu_tlbstate.active_mm, next);
 #endif
+               cpu_set(cpu, next->cpu_vm_mask);
+
+               /* Re-load page tables */
+               load_cr3(next->pgd);
+
+               /*
+                * load the LDT, if the LDT is different:
+                */
+               if (unlikely(prev->context.ldt != next->context.ldt))
+                       load_LDT_nolock(&next->context);
+       }
+#ifdef CONFIG_SMP
+       else {
+               percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+               BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
+
+               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
+                       /*
+                        * We were in lazy TLB mode and leave_mm() disabled
+                        * TLB flush IPI delivery. We must reload CR3 to
+                        * make sure we don't keep using freed page tables.
+                        */
+                       load_cr3(next->pgd);
+                       load_LDT_nolock(&next->context);
+               }
+       }
+#endif
+}
 
 #define activate_mm(prev, next)                        \
 do {                                           \
@@ -33,5 +76,17 @@ do {                                         \
        switch_mm((prev), (next), NULL);        \
 } while (0);
 
+#ifdef CONFIG_X86_32
+#define deactivate_mm(tsk, mm)                 \
+do {                                           \
+       lazy_load_gs(0);                        \
+} while (0)
+#else
+#define deactivate_mm(tsk, mm)                 \
+do {                                           \
+       load_gs_index(0);                       \
+       loadsegment(fs, 0);                     \
+} while (0)
+#endif
 
 #endif /* _ASM_X86_MMU_CONTEXT_H */
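
The unified switch_mm() keeps the old 32/64-bit semantics: kernel threads never call it at all; they borrow the previous mm and only mark the CPU lazy, which lets TLB-flush IPIs be skipped until the mm is really reactivated. A simplified sketch of the scheduler-side call order (illustrative only; reference counting and the active_mm bookkeeping are omitted):

    /* Sketch: how a context switch picks between lazy TLB and a real switch. */
    static void demo_context_switch(struct task_struct *prev,
                                    struct task_struct *next)
    {
            struct mm_struct *oldmm = prev->active_mm;

            if (!next->mm)                          /* kernel thread: go lazy */
                    enter_lazy_tlb(oldmm, next);
            else
                    switch_mm(oldmm, next->mm, next);
    }
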
diff --git a/arch/x86/include/asm/mmu_context_32.h b/arch/x86/include/asm/mmu_context_32.h
deleted file mode 100644 (file)
index 7e98ce1..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_32_H
-#define _ASM_X86_MMU_CONTEXT_32_H
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK)
-               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev,
-                            struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       int cpu = smp_processor_id();
-
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
-               x86_write_percpu(cpu_tlbstate.active_mm, next);
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-
-               /* Re-load page tables */
-               load_cr3(next->pgd);
-
-               /*
-                * load the LDT, if the LDT is different:
-                */
-               if (unlikely(prev->context.ldt != next->context.ldt))
-                       load_LDT_nolock(&next->context);
-       }
-#ifdef CONFIG_SMP
-       else {
-               x86_write_percpu(cpu_tlbstate.state, TLBSTATE_OK);
-               BUG_ON(x86_read_percpu(cpu_tlbstate.active_mm) != next);
-
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload %cr3.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm)                 \
-       asm("movl %0,%%gs": :"r" (0));
-
-#endif /* _ASM_X86_MMU_CONTEXT_32_H */
diff --git a/arch/x86/include/asm/mmu_context_64.h b/arch/x86/include/asm/mmu_context_64.h
deleted file mode 100644 (file)
index 677d36e..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _ASM_X86_MMU_CONTEXT_64_H
-#define _ASM_X86_MMU_CONTEXT_64_H
-
-#include <asm/pda.h>
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-#ifdef CONFIG_SMP
-       if (read_pda(mmu_state) == TLBSTATE_OK)
-               write_pda(mmu_state, TLBSTATE_LAZY);
-#endif
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-                            struct task_struct *tsk)
-{
-       unsigned cpu = smp_processor_id();
-       if (likely(prev != next)) {
-               /* stop flush ipis for the previous mm */
-               cpu_clear(cpu, prev->cpu_vm_mask);
-#ifdef CONFIG_SMP
-               write_pda(mmu_state, TLBSTATE_OK);
-               write_pda(active_mm, next);
-#endif
-               cpu_set(cpu, next->cpu_vm_mask);
-               load_cr3(next->pgd);
-
-               if (unlikely(next->context.ldt != prev->context.ldt))
-                       load_LDT_nolock(&next->context);
-       }
-#ifdef CONFIG_SMP
-       else {
-               write_pda(mmu_state, TLBSTATE_OK);
-               if (read_pda(active_mm) != next)
-                       BUG();
-               if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
-                       /* We were in lazy tlb mode and leave_mm disabled
-                        * tlb flush IPI delivery. We must reload CR3
-                        * to make sure to use no freed page tables.
-                        */
-                       load_cr3(next->pgd);
-                       load_LDT_nolock(&next->context);
-               }
-       }
-#endif
-}
-
-#define deactivate_mm(tsk, mm)                 \
-do {                                           \
-       load_gs_index(0);                       \
-       asm volatile("movl %0,%%fs"::"r"(0));   \
-} while (0)
-
-#endif /* _ASM_X86_MMU_CONTEXT_64_H */
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 105fb90a063527d761ed47350d553ecb755bd3cb..ede6998bd92c73709dde26fbe07bd04808a355cd 100644 (file)
@@ -91,46 +91,9 @@ static inline int pfn_valid(int pfn)
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-#define reserve_bootmem(addr, size, flags) \
-       reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
-#define alloc_bootmem(x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_nopanic(x) \
-       __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
-                               __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_nopanic(x) \
-       __alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
-                               __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(pgdat, x)                                   \
-({                                                                     \
-       struct pglist_data  __maybe_unused                      \
-                               *__alloc_bootmem_node__pgdat = (pgdat); \
-       __alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES,        \
-                                               __pa(MAX_DMA_ADDRESS)); \
-})
-#define alloc_bootmem_pages_node(pgdat, x)                             \
-({                                                                     \
-       struct pglist_data  __maybe_unused                      \
-                               *__alloc_bootmem_node__pgdat = (pgdat); \
-       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE,              \
-                                               __pa(MAX_DMA_ADDRESS)); \
-})
-#define alloc_bootmem_low_pages_node(pgdat, x)                         \
-({                                                                     \
-       struct pglist_data  __maybe_unused                      \
-                               *__alloc_bootmem_node__pgdat = (pgdat); \
-       __alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0);          \
-})
+/* always use node 0 for bootmem on this numa platform */
+#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit) \
+       (NODE_DATA(0)->bdata)
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index bd22f2a3713fe15bac77d53847f06ac0d1be45e7..642fc7fc8cdc3fe8aedea4ddb60cacabc7925983 100644 (file)
@@ -9,7 +9,18 @@ extern int apic_version[MAX_APICS];
 extern int pic_mode;
 
 #ifdef CONFIG_X86_32
-#include <mach_mpspec.h>
+
+/*
+ * Summit or generic (i.e. installer) kernels need lots of bus entries.
+ * Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets.
+ */
+#if CONFIG_BASE_SMALL == 0
+# define MAX_MP_BUSSES         260
+#else
+# define MAX_MP_BUSSES         32
+#endif
+
+#define MAX_IRQ_SOURCES                256
 
 extern unsigned int def_to_bigsmp;
 extern u8 apicid_2_node[];
@@ -20,15 +31,15 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
-#define MAX_APICID 256
+#define MAX_APICID             256
 
-#else
+#else /* CONFIG_X86_64: */
 
-#define MAX_MP_BUSSES 256
+#define MAX_MP_BUSSES          256
 /* Each PCI slot may be a combo card with its own bus.  4 IRQ pins per slot. */
-#define MAX_IRQ_SOURCES (MAX_MP_BUSSES * 4)
+#define MAX_IRQ_SOURCES                (MAX_MP_BUSSES * 4)
 
-#endif
+#endif /* CONFIG_X86_64 */
 
 extern void early_find_smp_config(void);
 extern void early_get_smp_config(void);
@@ -45,11 +56,13 @@ extern int smp_found_config;
 extern int mpc_default_type;
 extern unsigned long mp_lapic_addr;
 
-extern void find_smp_config(void);
 extern void get_smp_config(void);
+
 #ifdef CONFIG_X86_MPPARSE
+extern void find_smp_config(void);
 extern void early_reserve_e820_mpc_new(void);
 #else
+static inline void find_smp_config(void) { }
 static inline void early_reserve_e820_mpc_new(void) { }
 #endif
 
@@ -64,6 +77,8 @@ extern int acpi_probe_gsi(void);
 #ifdef CONFIG_X86_IO_APIC
 extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
                                u32 gsi, int triggering, int polarity);
+extern int mp_find_ioapic(int gsi);
+extern int mp_find_ioapic_pin(int ioapic, int gsi);
 #else
 static inline int
 mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
@@ -148,4 +163,8 @@ static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map)
 
 extern physid_mask_t phys_cpu_present_map;
 
+extern int generic_mps_oem_check(struct mpc_table *, char *, char *);
+
+extern int default_acpi_madt_oem_check(char *, char *);
+
 #endif /* _ASM_X86_MPSPEC_H */
diff --git a/arch/x86/include/asm/mpspec_def.h b/arch/x86/include/asm/mpspec_def.h
index 59568bc4767f6c488b9ef32c1a4f30d5e74dff8f..4a7f96d7c188edd92387cdec4a3be36e23028f35 100644 (file)
 # endif
 #endif
 
-struct intel_mp_floating {
-       char mpf_signature[4];          /* "_MP_"                       */
-       unsigned int mpf_physptr;       /* Configuration table address  */
-       unsigned char mpf_length;       /* Our length (paragraphs)      */
-       unsigned char mpf_specification;/* Specification version        */
-       unsigned char mpf_checksum;     /* Checksum (makes sum 0)       */
-       unsigned char mpf_feature1;     /* Standard or configuration ?  */
-       unsigned char mpf_feature2;     /* Bit7 set for IMCR|PIC        */
-       unsigned char mpf_feature3;     /* Unused (0)                   */
-       unsigned char mpf_feature4;     /* Unused (0)                   */
-       unsigned char mpf_feature5;     /* Unused (0)                   */
+/* Intel MP Floating Pointer Structure */
+struct mpf_intel {
+       char signature[4];              /* "_MP_"                       */
+       unsigned int physptr;           /* Configuration table address  */
+       unsigned char length;           /* Our length (paragraphs)      */
+       unsigned char specification;    /* Specification version        */
+       unsigned char checksum;         /* Checksum (makes sum 0)       */
+       unsigned char feature1;         /* Standard or configuration ?  */
+       unsigned char feature2;         /* Bit7 set for IMCR|PIC        */
+       unsigned char feature3;         /* Unused (0)                   */
+       unsigned char feature4;         /* Unused (0)                   */
+       unsigned char feature5;         /* Unused (0)                   */
 };
 
 #define MPC_SIGNATURE "PCMP"
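
The rename from intel_mp_floating to mpf_intel is mechanical, but the structure is the one the MP specification says to scan for: a 16-byte-aligned block whose first four bytes are "_MP_" and whose length*16 bytes sum to zero, found in the EBDA, the top of base memory, or the 0xF0000-0xFFFFF BIOS area. A hedged scanning sketch (demo_ function assumed, not the kernel's mpparse code):

    /* Sketch: find the MP floating pointer structure in a physical window.
     * Per the MP spec it sits on a 16-byte boundary and checksums to zero. */
    static struct mpf_intel *demo_scan_mpf(unsigned long phys, unsigned long len)
    {
            unsigned char *p = phys_to_virt(phys);

            for (; len >= 16; p += 16, len -= 16) {
                    struct mpf_intel *mpf = (struct mpf_intel *)p;
                    unsigned char sum = 0;
                    unsigned int i;

                    if (memcmp(mpf->signature, "_MP_", 4))
                            continue;
                    for (i = 0; i < mpf->length * 16U; i++)
                            sum += p[i];
                    if (sum == 0)
                            return mpf;     /* physptr -> "PCMP" config table */
            }
            return NULL;
    }
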
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 358acc59ae044d421196c3beb8abcc3c6e58efc0..2dbd2314139e2426c511fe1dd7bc8b3a29392c67 100644 (file)
 #define MSR_IA32_MC0_ADDR              0x00000402
 #define MSR_IA32_MC0_MISC              0x00000403
 
+/* These are consecutive, not part of the usual four-register MCE bank blocks */
+#define MSR_IA32_MC0_CTL2              0x00000280
+#define CMCI_EN                        (1ULL << 30)
+#define CMCI_THRESHOLD_MASK            0xffffULL
+
 #define MSR_P6_PERFCTR0                        0x000000c1
 #define MSR_P6_PERFCTR1                        0x000000c2
 #define MSR_P6_EVNTSEL0                        0x00000186
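
The new CTL2 registers are laid out one MSR per bank starting at 0x280, unlike the CTL/STATUS/ADDR/MISC quartet that advances four MSRs per bank from 0x400. Arming CMCI on a bank means setting CMCI_EN plus a corrected-error count threshold in the low 16 bits. A sketch (the threshold value of 1 and the demo_ name are illustrative):

    /* Sketch: enable CMCI on one bank, interrupting after 1 corrected error. */
    static void demo_enable_cmci(int bank)
    {
            u64 val;

            rdmsrl(MSR_IA32_MC0_CTL2 + bank, val);
            val &= ~CMCI_THRESHOLD_MASK;
            val |= CMCI_EN | 1;
            wrmsrl(MSR_IA32_MC0_CTL2 + bank, val);
    }
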
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index e9f5db796244b200249802673bc30022b8d8cc94..a37229011b56bc5c013610e85cb3a597d2b7a8ff 100644 (file)
@@ -4,8 +4,12 @@
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
-#ifdef CONFIG_NUMA
+#ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
+#else
+static inline void set_highmem_pages_init(void)
+{
+}
 #endif
 
 #endif /* _ASM_X86_NUMA_32_H */
diff --git a/arch/x86/include/asm/numaq.h b/arch/x86/include/asm/numaq.h
index 1e8bd30b4c161137a68fcd242866169ce1048c62..9f0a5f5d29ec6ff29aae7d427484ad9c2416822f 100644 (file)
@@ -31,6 +31,8 @@
 extern int found_numaq;
 extern int get_memcfg_numaq(void);
 
+extern void *xquad_portio;
+
 /*
  * SYS_CFG_DATA_PRIV_ADDR, struct eachquadmem, and struct sys_cfg_data are the
  */
diff --git a/arch/x86/include/asm/numaq/apic.h b/arch/x86/include/asm/numaq/apic.h
deleted file mode 100644 (file)
index bf37bc4..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-#ifndef __ASM_NUMAQ_APIC_H
-#define __ASM_NUMAQ_APIC_H
-
-#include <asm/io.h>
-#include <linux/mmzone.h>
-#include <linux/nodemask.h>
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
-       return &CPU_MASK_ALL;
-}
-
-#define NO_BALANCE_IRQ (1)
-#define esr_disable (1)
-
-#define INT_DELIVERY_MODE dest_LowestPrio
-#define INT_DEST_MODE 0     /* physical delivery on LOCAL quad */
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return physid_isset(apicid, bitmap);
-}
-static inline unsigned long check_apicid_present(int bit)
-{
-       return physid_isset(bit, phys_cpu_present_map);
-}
-#define apicid_cluster(apicid) (apicid & 0xF0)
-
-static inline int apic_id_registered(void)
-{
-       return 1;
-}
-
-static inline void init_apic_ldr(void)
-{
-       /* Already done in NUMA-Q firmware */
-}
-
-static inline void setup_apic_routing(void)
-{
-       printk("Enabling APIC mode:  %s.  Using %d I/O APICs\n",
-               "NUMA-Q", nr_ioapics);
-}
-
-/*
- * Skip adding the timer int on secondary nodes, which causes
- * a small but painful rift in the time-space continuum.
- */
-static inline int multi_timer_check(int apic, int irq)
-{
-       return apic != 0 && irq == 0;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
-{
-       /* We don't have a good way to do this yet - hack */
-       return physids_promote(0xFUL);
-}
-
-/* Mapping from cpu number to logical apicid */
-extern u8 cpu_2_logical_apicid[];
-static inline int cpu_to_logical_apicid(int cpu)
-{
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return (int)cpu_2_logical_apicid[cpu];
-}
-
-/*
- * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
- * cpu to APIC ID relation to properly interact with the intelligent
- * mode of the cluster controller.
- */
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < 60)
-               return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
-       else
-               return BAD_APICID;
-}
-
-static inline int apicid_to_node(int logical_apicid) 
-{
-       return logical_apicid >> 4;
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int logical_apicid)
-{
-       int node = apicid_to_node(logical_apicid);
-       int cpu = __ffs(logical_apicid & 0xf);
-
-       return physid_mask_of_physid(cpu + 4*node);
-}
-
-extern void *xquad_portio;
-
-static inline void setup_portio_remap(void)
-{
-       int num_quads = num_online_nodes();
-
-       if (num_quads <= 1)
-                       return;
-
-       printk("Remapping cross-quad port I/O for %d quads\n", num_quads);
-       xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
-       printk("xquad_portio vaddr 0x%08lx, len %08lx\n",
-               (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return (1);
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-/*
- * We use physical apicids here, not logical, so just return the default
- * physical broadcast to stop people from breaking us
- */
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       return (int) 0xF;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       return (int) 0xF;
-}
-
-/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return cpuid_apic >> index_msb;
-}
-
-#endif /* __ASM_NUMAQ_APIC_H */
diff --git a/arch/x86/include/asm/numaq/apicdef.h b/arch/x86/include/asm/numaq/apicdef.h
deleted file mode 100644 (file)
index e012a46..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef __ASM_NUMAQ_APICDEF_H
-#define __ASM_NUMAQ_APICDEF_H
-
-
-#define APIC_ID_MASK (0xF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-               return (((x)>>24)&0x0F);
-}
-
-#define         GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/numaq/ipi.h b/arch/x86/include/asm/numaq/ipi.h
deleted file mode 100644 (file)
index a8374c6..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef __ASM_NUMAQ_IPI_H
-#define __ASM_NUMAQ_IPI_H
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
-
-static inline void send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(cpu_online_mask, vector);
-}
-
-#endif /* __ASM_NUMAQ_IPI_H */
diff --git a/arch/x86/include/asm/numaq/mpparse.h b/arch/x86/include/asm/numaq/mpparse.h
deleted file mode 100644 (file)
index a2eeefc..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_NUMAQ_MPPARSE_H
-#define __ASM_NUMAQ_MPPARSE_H
-
-extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);
-
-#endif /* __ASM_NUMAQ_MPPARSE_H */
diff --git a/arch/x86/include/asm/numaq/wakecpu.h b/arch/x86/include/asm/numaq/wakecpu.h
deleted file mode 100644 (file)
index 6f499df..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-#ifndef __ASM_NUMAQ_WAKECPU_H
-#define __ASM_NUMAQ_WAKECPU_H
-
-/* This file copes with machines that wakeup secondary CPUs by NMIs */
-
-#define TRAMPOLINE_PHYS_LOW (0x8)
-#define TRAMPOLINE_PHYS_HIGH (0xa)
-
-/* We don't do anything here because we use NMI's to boot instead */
-static inline void wait_for_init_deassert(atomic_t *deassert)
-{
-}
-
-/*
- * Because we use NMIs rather than the INIT-STARTUP sequence to
- * bootstrap the CPUs, the APIC may be in a weird state. Kick it.
- */
-static inline void smp_callin_clear_local_apic(void)
-{
-       clear_local_APIC();
-}
-
-static inline void store_NMI_vector(unsigned short *high, unsigned short *low)
-{
-       printk("Storing NMI vector\n");
-       *high =
-         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH));
-       *low =
-         *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW));
-}
-
-static inline void restore_NMI_vector(unsigned short *high, unsigned short *low)
-{
-       printk("Restoring NMI vector\n");
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
-                                                                *high;
-       *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
-                                                                *low;
-}
-
-static inline void inquire_remote_apic(int apicid)
-{
-}
-
-#endif /* __ASM_NUMAQ_WAKECPU_H */
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index 776579119a009feb1b2854a9ea18a68183d63243..89ed9d70b0aa6b65158b22b536677429281ea46b 100644 (file)
@@ -1,42 +1,11 @@
 #ifndef _ASM_X86_PAGE_H
 #define _ASM_X86_PAGE_H
 
-#include <linux/const.h>
-
-/* PAGE_SHIFT determines the page size */
-#define PAGE_SHIFT     12
-#define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK      (~(PAGE_SIZE-1))
+#include <linux/types.h>
 
 #ifdef __KERNEL__
 
-#define __PHYSICAL_MASK                ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
-#define __VIRTUAL_MASK         ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
-
-/* Cast PAGE_MASK to a signed type so that it is sign-extended if
-   virtual addresses are 32-bits but physical addresses are larger
-   (ie, 32-bit PAE). */
-#define PHYSICAL_PAGE_MASK     (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
-
-/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
-#define PTE_PFN_MASK           ((pteval_t)PHYSICAL_PAGE_MASK)
-
-/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
-#define PTE_FLAGS_MASK         (~PTE_PFN_MASK)
-
-#define PMD_PAGE_SIZE          (_AC(1, UL) << PMD_SHIFT)
-#define PMD_PAGE_MASK          (~(PMD_PAGE_SIZE-1))
-
-#define HPAGE_SHIFT            PMD_SHIFT
-#define HPAGE_SIZE             (_AC(1,UL) << HPAGE_SHIFT)
-#define HPAGE_MASK             (~(HPAGE_SIZE - 1))
-#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
-
-#define HUGE_MAX_HSTATE 2
-
-#ifndef __ASSEMBLY__
-#include <linux/types.h>
-#endif
+#include <asm/page_types.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/page_64.h>
 #else
 #include <asm/page_32.h>
 #endif /* CONFIG_X86_64 */
 
-#define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
-
-#define VM_DATA_DEFAULT_FLAGS \
-       (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
-        VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-
 #ifndef __ASSEMBLY__
 
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pgprotval_t pgprot; } pgprot_t;
-
-extern int page_is_ram(unsigned long pagenr);
-extern int devmem_is_allowed(unsigned long pagenr);
-extern void map_devmem(unsigned long pfn, unsigned long size,
-                      pgprot_t vma_prot);
-extern void unmap_devmem(unsigned long pfn, unsigned long size,
-                        pgprot_t vma_prot);
-
-extern unsigned long max_low_pfn_mapped;
-extern unsigned long max_pfn_mapped;
-
 struct page;
 
 static inline void clear_user_page(void *page, unsigned long vaddr,
-                               struct page *pg)
+                                  struct page *pg)
 {
        clear_page(page);
 }
 
 static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
-                               struct page *topage)
+                                 struct page *topage)
 {
        copy_page(to, from);
 }
@@ -84,99 +33,6 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
        alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 
-static inline pgd_t native_make_pgd(pgdval_t val)
-{
-       return (pgd_t) { val };
-}
-
-static inline pgdval_t native_pgd_val(pgd_t pgd)
-{
-       return pgd.pgd;
-}
-
-#if PAGETABLE_LEVELS >= 3
-#if PAGETABLE_LEVELS == 4
-typedef struct { pudval_t pud; } pud_t;
-
-static inline pud_t native_make_pud(pmdval_t val)
-{
-       return (pud_t) { val };
-}
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
-       return pud.pud;
-}
-#else  /* PAGETABLE_LEVELS == 3 */
-#include <asm-generic/pgtable-nopud.h>
-
-static inline pudval_t native_pud_val(pud_t pud)
-{
-       return native_pgd_val(pud.pgd);
-}
-#endif /* PAGETABLE_LEVELS == 4 */
-
-typedef struct { pmdval_t pmd; } pmd_t;
-
-static inline pmd_t native_make_pmd(pmdval_t val)
-{
-       return (pmd_t) { val };
-}
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
-       return pmd.pmd;
-}
-#else  /* PAGETABLE_LEVELS == 2 */
-#include <asm-generic/pgtable-nopmd.h>
-
-static inline pmdval_t native_pmd_val(pmd_t pmd)
-{
-       return native_pgd_val(pmd.pud.pgd);
-}
-#endif /* PAGETABLE_LEVELS >= 3 */
-
-static inline pte_t native_make_pte(pteval_t val)
-{
-       return (pte_t) { .pte = val };
-}
-
-static inline pteval_t native_pte_val(pte_t pte)
-{
-       return pte.pte;
-}
-
-static inline pteval_t native_pte_flags(pte_t pte)
-{
-       return native_pte_val(pte) & PTE_FLAGS_MASK;
-}
-
-#define pgprot_val(x)  ((x).pgprot)
-#define __pgprot(x)    ((pgprot_t) { (x) } )
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else  /* !CONFIG_PARAVIRT */
-
-#define pgd_val(x)     native_pgd_val(x)
-#define __pgd(x)       native_make_pgd(x)
-
-#ifndef __PAGETABLE_PUD_FOLDED
-#define pud_val(x)     native_pud_val(x)
-#define __pud(x)       native_make_pud(x)
-#endif
-
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pmd_val(x)     native_pmd_val(x)
-#define __pmd(x)       native_make_pmd(x)
-#endif
-
-#define pte_val(x)     native_pte_val(x)
-#define pte_flags(x)   native_pte_flags(x)
-#define __pte(x)       native_make_pte(x)
-
-#endif /* CONFIG_PARAVIRT */
-
 #define __pa(x)                __phys_addr((unsigned long)(x))
 #define __pa_nodebug(x)        __phys_addr_nodebug((unsigned long)(x))
 /* __pa_symbol should be used for C visible symbols.
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index bcde0d7b432547be4068715919cc58e2ebfd8af9..da4e762406f7da5913080fb5394b9d5b1197965e 100644 (file)
@@ -1,82 +1,14 @@
 #ifndef _ASM_X86_PAGE_32_H
 #define _ASM_X86_PAGE_32_H
 
-/*
- * This handles the memory map.
- *
- * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
- * a virtual address space of one gigabyte, which limits the
- * amount of physical memory you can use to about 950MB.
- *
- * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
- * and CONFIG_HIGHMEM64G options in the kernel configuration.
- */
-#define __PAGE_OFFSET          _AC(CONFIG_PAGE_OFFSET, UL)
-
-#ifdef CONFIG_4KSTACKS
-#define THREAD_ORDER   0
-#else
-#define THREAD_ORDER   1
-#endif
-#define THREAD_SIZE    (PAGE_SIZE << THREAD_ORDER)
-
-#define STACKFAULT_STACK 0
-#define DOUBLEFAULT_STACK 1
-#define NMI_STACK 0
-#define DEBUG_STACK 0
-#define MCE_STACK 0
-#define N_EXCEPTION_STACKS 1
-
-#ifdef CONFIG_X86_PAE
-/* 44=32+12, the limit we can fit into an unsigned long pfn */
-#define __PHYSICAL_MASK_SHIFT  44
-#define __VIRTUAL_MASK_SHIFT   32
-#define PAGETABLE_LEVELS       3
-
-#ifndef __ASSEMBLY__
-typedef u64    pteval_t;
-typedef u64    pmdval_t;
-typedef u64    pudval_t;
-typedef u64    pgdval_t;
-typedef u64    pgprotval_t;
-
-typedef union {
-       struct {
-               unsigned long pte_low, pte_high;
-       };
-       pteval_t pte;
-} pte_t;
-#endif /* __ASSEMBLY__
- */
-#else  /* !CONFIG_X86_PAE */
-#define __PHYSICAL_MASK_SHIFT  32
-#define __VIRTUAL_MASK_SHIFT   32
-#define PAGETABLE_LEVELS       2
-
-#ifndef __ASSEMBLY__
-typedef unsigned long  pteval_t;
-typedef unsigned long  pmdval_t;
-typedef unsigned long  pudval_t;
-typedef unsigned long  pgdval_t;
-typedef unsigned long  pgprotval_t;
-
-typedef union {
-       pteval_t pte;
-       pteval_t pte_low;
-} pte_t;
-
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_X86_PAE */
+#include <asm/page_32_types.h>
 
 #ifndef __ASSEMBLY__
-typedef struct page *pgtable_t;
-#endif
 
 #ifdef CONFIG_HUGETLB_PAGE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
-#ifndef __ASSEMBLY__
 #define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
 #ifdef CONFIG_DEBUG_VIRTUAL
 extern unsigned long __phys_addr(unsigned long);
@@ -89,23 +21,6 @@ extern unsigned long __phys_addr(unsigned long);
 #define pfn_valid(pfn)         ((pfn) < max_mapnr)
 #endif /* CONFIG_FLATMEM */
 
-extern int nx_enabled;
-
-/*
- * This much address space is reserved for vmalloc() and iomap()
- * as well as fixmap mappings.
- */
-extern unsigned int __VMALLOC_RESERVE;
-extern int sysctl_legacy_va_layout;
-
-extern void find_low_pfn_range(void);
-extern unsigned long init_memory_mapping(unsigned long start,
-                                        unsigned long end);
-extern void initmem_init(unsigned long, unsigned long);
-extern void free_initmem(void);
-extern void setup_bootmem_allocator(void);
-
-
 #ifdef CONFIG_X86_USE_3DNOW
 #include <asm/mmx.h>
 
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
new file mode 100644 (file)
index 0000000..f1e4a79
--- /dev/null
@@ -0,0 +1,60 @@
+#ifndef _ASM_X86_PAGE_32_DEFS_H
+#define _ASM_X86_PAGE_32_DEFS_H
+
+#include <linux/const.h>
+
+/*
+ * This handles the memory map.
+ *
+ * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
+ * a virtual address space of one gigabyte, which limits the
+ * amount of physical memory you can use to about 950MB.
+ *
+ * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
+ * and CONFIG_HIGHMEM64G options in the kernel configuration.
+ */
+#define __PAGE_OFFSET          _AC(CONFIG_PAGE_OFFSET, UL)
+
+#ifdef CONFIG_4KSTACKS
+#define THREAD_ORDER   0
+#else
+#define THREAD_ORDER   1
+#endif
+#define THREAD_SIZE    (PAGE_SIZE << THREAD_ORDER)
+
+#define STACKFAULT_STACK 0
+#define DOUBLEFAULT_STACK 1
+#define NMI_STACK 0
+#define DEBUG_STACK 0
+#define MCE_STACK 0
+#define N_EXCEPTION_STACKS 1
+
+#ifdef CONFIG_X86_PAE
+/* 44=32+12, the limit we can fit into an unsigned long pfn */
+#define __PHYSICAL_MASK_SHIFT  44
+#define __VIRTUAL_MASK_SHIFT   32
+
+#else  /* !CONFIG_X86_PAE */
+#define __PHYSICAL_MASK_SHIFT  32
+#define __VIRTUAL_MASK_SHIFT   32
+#endif /* CONFIG_X86_PAE */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+ */
+extern unsigned int __VMALLOC_RESERVE;
+extern int sysctl_legacy_va_layout;
+
+extern void find_low_pfn_range(void);
+extern unsigned long init_memory_mapping(unsigned long start,
+                                        unsigned long end);
+extern void initmem_init(unsigned long, unsigned long);
+extern void free_initmem(void);
+extern void setup_bootmem_allocator(void);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_32_DEFS_H */
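
Since 32-bit lowmem is mapped linearly at PAGE_OFFSET, virtual-to-physical conversion is a single subtraction. A worked example, assuming the default CONFIG_PAGE_OFFSET of 0xC0000000:

    /* __phys_addr_nodebug(v) is (v) - PAGE_OFFSET, so the kernel text
     * mapped at virtual 0xC0100000 sits at physical 0x00100000 (1 MiB). */
    static const unsigned long demo_pa = 0xC0100000UL - 0xC0000000UL;
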
diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
index 5ebca29f44f0d77890b4395d11313b589e386b9c..072694ed81a57317aff966a63ca778f2b2f4ba73 100644 (file)
@@ -1,105 +1,6 @@
 #ifndef _ASM_X86_PAGE_64_H
 #define _ASM_X86_PAGE_64_H
 
-#define PAGETABLE_LEVELS       4
-
-#define THREAD_ORDER   1
-#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
-#define CURRENT_MASK (~(THREAD_SIZE - 1))
-
-#define EXCEPTION_STACK_ORDER 0
-#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-
-#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
-#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
-
-#define IRQSTACK_ORDER 2
-#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
-
-#define STACKFAULT_STACK 1
-#define DOUBLEFAULT_STACK 2
-#define NMI_STACK 3
-#define DEBUG_STACK 4
-#define MCE_STACK 5
-#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
-
-#define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
-#define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
-
-/*
- * Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272).  The gap is to allow a space for a
- * hypervisor to fit.  Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
- */
-#define __PAGE_OFFSET           _AC(0xffff880000000000, UL)
-
-#define __PHYSICAL_START       CONFIG_PHYSICAL_START
-#define __KERNEL_ALIGN         0x200000
-
-/*
- * Make sure kernel is aligned to 2MB address. Catching it at compile
- * time is better. Change your config file and compile the kernel
- * for a 2MB aligned address (CONFIG_PHYSICAL_START)
- */
-#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
-#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
-#endif
-
-#define __START_KERNEL         (__START_KERNEL_map + __PHYSICAL_START)
-#define __START_KERNEL_map     _AC(0xffffffff80000000, UL)
-
-/* See Documentation/x86_64/mm.txt for a description of the memory map. */
-#define __PHYSICAL_MASK_SHIFT  46
-#define __VIRTUAL_MASK_SHIFT   48
-
-/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
- */
-#define KERNEL_IMAGE_SIZE      (512 * 1024 * 1024)
-#define KERNEL_IMAGE_START     _AC(0xffffffff80000000, UL)
-
-#ifndef __ASSEMBLY__
-void clear_page(void *page);
-void copy_page(void *to, void *from);
-
-/* duplicated to the one in bootmem.h */
-extern unsigned long max_pfn;
-extern unsigned long phys_base;
-
-extern unsigned long __phys_addr(unsigned long);
-#define __phys_reloc_hide(x)   (x)
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef unsigned long  pteval_t;
-typedef unsigned long  pmdval_t;
-typedef unsigned long  pudval_t;
-typedef unsigned long  pgdval_t;
-typedef unsigned long  pgprotval_t;
-
-typedef struct page *pgtable_t;
-
-typedef struct { pteval_t pte; } pte_t;
-
-#define vmemmap ((struct page *)VMEMMAP_START)
-
-extern unsigned long init_memory_mapping(unsigned long start,
-                                        unsigned long end);
-
-extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
-extern void free_initmem(void);
-
-extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
-extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
-
-#endif /* !__ASSEMBLY__ */
-
-#ifdef CONFIG_FLATMEM
-#define pfn_valid(pfn)          ((pfn) < max_pfn)
-#endif
-
+#include <asm/page_64_types.h>
 
 #endif /* _ASM_X86_PAGE_64_H */
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
new file mode 100644 (file)
index 0000000..d38c91b
--- /dev/null
@@ -0,0 +1,89 @@
+#ifndef _ASM_X86_PAGE_64_DEFS_H
+#define _ASM_X86_PAGE_64_DEFS_H
+
+#define THREAD_ORDER   1
+#define THREAD_SIZE  (PAGE_SIZE << THREAD_ORDER)
+#define CURRENT_MASK (~(THREAD_SIZE - 1))
+
+#define EXCEPTION_STACK_ORDER 0
+#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
+
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
+#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
+
+#define IRQ_STACK_ORDER 2
+#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
+
+#define STACKFAULT_STACK 1
+#define DOUBLEFAULT_STACK 2
+#define NMI_STACK 3
+#define DEBUG_STACK 4
+#define MCE_STACK 5
+#define N_EXCEPTION_STACKS 5  /* hw limit: 7 */
+
+#define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
+#define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
+
+/*
+ * Set __PAGE_OFFSET to the most negative possible address +
+ * PGDIR_SIZE*16 (pgd slot 272).  The gap is to allow a space for a
+ * hypervisor to fit.  Choosing 16 slots here is arbitrary, but it's
+ * what Xen requires.
+ */
+#define __PAGE_OFFSET           _AC(0xffff880000000000, UL)
+
+#define __PHYSICAL_START       CONFIG_PHYSICAL_START
+#define __KERNEL_ALIGN         0x200000
+
+/*
+ * Make sure kernel is aligned to 2MB address. Catching it at compile
+ * time is better. Change your config file and compile the kernel
+ * for a 2MB aligned address (CONFIG_PHYSICAL_START)
+ */
+#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
+#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
+#endif
+
+#define __START_KERNEL         (__START_KERNEL_map + __PHYSICAL_START)
+#define __START_KERNEL_map     _AC(0xffffffff80000000, UL)
+
+/* See Documentation/x86_64/mm.txt for a description of the memory map. */
+#define __PHYSICAL_MASK_SHIFT  46
+#define __VIRTUAL_MASK_SHIFT   48
+
+/*
+ * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
+ * arch/x86/kernel/head_64.S), and it is mapped here:
+ */
+#define KERNEL_IMAGE_SIZE      (512 * 1024 * 1024)
+#define KERNEL_IMAGE_START     _AC(0xffffffff80000000, UL)
+
+#ifndef __ASSEMBLY__
+void clear_page(void *page);
+void copy_page(void *to, void *from);
+
+/* duplicated to the one in bootmem.h */
+extern unsigned long max_pfn;
+extern unsigned long phys_base;
+
+extern unsigned long __phys_addr(unsigned long);
+#define __phys_reloc_hide(x)   (x)
+
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+extern unsigned long init_memory_mapping(unsigned long start,
+                                        unsigned long end);
+
+extern void initmem_init(unsigned long start_pfn, unsigned long end_pfn);
+extern void free_initmem(void);
+
+extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
+extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn)          ((pfn) < max_pfn)
+#endif
+
+#endif /* _ASM_X86_PAGE_64_DEFS_H */
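
A worked example of the kernel-text layout encoded above, assuming the common CONFIG_PHYSICAL_START value of 0x200000:

    /* __START_KERNEL = __START_KERNEL_map + __PHYSICAL_START, and the
     * image must fit in KERNEL_IMAGE_SIZE (512 MB) above the map base. */
    static const unsigned long long demo_start_kernel =
            0xffffffff80000000ULL + 0x200000ULL;    /* == 0xffffffff80200000 */
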
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
new file mode 100644 (file)
index 0000000..826ad37
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef _ASM_X86_PAGE_DEFS_H
+#define _ASM_X86_PAGE_DEFS_H
+
+#include <linux/const.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT     12
+#define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK      (~(PAGE_SIZE-1))
+
+#define __PHYSICAL_MASK                ((phys_addr_t)(1ULL << __PHYSICAL_MASK_SHIFT) - 1)
+#define __VIRTUAL_MASK         ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
+
+/* Cast PAGE_MASK to a signed type so that it is sign-extended if
+   virtual addresses are 32 bits but physical addresses are larger
+   (i.e., 32-bit PAE). */
+#define PHYSICAL_PAGE_MASK     (((signed long)PAGE_MASK) & __PHYSICAL_MASK)
+
+#define PMD_PAGE_SIZE          (_AC(1, UL) << PMD_SHIFT)
+#define PMD_PAGE_MASK          (~(PMD_PAGE_SIZE-1))
+
+#define HPAGE_SHIFT            PMD_SHIFT
+#define HPAGE_SIZE             (_AC(1,UL) << HPAGE_SHIFT)
+#define HPAGE_MASK             (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define HUGE_MAX_HSTATE 2
+
+#define PAGE_OFFSET            ((unsigned long)__PAGE_OFFSET)
+
+#define VM_DATA_DEFAULT_FLAGS \
+       (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+        VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#ifdef CONFIG_X86_64
+#include <asm/page_64_types.h>
+#else
+#include <asm/page_32_types.h>
+#endif /* CONFIG_X86_64 */
+
+#ifndef __ASSEMBLY__
+
+extern int page_is_ram(unsigned long pagenr);
+extern int devmem_is_allowed(unsigned long pagenr);
+
+extern unsigned long max_low_pfn_mapped;
+extern unsigned long max_pfn_mapped;
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PAGE_DEFS_H */
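
The constants above support the usual page arithmetic: rounding an address down to its page, extracting the in-page offset, and converting to a page frame number. A small sketch (demo_ function is illustrative):

    /* Sketch of the idioms PAGE_SHIFT/PAGE_SIZE/PAGE_MASK exist for. */
    static unsigned long demo_pfn(unsigned long addr)
    {
            unsigned long page = addr & PAGE_MASK;   /* 0x12345678 -> 0x12345000 */
            unsigned long off  = addr & ~PAGE_MASK;  /*            -> 0x00000678 */

            return (page + off) >> PAGE_SHIFT;       /* pfn: 0x12345 */
    }
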
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e299287e8e3396d95b3169c5a9e9a1086e662a1a..0617d5cc9712d00a3ca61ad80f515484cb7798e5 100644 (file)
@@ -4,7 +4,7 @@
  * para-virtualization: those hooks are defined here. */
 
 #ifdef CONFIG_PARAVIRT
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
 #include <asm/asm.h>
 
 /* Bitmask of what can be clobbered: usually at least eax. */
 #define CLBR_EAX  (1 << 0)
 #define CLBR_ECX  (1 << 1)
 #define CLBR_EDX  (1 << 2)
+#define CLBR_EDI  (1 << 3)
 
-#ifdef CONFIG_X86_64
-#define CLBR_RSI  (1 << 3)
-#define CLBR_RDI  (1 << 4)
+#ifdef CONFIG_X86_32
+/* CLBR_ANY should cover every register the platform has; for i386 that is just the four above */
+#define CLBR_ANY  ((1 << 4) - 1)
+
+#define CLBR_ARG_REGS  (CLBR_EAX | CLBR_EDX | CLBR_ECX)
+#define CLBR_RET_REG   (CLBR_EAX | CLBR_EDX)
+#define CLBR_SCRATCH   (0)
+#else
+#define CLBR_RAX  CLBR_EAX
+#define CLBR_RCX  CLBR_ECX
+#define CLBR_RDX  CLBR_EDX
+#define CLBR_RDI  CLBR_EDI
+#define CLBR_RSI  (1 << 4)
 #define CLBR_R8   (1 << 5)
 #define CLBR_R9   (1 << 6)
 #define CLBR_R10  (1 << 7)
 #define CLBR_R11  (1 << 8)
+
 #define CLBR_ANY  ((1 << 9) - 1)
+
+#define CLBR_ARG_REGS  (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
+                        CLBR_RCX | CLBR_R8 | CLBR_R9)
+#define CLBR_RET_REG   (CLBR_RAX)
+#define CLBR_SCRATCH   (CLBR_R10 | CLBR_R11)
+
 #include <asm/desc_defs.h>
-#else
-/* CLBR_ANY should match all regs platform has. For i386, that's just it */
-#define CLBR_ANY  ((1 << 3) - 1)
 #endif /* X86_64 */
 
+#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)
+
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
 #include <linux/cpumask.h>
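
Worked through, the new mask algebra on 32-bit gives: CLBR_ARG_REGS = EAX|EDX|ECX = 0x7, CLBR_SCRATCH = 0, CLBR_RET_REG = EAX|EDX = 0x5, so CLBR_CALLEE_SAVE = (0x7 | 0) & ~0x5 = 0x2 = CLBR_ECX; a callee-save thunk on i386 only has to preserve %ecx beyond the return registers. On 64-bit the same formula yields RDI|RSI|RDX|RCX|R8|R9|R10|R11. As a spelled-out check (literal bit values, illustrative):

    /* CLBR_EAX = 1<<0, CLBR_ECX = 1<<1, CLBR_EDX = 1<<2 on 32-bit. */
    static const unsigned demo_callee_save_32 =
            ((0x1 | 0x4 | 0x2) | 0x0) & ~(0x1 | 0x4);   /* == 0x2 == CLBR_ECX */
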
@@ -40,6 +57,14 @@ struct tss_struct;
 struct mm_struct;
 struct desc_struct;
 
+/*
+ * Wrapper type for pointers to code which uses the non-standard
+ * calling convention.  See PV_CALL_SAVE_REGS_THUNK below.
+ */
+struct paravirt_callee_save {
+       void *func;
+};
+
 /* general info */
 struct pv_info {
        unsigned int kernel_rpl;
@@ -189,11 +214,15 @@ struct pv_irq_ops {
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
+        *
+        * NOTE: callers of these functions expect the callee to
+        * preserve more registers than the standard C calling
+        * convention requires.
         */
-       unsigned long (*save_fl)(void);
-       void (*restore_fl)(unsigned long);
-       void (*irq_disable)(void);
-       void (*irq_enable)(void);
+       struct paravirt_callee_save save_fl;
+       struct paravirt_callee_save restore_fl;
+       struct paravirt_callee_save irq_disable;
+       struct paravirt_callee_save irq_enable;
+
        void (*safe_halt)(void);
        void (*halt)(void);
 
@@ -244,7 +273,8 @@ struct pv_mmu_ops {
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
-       void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
+       void (*flush_tlb_others)(const struct cpumask *cpus,
+                                struct mm_struct *mm,
                                 unsigned long va);
 
        /* Hooks for allocating and freeing a pagetable top-level */
@@ -278,12 +308,11 @@ struct pv_mmu_ops {
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);
 
-       pteval_t (*pte_val)(pte_t);
-       pteval_t (*pte_flags)(pte_t);
-       pte_t (*make_pte)(pteval_t pte);
+       struct paravirt_callee_save pte_val;
+       struct paravirt_callee_save make_pte;
 
-       pgdval_t (*pgd_val)(pgd_t);
-       pgd_t (*make_pgd)(pgdval_t pgd);
+       struct paravirt_callee_save pgd_val;
+       struct paravirt_callee_save make_pgd;
 
 #if PAGETABLE_LEVELS >= 3
 #ifdef CONFIG_X86_PAE
@@ -298,12 +327,12 @@ struct pv_mmu_ops {
 
        void (*set_pud)(pud_t *pudp, pud_t pudval);
 
-       pmdval_t (*pmd_val)(pmd_t);
-       pmd_t (*make_pmd)(pmdval_t pmd);
+       struct paravirt_callee_save pmd_val;
+       struct paravirt_callee_save make_pmd;
 
 #if PAGETABLE_LEVELS == 4
-       pudval_t (*pud_val)(pud_t);
-       pud_t (*make_pud)(pudval_t pud);
+       struct paravirt_callee_save pud_val;
+       struct paravirt_callee_save make_pud;
 
        void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
 #endif /* PAGETABLE_LEVELS == 4 */
@@ -388,6 +417,8 @@ extern struct pv_lock_ops pv_lock_ops;
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
 
 unsigned paravirt_patch_nop(void);
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
 unsigned paravirt_patch_ignore(unsigned len);
 unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
@@ -479,25 +510,45 @@ int paravirt_disable_iospace(void);
  * makes sure the incoming and outgoing types are always correct.
  */
 #ifdef CONFIG_X86_32
-#define PVOP_VCALL_ARGS                        unsigned long __eax, __edx, __ecx
+#define PVOP_VCALL_ARGS                                \
+       unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
 #define PVOP_CALL_ARGS                 PVOP_VCALL_ARGS
+
+#define PVOP_CALL_ARG1(x)              "a" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)              "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)              "c" ((unsigned long)(x))
+
 #define PVOP_VCALL_CLOBBERS            "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
 #define PVOP_CALL_CLOBBERS             PVOP_VCALL_CLOBBERS
+
+#define PVOP_VCALLEE_CLOBBERS          "=a" (__eax), "=d" (__edx)
+#define PVOP_CALLEE_CLOBBERS           PVOP_VCALLEE_CLOBBERS
+
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
-#else
-#define PVOP_VCALL_ARGS                unsigned long __edi, __esi, __edx, __ecx
+#else  /* CONFIG_X86_64 */
+#define PVOP_VCALL_ARGS                                        \
+       unsigned long __edi = __edi, __esi = __esi,     \
+               __edx = __edx, __ecx = __ecx
 #define PVOP_CALL_ARGS         PVOP_VCALL_ARGS, __eax
+
+#define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
+#define PVOP_CALL_ARG2(x)              "S" ((unsigned long)(x))
+#define PVOP_CALL_ARG3(x)              "d" ((unsigned long)(x))
+#define PVOP_CALL_ARG4(x)              "c" ((unsigned long)(x))
+
 #define PVOP_VCALL_CLOBBERS    "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
-
 #define PVOP_CALL_CLOBBERS     PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+#define PVOP_VCALLEE_CLOBBERS  "=a" (__eax)
+#define PVOP_CALLEE_CLOBBERS   PVOP_VCALLEE_CLOBBERS
+
 #define EXTRA_CLOBBERS  , "r8", "r9", "r10", "r11"
 #define VEXTRA_CLOBBERS         , "rax", "r8", "r9", "r10", "r11"
-#endif
+#endif /* CONFIG_X86_32 */
 
 #ifdef CONFIG_PARAVIRT_DEBUG
 #define PVOP_TEST_NULL(op)     BUG_ON(op == NULL)
@@ -505,10 +556,11 @@ int paravirt_disable_iospace(void);
 #define PVOP_TEST_NULL(op)     ((void)op)
 #endif
 
-#define __PVOP_CALL(rettype, op, pre, post, ...)                       \
+#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,                \
+                     pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
-               PVOP_CALL_ARGS;                                 \
+               PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
@@ -516,70 +568,113 @@ int paravirt_disable_iospace(void);
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : PVOP_CALL_CLOBBERS               \
+                                    : call_clbr                        \
                                     : paravirt_type(op),               \
-                                      paravirt_clobber(CLBR_ANY),      \
+                                      paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
-                                    : "memory", "cc" EXTRA_CLOBBERS);  \
+                                    : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
-                                    : PVOP_CALL_CLOBBERS               \
+                                    : call_clbr                        \
                                     : paravirt_type(op),               \
-                                      paravirt_clobber(CLBR_ANY),      \
+                                      paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
-                                    : "memory", "cc" EXTRA_CLOBBERS);  \
+                                    : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })
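
The (((u64)__edx) << 32) | __eax expression above is the usual reassembly of a 64-bit return value that arrives in the edx:eax pair on 32-bit; a freestanding sketch of just that step:

#include <assert.h>
#include <stdint.h>

/* Model of the edx:eax -> u64 reassembly performed by ____PVOP_CALL. */
static uint64_t combine(uint32_t edx, uint32_t eax)
{
	return ((uint64_t)edx << 32) | eax;
}

int main(void)
{
	assert(combine(0x1, 0x2) == 0x100000002ULL);
	return 0;
}
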
-#define __PVOP_VCALL(op, pre, post, ...)                               \
+
+#define __PVOP_CALL(rettype, op, pre, post, ...)                       \
+       ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
+                     EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)
+
+#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                 \
+       ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
+                     PVOP_CALLEE_CLOBBERS, ,                           \
+                     pre, post, ##__VA_ARGS__)
+
+
+#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...)        \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
-                            : PVOP_VCALL_CLOBBERS                      \
+                            : call_clbr                                \
                             : paravirt_type(op),                       \
-                              paravirt_clobber(CLBR_ANY),              \
+                              paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
-                            : "memory", "cc" VEXTRA_CLOBBERS);         \
+                            : "memory", "cc" extra_clbr);              \
        })
 
+#define __PVOP_VCALL(op, pre, post, ...)                               \
+       ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
+                      VEXTRA_CLOBBERS,                                 \
+                      pre, post, ##__VA_ARGS__)
+
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)                         \
+       ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
+                      PVOP_VCALLEE_CLOBBERS, ,                         \
+                      pre, post, ##__VA_ARGS__)
+
+
+
 #define PVOP_CALL0(rettype, op)                                                \
        __PVOP_CALL(rettype, op, "", "")
 #define PVOP_VCALL0(op)                                                        \
        __PVOP_VCALL(op, "", "")
 
+#define PVOP_CALLEE0(rettype, op)                                      \
+       __PVOP_CALLEESAVE(rettype, op, "", "")
+#define PVOP_VCALLEE0(op)                                              \
+       __PVOP_VCALLEESAVE(op, "", "")
+
+
 #define PVOP_CALL1(rettype, op, arg1)                                  \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
+       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
 #define PVOP_VCALL1(op, arg1)                                          \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
+       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))
+
+#define PVOP_CALLEE1(rettype, op, arg1)                                        \
+       __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
+#define PVOP_VCALLEE1(op, arg1)                                                \
+       __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))
+
 
 #define PVOP_CALL2(rettype, op, arg1, arg2)                            \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
-       "1" ((unsigned long)(arg2)))
+       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
+                   PVOP_CALL_ARG2(arg2))
 #define PVOP_VCALL2(op, arg1, arg2)                                    \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
-       "1" ((unsigned long)(arg2)))
+       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
+                    PVOP_CALL_ARG2(arg2))
+
+#define PVOP_CALLEE2(rettype, op, arg1, arg2)                          \
+       __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
+                         PVOP_CALL_ARG2(arg2))
+#define PVOP_VCALLEE2(op, arg1, arg2)                                  \
+       __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
+                          PVOP_CALL_ARG2(arg2))
+
 
 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                      \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+       __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
+                   PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 #define PVOP_VCALL3(op, arg1, arg2, arg3)                              \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
+       __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
+                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
 
 /* This is the only place where x86_64 differs; there we can make it much simpler */
 #ifdef CONFIG_X86_32
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                        \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
-                   "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
-                   "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
+                   PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
+                   PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                                \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
@@ -587,13 +682,13 @@ int paravirt_disable_iospace(void);
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
 #else
 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                        \
-       __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
-       "3"((unsigned long)(arg4)))
+       __PVOP_CALL(rettype, op, "", "",                                \
+                   PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
+                   PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                                \
-       __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
-       "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
-       "3"((unsigned long)(arg4)))
+       __PVOP_VCALL(op, "", "",                                        \
+                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
+                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
 #endif
 
 static inline int paravirt_enabled(void)
@@ -984,10 +1079,11 @@ static inline void __flush_tlb_single(unsigned long addr)
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
 }
 
-static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+static inline void flush_tlb_others(const struct cpumask *cpumask,
+                                   struct mm_struct *mm,
                                    unsigned long va)
 {
-       PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
+       PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
 }
 
 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
@@ -1059,13 +1155,13 @@ static inline pte_t __pte(pteval_t val)
        pteval_t ret;
 
        if (sizeof(pteval_t) > sizeof(long))
-               ret = PVOP_CALL2(pteval_t,
-                                pv_mmu_ops.make_pte,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pteval_t,
+                                  pv_mmu_ops.make_pte,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pteval_t,
-                                pv_mmu_ops.make_pte,
-                                val);
+               ret = PVOP_CALLEE1(pteval_t,
+                                  pv_mmu_ops.make_pte,
+                                  val);
 
        return (pte_t) { .pte = ret };
 }
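
Note that sizeof(pteval_t) > sizeof(long) is a compile-time constant, so only one branch survives; on 32-bit PAE the 64-bit value is split into low and high halves that land in the first two argument registers. A sketch of the split and the round trip:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t val = 0x8000000012345067ULL;	/* a PAE pte: NX + pfn + flags */
	uint32_t lo  = (uint32_t)val;		/* first argument register  */
	uint32_t hi  = (uint32_t)(val >> 32);	/* second argument register */

	assert((((uint64_t)hi << 32) | lo) == val);	/* round-trips */
	return 0;
}
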
@@ -1075,29 +1171,12 @@ static inline pteval_t pte_val(pte_t pte)
        pteval_t ret;
 
        if (sizeof(pteval_t) > sizeof(long))
-               ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
-                                pte.pte, (u64)pte.pte >> 32);
-       else
-               ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
-                                pte.pte);
-
-       return ret;
-}
-
-static inline pteval_t pte_flags(pte_t pte)
-{
-       pteval_t ret;
-
-       if (sizeof(pteval_t) > sizeof(long))
-               ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_flags,
-                                pte.pte, (u64)pte.pte >> 32);
+               ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
+                                  pte.pte, (u64)pte.pte >> 32);
        else
-               ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_flags,
-                                pte.pte);
+               ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
+                                  pte.pte);
 
-#ifdef CONFIG_PARAVIRT_DEBUG
-       BUG_ON(ret & PTE_PFN_MASK);
-#endif
        return ret;
 }
 
@@ -1106,11 +1185,11 @@ static inline pgd_t __pgd(pgdval_t val)
        pgdval_t ret;
 
        if (sizeof(pgdval_t) > sizeof(long))
-               ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
-                                val);
+               ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
+                                  val);
 
        return (pgd_t) { ret };
 }
@@ -1120,11 +1199,11 @@ static inline pgdval_t pgd_val(pgd_t pgd)
        pgdval_t ret;
 
        if (sizeof(pgdval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
-                                 pgd.pgd, (u64)pgd.pgd >> 32);
+               ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
+                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
-               ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
-                                 pgd.pgd);
+               ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
+                                   pgd.pgd);
 
        return ret;
 }
@@ -1188,11 +1267,11 @@ static inline pmd_t __pmd(pmdval_t val)
        pmdval_t ret;
 
        if (sizeof(pmdval_t) > sizeof(long))
-               ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
-                                val);
+               ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
+                                  val);
 
        return (pmd_t) { ret };
 }
@@ -1202,11 +1281,11 @@ static inline pmdval_t pmd_val(pmd_t pmd)
        pmdval_t ret;
 
        if (sizeof(pmdval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
-                                 pmd.pmd, (u64)pmd.pmd >> 32);
+               ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
+                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
-               ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
-                                 pmd.pmd);
+               ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
+                                   pmd.pmd);
 
        return ret;
 }
@@ -1228,11 +1307,11 @@ static inline pud_t __pud(pudval_t val)
        pudval_t ret;
 
        if (sizeof(pudval_t) > sizeof(long))
-               ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
-                                val, (u64)val >> 32);
+               ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
+                                  val, (u64)val >> 32);
        else
-               ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
-                                val);
+               ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
+                                  val);
 
        return (pud_t) { ret };
 }
@@ -1242,11 +1321,11 @@ static inline pudval_t pud_val(pud_t pud)
        pudval_t ret;
 
        if (sizeof(pudval_t) > sizeof(long))
-               ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
-                                 pud.pud, (u64)pud.pud >> 32);
+               ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
+                                   pud.pud, (u64)pud.pud >> 32);
        else
-               ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
-                                 pud.pud);
+               ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
+                                   pud.pud);
 
        return ret;
 }
@@ -1374,9 +1453,10 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 }
 
 void _paravirt_nop(void);
-#define paravirt_nop   ((void *)_paravirt_nop)
+u32 _paravirt_ident_32(u32);
+u64 _paravirt_ident_64(u64);
 
-void paravirt_use_bytelocks(void);
+#define paravirt_nop   ((void *)_paravirt_nop)
 
 #ifdef CONFIG_SMP
 
@@ -1426,12 +1506,37 @@ extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
-#define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
+#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
+#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
+
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS                "pushl %ecx;"
+#define PV_RESTORE_ALL_CALLER_REGS     "popl  %ecx;"
+
 #define PV_FLAGS_ARG "0"
 #define PV_EXTRA_CLOBBERS
 #define PV_VEXTRA_CLOBBERS
 #else
+/* save and restore all caller-save registers, except return value */
+#define PV_SAVE_ALL_CALLER_REGS                                                \
+       "push %rcx;"                                                    \
+       "push %rdx;"                                                    \
+       "push %rsi;"                                                    \
+       "push %rdi;"                                                    \
+       "push %r8;"                                                     \
+       "push %r9;"                                                     \
+       "push %r10;"                                                    \
+       "push %r11;"
+#define PV_RESTORE_ALL_CALLER_REGS                                     \
+       "pop %r11;"                                                     \
+       "pop %r10;"                                                     \
+       "pop %r9;"                                                      \
+       "pop %r8;"                                                      \
+       "pop %rdi;"                                                     \
+       "pop %rsi;"                                                     \
+       "pop %rdx;"                                                     \
+       "pop %rcx;"
+
 /* We save some registers, but saving all of them would be too much.  We
  * clobber all caller-saved registers except the argument register */
 #define PV_SAVE_REGS "pushq %%rdi;"
@@ -1441,52 +1546,76 @@ extern struct paravirt_patch_site __parainstructions[],
 #define PV_FLAGS_ARG "D"
 #endif
 
+/*
+ * Generate a thunk around a function which saves all caller-save
+ * registers except for the return value.  This allows C functions to
+ * be called from assembler code where fewer than normal registers are
+ * available.  It may also help code generation around calls from C
+ * code if the common case doesn't use many registers.
+ *
+ * When a callee is wrapped in a thunk, the caller can assume that all
+ * arg regs and all scratch registers are preserved across the
+ * call. The return value in rax/eax will not be saved, even for void
+ * functions.
+ */
+#define PV_CALLEE_SAVE_REGS_THUNK(func)                                        \
+       extern typeof(func) __raw_callee_save_##func;                   \
+       static void *__##func##__ __used = func;                        \
+                                                                       \
+       asm(".pushsection .text;"                                       \
+           "__raw_callee_save_" #func ": "                             \
+           PV_SAVE_ALL_CALLER_REGS                                     \
+           "call " #func ";"                                           \
+           PV_RESTORE_ALL_CALLER_REGS                                  \
+           "ret;"                                                      \
+           ".popsection")
+
+/* Get a reference to a callee-save function */
+#define PV_CALLEE_SAVE(func)                                           \
+       ((struct paravirt_callee_save) { __raw_callee_save_##func })
+
+/* Promise that "func" already uses the right calling convention */
+#define __PV_IS_CALLEE_SAVE(func)                      \
+       ((struct paravirt_callee_save) { func })
+
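
A usage sketch (my_save_fl is a hypothetical stand-in; see the pv_irq_ops conversions elsewhere in this merge for the real ones): a C implementation that may clobber more than the thin convention allows gets a generated thunk, while a function already known to conform can be installed directly:

/* Hypothetical op returning an EFLAGS-like value. */
static unsigned long my_save_fl(void)
{
	unsigned long flags;
	asm volatile("pushf; pop %0" : "=rm" (flags) : : "memory");
	return flags;
}
PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);

/* In the ops table:
 *	.save_fl = PV_CALLEE_SAVE(my_save_fl);		// via the thunk
 * or, if my_save_fl is known to touch only the return register:
 *	.save_fl = __PV_IS_CALLEE_SAVE(my_save_fl);	// no thunk needed
 */
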
 static inline unsigned long __raw_local_save_flags(void)
 {
        unsigned long f;
 
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc" PV_VEXTRA_CLOBBERS);
+                    : "memory", "cc");
        return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "cc" PV_EXTRA_CLOBBERS);
+                    : "memory", "cc");
 }
 
 static inline void raw_local_irq_disable(void)
 {
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+                    : "memory", "eax", "cc");
 }
 
 static inline void raw_local_irq_enable(void)
 {
-       asm volatile(paravirt_alt(PV_SAVE_REGS
-                                 PARAVIRT_CALL
-                                 PV_RESTORE_REGS)
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
-                    : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
+                    : "memory", "eax", "cc");
 }
 
 static inline unsigned long __raw_local_irq_save(void)
@@ -1529,33 +1658,49 @@ static inline unsigned long __raw_local_irq_save(void)
        .popsection
 
 
+#define COND_PUSH(set, mask, reg)                      \
+       .if ((~(set)) & mask); push %reg; .endif
+#define COND_POP(set, mask, reg)                       \
+       .if ((~(set)) & mask); pop %reg; .endif
+
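
The .if ((~(set)) & mask) test emits a push only for registers whose bit is clear in the clobber set; bits OR-ed in via CLBR_CALLEE_SAVE at the use sites below likewise need no save, because the callee-save thunk preserves them. A host-side C model of the same test (illustrative):

#include <stdio.h>

#define CLBR_EAX (1u << 0)
#define CLBR_ECX (1u << 1)
#define CLBR_EDX (1u << 2)
#define CLBR_EDI (1u << 3)

/* Same condition as COND_PUSH's ".if ((~(set)) & mask)". */
static void cond_push(unsigned set, unsigned mask, const char *reg)
{
	if (~set & mask)
		printf("push %%%s\n", reg);
}

int main(void)
{
	unsigned set = CLBR_EAX;	/* call site may clobber %eax only */

	cond_push(set, CLBR_EAX, "eax");  /* skipped: in the clobber set */
	cond_push(set, CLBR_EDI, "edi");  /* emitted */
	cond_push(set, CLBR_ECX, "ecx");  /* emitted */
	cond_push(set, CLBR_EDX, "edx");  /* emitted */
	return 0;
}
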
 #ifdef CONFIG_X86_64
-#define PV_SAVE_REGS                           \
-       push %rax;                              \
-       push %rcx;                              \
-       push %rdx;                              \
-       push %rsi;                              \
-       push %rdi;                              \
-       push %r8;                               \
-       push %r9;                               \
-       push %r10;                              \
-       push %r11
-#define PV_RESTORE_REGS                                \
-       pop %r11;                               \
-       pop %r10;                               \
-       pop %r9;                                \
-       pop %r8;                                \
-       pop %rdi;                               \
-       pop %rsi;                               \
-       pop %rdx;                               \
-       pop %rcx;                               \
-       pop %rax
+
+#define PV_SAVE_REGS(set)                      \
+       COND_PUSH(set, CLBR_RAX, rax);          \
+       COND_PUSH(set, CLBR_RCX, rcx);          \
+       COND_PUSH(set, CLBR_RDX, rdx);          \
+       COND_PUSH(set, CLBR_RSI, rsi);          \
+       COND_PUSH(set, CLBR_RDI, rdi);          \
+       COND_PUSH(set, CLBR_R8, r8);            \
+       COND_PUSH(set, CLBR_R9, r9);            \
+       COND_PUSH(set, CLBR_R10, r10);          \
+       COND_PUSH(set, CLBR_R11, r11)
+#define PV_RESTORE_REGS(set)                   \
+       COND_POP(set, CLBR_R11, r11);           \
+       COND_POP(set, CLBR_R10, r10);           \
+       COND_POP(set, CLBR_R9, r9);             \
+       COND_POP(set, CLBR_R8, r8);             \
+       COND_POP(set, CLBR_RDI, rdi);           \
+       COND_POP(set, CLBR_RSI, rsi);           \
+       COND_POP(set, CLBR_RDX, rdx);           \
+       COND_POP(set, CLBR_RCX, rcx);           \
+       COND_POP(set, CLBR_RAX, rax)
+
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
 #define PARA_INDIRECT(addr)    *addr(%rip)
 #else
-#define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
-#define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
+#define PV_SAVE_REGS(set)                      \
+       COND_PUSH(set, CLBR_EAX, eax);          \
+       COND_PUSH(set, CLBR_EDI, edi);          \
+       COND_PUSH(set, CLBR_ECX, ecx);          \
+       COND_PUSH(set, CLBR_EDX, edx)
+#define PV_RESTORE_REGS(set)                   \
+       COND_POP(set, CLBR_EDX, edx);           \
+       COND_POP(set, CLBR_ECX, ecx);           \
+       COND_POP(set, CLBR_EDI, edi);           \
+       COND_POP(set, CLBR_EAX, eax)
+
 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
 #define PARA_INDIRECT(addr)    *%cs:addr
@@ -1567,15 +1712,15 @@ static inline unsigned long __raw_local_irq_save(void)
 
 #define DISABLE_INTERRUPTS(clobbers)                                   \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
-                 PV_SAVE_REGS;                                         \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
-                 PV_RESTORE_REGS;)                     \
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define ENABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
-                 PV_SAVE_REGS;                                         \
+                 PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
-                 PV_RESTORE_REGS;)
+                 PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
 
 #define USERGS_SYSRET32                                                        \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
@@ -1605,11 +1750,15 @@ static inline unsigned long __raw_local_irq_save(void)
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)
 
+/*
+ * Note: swapgs is very special, and in practice it is either going to
+ * be implemented with a single "swapgs" instruction or with something
+ * equally special.  Either way, we don't need to save any registers
+ * for it.
+ */
 #define SWAPGS                                                         \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
-                 PV_SAVE_REGS;                                         \
-                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
-                 PV_RESTORE_REGS                                       \
+                 call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )
 
 #define GET_CR2_INTO_RCX                               \
index b8493b3b98901c811add567762a22b73aaa85c9a..2cd07b9422f49cbe8b87e561e140f0f31c28e77f 100644 (file)
@@ -2,13 +2,12 @@
 #define _ASM_X86_PAT_H
 
 #include <linux/types.h>
+#include <asm/pgtable_types.h>
 
 #ifdef CONFIG_X86_PAT
 extern int pat_enabled;
-extern void validate_pat_support(struct cpuinfo_x86 *c);
 #else
 static const int pat_enabled;
-static inline void validate_pat_support(struct cpuinfo_x86 *c) { }
 #endif
 
 extern void pat_init(void);
@@ -17,6 +16,11 @@ extern int reserve_memtype(u64 start, u64 end,
                unsigned long req_type, unsigned long *ret_type);
 extern int free_memtype(u64 start, u64 end);
 
-extern void pat_disable(char *reason);
+extern int kernel_map_sync_memtype(u64 base, unsigned long size,
+               unsigned long flag);
+extern void map_devmem(unsigned long pfn, unsigned long size,
+                      struct pgprot vma_prot);
+extern void unmap_devmem(unsigned long pfn, unsigned long size,
+                        struct pgprot vma_prot);
 
 #endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/pci-functions.h b/arch/x86/include/asm/pci-functions.h
new file mode 100644 (file)
index 0000000..ed0bab4
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ *     PCI BIOS function numbering for conventional PCI BIOS 
+ *     systems
+ */
+
+#define PCIBIOS_PCI_FUNCTION_ID        0xb1XX
+#define PCIBIOS_PCI_BIOS_PRESENT       0xb101
+#define PCIBIOS_FIND_PCI_DEVICE                0xb102
+#define PCIBIOS_FIND_PCI_CLASS_CODE    0xb103
+#define PCIBIOS_GENERATE_SPECIAL_CYCLE 0xb106
+#define PCIBIOS_READ_CONFIG_BYTE       0xb108
+#define PCIBIOS_READ_CONFIG_WORD       0xb109
+#define PCIBIOS_READ_CONFIG_DWORD      0xb10a
+#define PCIBIOS_WRITE_CONFIG_BYTE      0xb10b
+#define PCIBIOS_WRITE_CONFIG_WORD      0xb10c
+#define PCIBIOS_WRITE_CONFIG_DWORD     0xb10d
+#define PCIBIOS_GET_ROUTING_OPTIONS    0xb10e
+#define PCIBIOS_SET_PCI_HW_INT         0xb10f
+
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
deleted file mode 100644 (file)
index 2fbfff8..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-#ifndef _ASM_X86_PDA_H
-#define _ASM_X86_PDA_H
-
-#ifndef __ASSEMBLY__
-#include <linux/stddef.h>
-#include <linux/types.h>
-#include <linux/cache.h>
-#include <asm/page.h>
-
-/* Per processor datastructure. %gs points to it while the kernel runs */
-struct x8664_pda {
-       struct task_struct *pcurrent;   /* 0  Current process */
-       unsigned long data_offset;      /* 8 Per cpu data offset from linker
-                                          address */
-       unsigned long kernelstack;      /* 16 top of kernel stack for current */
-       unsigned long oldrsp;           /* 24 user rsp for system call */
-       int irqcount;                   /* 32 Irq nesting counter. Starts -1 */
-       unsigned int cpunumber;         /* 36 Logical CPU number */
-#ifdef CONFIG_CC_STACKPROTECTOR
-       unsigned long stack_canary;     /* 40 stack canary value */
-                                       /* gcc-ABI: this canary MUST be at
-                                          offset 40!!! */
-#endif
-       char *irqstackptr;
-       short nodenumber;               /* number of current node (32k max) */
-       short in_bootmem;               /* pda lives in bootmem */
-       unsigned int __softirq_pending;
-       unsigned int __nmi_count;       /* number of NMI on this CPUs */
-       short mmu_state;
-       short isidle;
-       struct mm_struct *active_mm;
-       unsigned apic_timer_irqs;
-       unsigned irq0_irqs;
-       unsigned irq_resched_count;
-       unsigned irq_call_count;
-       unsigned irq_tlb_count;
-       unsigned irq_thermal_count;
-       unsigned irq_threshold_count;
-       unsigned irq_spurious_count;
-} ____cacheline_aligned_in_smp;
-
-extern struct x8664_pda **_cpu_pda;
-extern void pda_init(int);
-
-#define cpu_pda(i) (_cpu_pda[i])
-
-/*
- * There is no fast way to get the base address of the PDA, all the accesses
- * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
- */
-extern void __bad_pda_field(void) __attribute__((noreturn));
-
-/*
- * proxy_pda doesn't actually exist, but tell gcc it is accessed for
- * all PDA accesses so it gets read/write dependencies right.
- */
-extern struct x8664_pda _proxy_pda;
-
-#define pda_offset(field) offsetof(struct x8664_pda, field)
-
-#define pda_to_op(op, field, val)                                      \
-do {                                                                   \
-       typedef typeof(_proxy_pda.field) T__;                           \
-       if (0) { T__ tmp__; tmp__ = (val); }    /* type checking */     \
-       switch (sizeof(_proxy_pda.field)) {                             \
-       case 2:                                                         \
-               asm(op "w %1,%%gs:%c2" :                                \
-                   "+m" (_proxy_pda.field) :                           \
-                   "ri" ((T__)val),                                    \
-                   "i"(pda_offset(field)));                            \
-               break;                                                  \
-       case 4:                                                         \
-               asm(op "l %1,%%gs:%c2" :                                \
-                   "+m" (_proxy_pda.field) :                           \
-                   "ri" ((T__)val),                                    \
-                   "i" (pda_offset(field)));                           \
-               break;                                                  \
-       case 8:                                                         \
-               asm(op "q %1,%%gs:%c2":                                 \
-                   "+m" (_proxy_pda.field) :                           \
-                   "ri" ((T__)val),                                    \
-                   "i"(pda_offset(field)));                            \
-               break;                                                  \
-       default:                                                        \
-               __bad_pda_field();                                      \
-       }                                                               \
-} while (0)
-
-#define pda_from_op(op, field)                 \
-({                                             \
-       typeof(_proxy_pda.field) ret__;         \
-       switch (sizeof(_proxy_pda.field)) {     \
-       case 2:                                 \
-               asm(op "w %%gs:%c1,%0" :        \
-                   "=r" (ret__) :              \
-                   "i" (pda_offset(field)),    \
-                   "m" (_proxy_pda.field));    \
-               break;                          \
-       case 4:                                 \
-               asm(op "l %%gs:%c1,%0":         \
-                   "=r" (ret__):               \
-                   "i" (pda_offset(field)),    \
-                   "m" (_proxy_pda.field));    \
-               break;                          \
-       case 8:                                 \
-               asm(op "q %%gs:%c1,%0":         \
-                   "=r" (ret__) :              \
-                   "i" (pda_offset(field)),    \
-                   "m" (_proxy_pda.field));    \
-               break;                          \
-       default:                                \
-               __bad_pda_field();              \
-       }                                       \
-       ret__;                                  \
-})
-
-#define read_pda(field)                pda_from_op("mov", field)
-#define write_pda(field, val)  pda_to_op("mov", field, val)
-#define add_pda(field, val)    pda_to_op("add", field, val)
-#define sub_pda(field, val)    pda_to_op("sub", field, val)
-#define or_pda(field, val)     pda_to_op("or", field, val)
-
-/* This is not atomic against other CPUs -- CPU preemption needs to be off */
-#define test_and_clear_bit_pda(bit, field)                             \
-({                                                                     \
-       int old__;                                                      \
-       asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"                    \
-                    : "=r" (old__), "+m" (_proxy_pda.field)            \
-                    : "dIr" (bit), "i" (pda_offset(field)) : "memory");\
-       old__;                                                          \
-})
-
-#endif
-
-#define PDA_STACKOFFSET (5*8)
-
-#endif /* _ASM_X86_PDA_H */
index ece72053ba63b74cc79df41f88d5c9e49b77b69a..aee103b26d01778c987e7f828876b876473a3bfd 100644 (file)
@@ -2,53 +2,12 @@
 #define _ASM_X86_PERCPU_H
 
 #ifdef CONFIG_X86_64
-#include <linux/compiler.h>
-
-/* Same as asm-generic/percpu.h, except that we store the per cpu offset
-   in the PDA. Longer term the PDA and every per cpu variable
-   should be just put into a single section and referenced directly
-   from %gs */
-
-#ifdef CONFIG_SMP
-#include <asm/pda.h>
-
-#define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
-#define __my_cpu_offset read_pda(data_offset)
-
-#define per_cpu_offset(x) (__per_cpu_offset(x))
-
+#define __percpu_seg           gs
+#define __percpu_mov_op                movq
+#else
+#define __percpu_seg           fs
+#define __percpu_mov_op                movl
 #endif
-#include <asm-generic/percpu.h>
-
-DECLARE_PER_CPU(struct x8664_pda, pda);
-
-/*
- * These are supposed to be implemented as a single instruction which
- * operates on the per-cpu data base segment.  x86-64 doesn't have
- * that yet, so this is a fairly inefficient workaround for the
- * meantime.  The single instruction is atomic with respect to
- * preemption and interrupts, so we need to explicitly disable
- * interrupts here to achieve the same effect.  However, because it
- * can be used from within interrupt-disable/enable, we can't actually
- * disable interrupts; disabling preemption is enough.
- */
-#define x86_read_percpu(var)                                           \
-       ({                                                              \
-               typeof(per_cpu_var(var)) __tmp;                         \
-               preempt_disable();                                      \
-               __tmp = __get_cpu_var(var);                             \
-               preempt_enable();                                       \
-               __tmp;                                                  \
-       })
-
-#define x86_write_percpu(var, val)                                     \
-       do {                                                            \
-               preempt_disable();                                      \
-               __get_cpu_var(var) = (val);                             \
-               preempt_enable();                                       \
-       } while(0)
-
-#else /* CONFIG_X86_64 */
 
 #ifdef __ASSEMBLY__
 
@@ -65,47 +24,48 @@ DECLARE_PER_CPU(struct x8664_pda, pda);
  *    PER_CPU(cpu_gdt_descr, %ebx)
  */
 #ifdef CONFIG_SMP
-#define PER_CPU(var, reg)                              \
-       movl %fs:per_cpu__##this_cpu_off, reg;          \
+#define PER_CPU(var, reg)                                              \
+       __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;       \
        lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var)       %fs:per_cpu__##var
+#define PER_CPU_VAR(var)       %__percpu_seg:per_cpu__##var
 #else /* ! SMP */
-#define PER_CPU(var, reg)                      \
-       movl $per_cpu__##var, reg
+#define PER_CPU(var, reg)                                              \
+       __percpu_mov_op $per_cpu__##var, reg
 #define PER_CPU_VAR(var)       per_cpu__##var
 #endif /* SMP */
 
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var)  per_cpu__##var
+#endif
+
 #else /* ...!ASSEMBLY */
 
+#include <linux/stringify.h>
+
+#ifdef CONFIG_SMP
+#define __percpu_arg(x)                "%%"__stringify(__percpu_seg)":%P" #x
+#define __my_cpu_offset                percpu_read(this_cpu_off)
+#else
+#define __percpu_arg(x)                "%" #x
+#endif
+
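
For reference, a sketch of what the string pasting produces (assuming CONFIG_SMP on 64-bit, where __percpu_seg is gs):

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)
#define __percpu_seg     gs
#define __percpu_arg(x)  "%%" __stringify(__percpu_seg) ":%P" #x

int main(void)
{
	/* Prints the operand template used inside the asm(): movq %%gs:%P1,%0 */
	printf("%s\n", "movq " __percpu_arg(1) ",%0");
	return 0;
}
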
 /*
- * PER_CPU finds an address of a per-cpu variable.
+ * Initialized pointers to per-cpu variables needed for the boot
+ * processor must use these macros to get the proper address
+ * offset from __per_cpu_load on SMP.
  *
- * Args:
- *    var - variable name
- *    cpu - 32bit register containing the current CPU number
- *
- * The resulting address is stored in the "cpu" argument.
- *
- * Example:
- *    PER_CPU(cpu_gdt_descr, %ebx)
+ * There must also be an entry in vmlinux_64.lds.S
  */
-#ifdef CONFIG_SMP
-
-#define __my_cpu_offset x86_read_percpu(this_cpu_off)
-
-/* fs segment starts at (positive) offset == __per_cpu_offset[cpu] */
-#define __percpu_seg "%%fs:"
-
-#else  /* !SMP */
-
-#define __percpu_seg ""
-
-#endif /* SMP */
-
-#include <asm-generic/percpu.h>
+#define DECLARE_INIT_PER_CPU(var) \
+       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
 
-/* We can use this directly for local CPU (faster). */
-DECLARE_PER_CPU(unsigned long, this_cpu_off);
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var)  init_per_cpu__##var
+#else
+#define init_per_cpu_var(var)  per_cpu_var(var)
+#endif
 
 /* For arch-specific code, we can use direct single-insn ops (they
  * don't give an lvalue though). */
@@ -120,20 +80,25 @@ do {                                                       \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
-               asm(op "b %1,"__percpu_seg"%0"          \
+               asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 2:                                         \
-               asm(op "w %1,"__percpu_seg"%0"          \
+               asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 4:                                         \
-               asm(op "l %1,"__percpu_seg"%0"          \
+               asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
+       case 8:                                         \
+               asm(op "q %1,"__percpu_arg(0)           \
+                   : "+m" (var)                        \
+                   : "re" ((T__)val));                 \
+               break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
 } while (0)
@@ -143,17 +108,22 @@ do {                                                      \
        typeof(var) ret__;                              \
        switch (sizeof(var)) {                          \
        case 1:                                         \
-               asm(op "b "__percpu_seg"%1,%0"          \
+               asm(op "b "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 2:                                         \
-               asm(op "w "__percpu_seg"%1,%0"          \
+               asm(op "w "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 4:                                         \
-               asm(op "l "__percpu_seg"%1,%0"          \
+               asm(op "l "__percpu_arg(1)",%0"         \
+                   : "=r" (ret__)                      \
+                   : "m" (var));                       \
+               break;                                  \
+       case 8:                                         \
+               asm(op "q "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
@@ -162,13 +132,30 @@ do {                                                      \
        ret__;                                          \
 })
 
-#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
-#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)
+#define percpu_read(var)       percpu_from_op("mov", per_cpu__##var)
+#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
+#define percpu_add(var, val)   percpu_to_op("add", per_cpu__##var, val)
+#define percpu_sub(var, val)   percpu_to_op("sub", per_cpu__##var, val)
+#define percpu_and(var, val)   percpu_to_op("and", per_cpu__##var, val)
+#define percpu_or(var, val)    percpu_to_op("or", per_cpu__##var, val)
+#define percpu_xor(var, val)   percpu_to_op("xor", per_cpu__##var, val)
+
+/* This is not atomic against other CPUs -- CPU preemption needs to be off */
+#define x86_test_and_clear_bit_percpu(bit, var)                                \
+({                                                                     \
+       int old__;                                                      \
+       asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"           \
+                    : "=r" (old__), "+m" (per_cpu__##var)              \
+                    : "dIr" (bit));                                    \
+       old__;                                                          \
+})
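
Two x86 idioms are at work here: btr copies the addressed bit into the carry flag before clearing it, and sbbl %0,%0 materializes the carry as 0 or all-ones. A behavioural C model on a plain word (the real macro acts on the %gs-relative per-cpu copy):

#include <assert.h>

/* Behavioural model of x86_test_and_clear_bit_percpu. */
static int test_and_clear_bit_model(unsigned long *word, int bit)
{
	int old = (*word >> bit) & 1;	/* btr: old bit -> CF      */
	*word &= ~(1UL << bit);		/* btr: clear the bit      */
	return old ? -1 : 0;		/* sbbl reg,reg: CF -> 0/-1 */
}

int main(void)
{
	unsigned long w = 1UL << 3;
	assert(test_and_clear_bit_model(&w, 3) == -1 && w == 0);
	assert(test_and_clear_bit_model(&w, 3) == 0);
	return 0;
}
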
+
+#include <asm-generic/percpu.h>
+
+/* We can use this directly for local CPU (faster). */
+DECLARE_PER_CPU(unsigned long, this_cpu_off);
+
 #endif /* !__ASSEMBLY__ */
-#endif /* !CONFIG_X86_64 */
 
 #ifdef CONFIG_SMP
 
@@ -195,9 +182,9 @@ do {                                                        \
 #define        early_per_cpu_ptr(_name) (_name##_early_ptr)
 #define        early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
 #define        early_per_cpu(_name, _cpu)                              \
-       (early_per_cpu_ptr(_name) ?                             \
-               early_per_cpu_ptr(_name)[_cpu] :                \
-               per_cpu(_name, _cpu))
+       *(early_per_cpu_ptr(_name) ?                            \
+               &early_per_cpu_ptr(_name)[_cpu] :               \
+               &per_cpu(_name, _cpu))
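
The rewrite from cond ? a : b to *(cond ? &a : &b) is deliberate: a conditional over values is an rvalue, while a dereferenced conditional over addresses stays assignable, so early_per_cpu() can now be used on the left-hand side of an assignment. Illustration (hypothetical variables):

#include <stdio.h>

static int early_copy = 1, percpu_copy = 2;

/* *(cond ? &a : &b) designates an object, so it can be assigned to. */
static void set_either(int use_early, int val)
{
	*(use_early ? &early_copy : &percpu_copy) = val;
	/* (use_early ? early_copy : percpu_copy) = val;   rvalue: won't compile */
}

int main(void)
{
	set_either(1, 42);
	printf("%d %d\n", early_copy, percpu_copy);	/* 42 2 */
	return 0;
}
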
 
 #else  /* !CONFIG_SMP */
 #define        DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
diff --git a/arch/x86/include/asm/pgtable-2level-defs.h b/arch/x86/include/asm/pgtable-2level-defs.h
deleted file mode 100644 (file)
index d77db89..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
-#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
-
-#define SHARED_KERNEL_PMD      0
-
-/*
- * traditional i386 two-level paging structure:
- */
-
-#define PGDIR_SHIFT    22
-#define PTRS_PER_PGD   1024
-
-/*
- * the i386 is two-level, so we don't really have any
- * PMD directory physically.
- */
-
-#define PTRS_PER_PTE   1024
-
-#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
index e0d199fe1d83896578421d79aac509ebd843fdf3..c1774ac9da7a90f30dd47da6ac99e7b9b98494fd 100644 (file)
@@ -53,8 +53,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define pte_none(x)            (!(x).pte_low)
-
 /*
  * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
  * split up the 29 bits of offset into this range:
diff --git a/arch/x86/include/asm/pgtable-2level_types.h b/arch/x86/include/asm/pgtable-2level_types.h
new file mode 100644 (file)
index 0000000..daacc23
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ASM_X86_PGTABLE_2LEVEL_DEFS_H
+#define _ASM_X86_PGTABLE_2LEVEL_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef unsigned long  pteval_t;
+typedef unsigned long  pmdval_t;
+typedef unsigned long  pudval_t;
+typedef unsigned long  pgdval_t;
+typedef unsigned long  pgprotval_t;
+
+typedef union {
+       pteval_t pte;
+       pteval_t pte_low;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD      0
+#define PAGETABLE_LEVELS       2
+
+/*
+ * traditional i386 two-level paging structure:
+ */
+
+#define PGDIR_SHIFT    22
+#define PTRS_PER_PGD   1024
+
+
+/*
+ * the i386 is two-level, so we don't really have any
+ * PMD directory physically.
+ */
+
+#define PTRS_PER_PTE   1024
+
+#endif /* _ASM_X86_PGTABLE_2LEVEL_DEFS_H */
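
With PGDIR_SHIFT == 22 and 1024 entries per table, a 32-bit virtual address splits 10/10/12; a sketch of the decomposition:

#include <stdio.h>

/* Two-level (non-PAE i386) split of a 32-bit virtual address. */
int main(void)
{
	unsigned long addr = 0xc0101234UL;

	unsigned pgd_idx = addr >> 22;            /* PGDIR_SHIFT, 0..1023 */
	unsigned pte_idx = (addr >> 12) & 0x3ff;  /* PAGE_SHIFT, 0..1023  */
	unsigned offset  = addr & 0xfff;          /* within the 4 KiB page */

	printf("pgd=%u pte=%u off=%#x\n", pgd_idx, pte_idx, offset);
	return 0;
}
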
diff --git a/arch/x86/include/asm/pgtable-3level-defs.h b/arch/x86/include/asm/pgtable-3level-defs.h
deleted file mode 100644 (file)
index 6256136..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
-#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
-
-#ifdef CONFIG_PARAVIRT
-#define SHARED_KERNEL_PMD      (pv_info.shared_kernel_pmd)
-#else
-#define SHARED_KERNEL_PMD      1
-#endif
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT    30
-#define PTRS_PER_PGD   4
-
-/*
- * PMD_SHIFT determines the size of the area a middle-level
- * page table can map
- */
-#define PMD_SHIFT      21
-#define PTRS_PER_PMD   512
-
-/*
- * entries per page directory level
- */
-#define PTRS_PER_PTE   512
-
-#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
index 447da43cddb360a6f92a2ce98896599359546e6d..3f13cdf61156b4ed330ca60ea717b2044bc9faa6 100644 (file)
        printk("%s:%d: bad pgd %p(%016Lx).\n",                          \
               __FILE__, __LINE__, &(e), pgd_val(e))
 
-static inline int pud_none(pud_t pud)
-{
-       return pud_val(pud) == 0;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-       return (pud_val(pud) & ~(PTE_PFN_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
-}
-
-static inline int pud_present(pud_t pud)
-{
-       return pud_val(pud) & _PAGE_PRESENT;
-}
-
 /* Rules for using set_pte: the pte being assigned *must* be
  * either not present or in a state where the hardware will
  * not attempt to update the pte.  In places where this is
@@ -120,15 +105,6 @@ static inline void pud_clear(pud_t *pudp)
                write_cr3(pgd);
 }
 
-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
-
-#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PTE_PFN_MASK))
-
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(pud, address) ((pmd_t *)pud_page_vaddr(*(pud)) +    \
-                                 pmd_index(address))
-
 #ifdef CONFIG_SMP
 static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 {
@@ -145,17 +121,6 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
 #define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
 #endif
 
-#define __HAVE_ARCH_PTE_SAME
-static inline int pte_same(pte_t a, pte_t b)
-{
-       return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
-}
-
-static inline int pte_none(pte_t pte)
-{
-       return !pte.pte_low && !pte.pte_high;
-}
-
 /*
  * Bits 0, 6 and 7 are taken in the low part of the pte,
  * put the 32 bits of offset into the high part.
diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h
new file mode 100644 (file)
index 0000000..1bd5876
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef _ASM_X86_PGTABLE_3LEVEL_DEFS_H
+#define _ASM_X86_PGTABLE_3LEVEL_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+typedef u64    pteval_t;
+typedef u64    pmdval_t;
+typedef u64    pudval_t;
+typedef u64    pgdval_t;
+typedef u64    pgprotval_t;
+
+typedef union {
+       struct {
+               unsigned long pte_low, pte_high;
+       };
+       pteval_t pte;
+} pte_t;
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_PARAVIRT
+#define SHARED_KERNEL_PMD      (pv_info.shared_kernel_pmd)
+#else
+#define SHARED_KERNEL_PMD      1
+#endif
+
+#define PAGETABLE_LEVELS       3
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT    30
+#define PTRS_PER_PGD   4
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT      21
+#define PTRS_PER_PMD   512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE   512
+
+
+#endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */
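
Editorial aside (not part of the patch): a worked check of the PAE numbers
above. Address bits split 2/9/9/12: bits 31..30 pick one of the
PTRS_PER_PGD == 4 page-directory-pointer entries (1 << PGDIR_SHIFT == 1GB
each), bits 29..21 pick one of PTRS_PER_PMD == 512 pmd entries
(1 << PMD_SHIFT == 2MB each), bits 20..12 pick one of PTRS_PER_PTE == 512
ptes, and bits 11..0 are the byte within the 4K page. 4 * 1GB confirms
coverage of the full 32-bit address space.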
index 4f5af8447d549eee4a088ba77f1d1f3991c0b2d5..d0812e155f1d60da04c614bee8b97e2e33869876 100644 (file)
@@ -1,164 +1,9 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
-#define FIRST_USER_ADDRESS     0
-
-#define _PAGE_BIT_PRESENT      0       /* is present */
-#define _PAGE_BIT_RW           1       /* writeable */
-#define _PAGE_BIT_USER         2       /* userspace addressable */
-#define _PAGE_BIT_PWT          3       /* page write through */
-#define _PAGE_BIT_PCD          4       /* page cache disabled */
-#define _PAGE_BIT_ACCESSED     5       /* was accessed (raised by CPU) */
-#define _PAGE_BIT_DIRTY                6       /* was written to (raised by CPU) */
-#define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page */
-#define _PAGE_BIT_PAT          7       /* on 4KB pages */
-#define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1      9       /* available for programmer */
-#define _PAGE_BIT_IOMAP                10      /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3      11
-#define _PAGE_BIT_PAT_LARGE    12      /* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL      _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST     _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
-
-/* If _PAGE_BIT_PRESENT is clear, we use these: */
-/* - if the user mapped it with PROT_NONE; pte_present gives true */
-#define _PAGE_BIT_PROTNONE     _PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE         _PAGE_BIT_DIRTY
-
-#define _PAGE_PRESENT  (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
-#define _PAGE_RW       (_AT(pteval_t, 1) << _PAGE_BIT_RW)
-#define _PAGE_USER     (_AT(pteval_t, 1) << _PAGE_BIT_USER)
-#define _PAGE_PWT      (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
-#define _PAGE_PCD      (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
-#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY    (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
-#define _PAGE_PSE      (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
-#define _PAGE_GLOBAL   (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_IOMAP    (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
-#define _PAGE_PAT      (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-#define _PAGE_SPECIAL  (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
-#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
-#define __HAVE_ARCH_PTE_SPECIAL
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
-#define _PAGE_NX       (_AT(pteval_t, 0))
-#endif
+#include <asm/page.h>
 
-#define _PAGE_FILE     (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-
-#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
-                        _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
-                        _PAGE_DIRTY)
-
-/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
-                        _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CACHE_MASK       (_PAGE_PCD | _PAGE_PWT)
-#define _PAGE_CACHE_WB         (0)
-#define _PAGE_CACHE_WC         (_PAGE_PWT)
-#define _PAGE_CACHE_UC_MINUS   (_PAGE_PCD)
-#define _PAGE_CACHE_UC         (_PAGE_PCD | _PAGE_PWT)
-
-#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-                                _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC       __pgprot(_PAGE_PRESENT | _PAGE_RW |     \
-                                        _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC       __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC         __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED)
-#define PAGE_COPY              PAGE_COPY_NOEXEC
-#define PAGE_READONLY          __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
-                                        _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL_EXEC                                             \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
-#define __PAGE_KERNEL          (__PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE     (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_WC               (__PAGE_KERNEL | _PAGE_CACHE_WC)
-#define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_UC_MINUS         (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_NOCACHE    (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define __PAGE_KERNEL_IO               (__PAGE_KERNEL | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_NOCACHE       (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_UC_MINUS      (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_WC            (__PAGE_KERNEL_WC | _PAGE_IOMAP)
-
-#define PAGE_KERNEL                    __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO                 __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC               __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX                 __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC                 __pgprot(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE            __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS           __pgprot(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE       __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE              __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_NOCACHE      __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
-#define PAGE_KERNEL_LARGE_EXEC         __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL           __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE   __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-#define PAGE_KERNEL_IO                 __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE         __pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO_UC_MINUS                __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
-#define PAGE_KERNEL_IO_WC              __pgprot(__PAGE_KERNEL_IO_WC)
-
-/*         xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * early identity mapping  pte attrib macros.
- */
-#ifdef CONFIG_X86_64
-#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
-#else
-/*
- * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
- * bits are combined, this will alow user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
-#define PTE_IDENT_ATTR  0x003          /* PRESENT+RW */
-#define PDE_IDENT_ATTR  0x067          /* PRESENT+RW+USER+DIRTY+ACCESSED */
-#define PGD_IDENT_ATTR  0x001          /* PRESENT (no other attributes) */
-#endif
+#include <asm/pgtable_types.h>
 
 /*
  * Macro to mark a page protection value as UC-
 
 #ifndef __ASSEMBLY__
 
-#define pgprot_writecombine    pgprot_writecombine
-extern pgprot_t pgprot_writecombine(pgprot_t prot);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -183,6 +25,66 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 extern spinlock_t pgd_lock;
 extern struct list_head pgd_list;
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else  /* !CONFIG_PARAVIRT */
+#define set_pte(ptep, pte)             native_set_pte(ptep, pte)
+#define set_pte_at(mm, addr, ptep, pte)        native_set_pte_at(mm, addr, ptep, pte)
+
+#define set_pte_present(mm, addr, ptep, pte)                           \
+       native_set_pte_present(mm, addr, ptep, pte)
+#define set_pte_atomic(ptep, pte)                                      \
+       native_set_pte_atomic(ptep, pte)
+
+#define set_pmd(pmdp, pmd)             native_set_pmd(pmdp, pmd)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define set_pgd(pgdp, pgd)             native_set_pgd(pgdp, pgd)
+#define pgd_clear(pgd)                 native_pgd_clear(pgd)
+#endif
+
+#ifndef set_pud
+# define set_pud(pudp, pud)            native_set_pud(pudp, pud)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pud_clear(pud)                 native_pud_clear(pud)
+#endif
+
+#define pte_clear(mm, addr, ptep)      native_pte_clear(mm, addr, ptep)
+#define pmd_clear(pmd)                 native_pmd_clear(pmd)
+
+#define pte_update(mm, addr, ptep)              do { } while (0)
+#define pte_update_defer(mm, addr, ptep)        do { } while (0)
+
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+{
+       native_pagetable_setup_start(base);
+}
+
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+{
+       native_pagetable_setup_done(base);
+}
+
+#define pgd_val(x)     native_pgd_val(x)
+#define __pgd(x)       native_make_pgd(x)
+
+#ifndef __PAGETABLE_PUD_FOLDED
+#define pud_val(x)     native_pud_val(x)
+#define __pud(x)       native_make_pud(x)
+#endif
+
+#ifndef __PAGETABLE_PMD_FOLDED
+#define pmd_val(x)     native_pmd_val(x)
+#define __pmd(x)       native_make_pmd(x)
+#endif
+
+#define pte_val(x)     native_pte_val(x)
+#define __pte(x)       native_make_pte(x)
+
+#endif /* CONFIG_PARAVIRT */
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -236,72 +138,84 @@ static inline unsigned long pte_pfn(pte_t pte)
 
 static inline int pmd_large(pmd_t pte)
 {
-       return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+       return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
 }
 
+static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
+{
+       pteval_t v = native_pte_val(pte);
+
+       return native_make_pte(v | set);
+}
+
+static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
+{
+       pteval_t v = native_pte_val(pte);
+
+       return native_make_pte(v & ~clear);
+}
+
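
Editorial aside (not part of the patch): pte_set_flags()/pte_clear_flags()
operate directly on the native pte value, so the conversion helpers below
compile down to plain bit operations with no paravirt pte_val()/__pte()
round trip. A hypothetical composition sketch:

static inline pte_t pte_mkwrite_clean_sketch(pte_t pte)
{
	/* grant write access, then drop the dirty bit */
	pte = pte_set_flags(pte, _PAGE_RW);
	return pte_clear_flags(pte, _PAGE_DIRTY);
}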
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+       return pte_clear_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+       return pte_clear_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_RW);
+       return pte_clear_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_NX);
+       return pte_clear_flags(pte, _PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_DIRTY);
+       return pte_set_flags(pte, _PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+       return pte_set_flags(pte, _PAGE_ACCESSED);
 }
 
 static inline pte_t pte_mkwrite(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_RW);
+       return pte_set_flags(pte, _PAGE_RW);
 }
 
 static inline pte_t pte_mkhuge(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_PSE);
+       return pte_set_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_PSE);
+       return pte_clear_flags(pte, _PAGE_PSE);
 }
 
 static inline pte_t pte_mkglobal(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_GLOBAL);
+       return pte_set_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-       return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
+       return pte_clear_flags(pte, _PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+       return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
-extern pteval_t __supported_pte_mask;
-
 /*
  * Mask out unsupported bits in a present pgprot.  Non-present pgprots
  * can use those bits for other purposes, so leave them be.
@@ -374,82 +288,197 @@ static inline int is_new_memtype_allowed(unsigned long flags,
        return 1;
 }
 
-#ifndef __ASSEMBLY__
-/* Indicate that x86 has its own track and untrack pfn vma functions */
-#define __HAVE_PFNMAP_TRACKING
-
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                              unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
-                              unsigned long size, pgprot_t *vma_prot);
-#endif
-
-/* Install a pte for a particular vaddr in kernel space. */
-void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+pmd_t *populate_extra_pmd(unsigned long vaddr);
+pte_t *populate_extra_pte(unsigned long vaddr);
+#endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
-extern void native_pagetable_setup_start(pgd_t *base);
-extern void native_pagetable_setup_done(pgd_t *base);
+# include "pgtable_32.h"
 #else
-static inline void native_pagetable_setup_start(pgd_t *base) {}
-static inline void native_pagetable_setup_done(pgd_t *base) {}
+# include "pgtable_64.h"
 #endif
 
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
+#ifndef __ASSEMBLY__
+#include <linux/mm_types.h>
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else  /* !CONFIG_PARAVIRT */
-#define set_pte(ptep, pte)             native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte)        native_set_pte_at(mm, addr, ptep, pte)
+static inline int pte_none(pte_t pte)
+{
+       return !pte.pte;
+}
 
-#define set_pte_present(mm, addr, ptep, pte)                           \
-       native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte)                                      \
-       native_set_pte_atomic(ptep, pte)
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t a, pte_t b)
+{
+       return a.pte == b.pte;
+}
 
-#define set_pmd(pmdp, pmd)             native_set_pmd(pmdp, pmd)
+static inline int pte_present(pte_t a)
+{
+       return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
+}
 
-#ifndef __PAGETABLE_PUD_FOLDED
-#define set_pgd(pgdp, pgd)             native_set_pgd(pgdp, pgd)
-#define pgd_clear(pgd)                 native_pgd_clear(pgd)
-#endif
+static inline int pmd_present(pmd_t pmd)
+{
+       return pmd_flags(pmd) & _PAGE_PRESENT;
+}
 
-#ifndef set_pud
-# define set_pud(pudp, pud)            native_set_pud(pudp, pud)
-#endif
+static inline int pmd_none(pmd_t pmd)
+{
+       /*
+        * Only check the low word on 32-bit platforms, since it might
+        * be out of sync with the upper half.
+        */
+       return (unsigned long)native_pmd_val(pmd) == 0;
+}
 
-#ifndef __PAGETABLE_PMD_FOLDED
-#define pud_clear(pud)                 native_pud_clear(pud)
-#endif
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+       return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
+}
 
-#define pte_clear(mm, addr, ptep)      native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd)                 native_pmd_clear(pmd)
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pmd_page(pmd)  pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
 
-#define pte_update(mm, addr, ptep)              do { } while (0)
-#define pte_update_defer(mm, addr, ptep)        do { } while (0)
+/*
+ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
+ *
+ * this function returns the index of the entry in the pmd page which would
+ * control the given virtual address
+ */
+static inline unsigned pmd_index(unsigned long address)
+{
+       return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
+}
 
-static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * (Currently stuck as a macro because of indirect forward reference
+ * to linux/mm.h:page_to_nid())
+ */
+#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
+
+/*
+ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this function returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+static inline unsigned pte_index(unsigned long address)
 {
-       native_pagetable_setup_start(base);
+       return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
 
-static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 {
-       native_pagetable_setup_done(base);
+       return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 }
-#endif /* CONFIG_PARAVIRT */
 
-#endif /* __ASSEMBLY__ */
+static inline int pmd_bad(pmd_t pmd)
+{
+       return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
 
-#ifdef CONFIG_X86_32
-# include "pgtable_32.h"
+static inline unsigned long pages_to_mb(unsigned long npg)
+{
+       return npg >> (20 - PAGE_SHIFT);
+}
+
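Editorial aside (not part of the patch): with 4K pages (PAGE_SHIFT == 12)
the shift in pages_to_mb() is 20 - 12 == 8, i.e. 256 pages per megabyte;
for example pages_to_mb(131072) == 131072 >> 8 == 512.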
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
+       remap_pfn_range(vma, vaddr, pfn, size, prot)
+
+#if PAGETABLE_LEVELS > 2
+static inline int pud_none(pud_t pud)
+{
+       return native_pud_val(pud) == 0;
+}
+
+static inline int pud_present(pud_t pud)
+{
+       return pud_flags(pud) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+       return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pud_page(pud)          pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
+
+/* Find an entry in the second-level page table.. */
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+       return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+static inline int pud_large(pud_t pud)
+{
+       return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+               (_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+       return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
+}
 #else
-# include "pgtable_64.h"
-#endif
+static inline int pud_large(pud_t pud)
+{
+       return 0;
+}
+#endif /* PAGETABLE_LEVELS > 2 */
+
+#if PAGETABLE_LEVELS > 3
+static inline int pgd_present(pgd_t pgd)
+{
+       return pgd_flags(pgd) & _PAGE_PRESENT;
+}
+
+static inline unsigned long pgd_page_vaddr(pgd_t pgd)
+{
+       return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
+}
+
+/*
+ * Currently stuck as a macro due to indirect forward reference to
+ * linux/mmzone.h's __section_mem_map_addr() definition:
+ */
+#define pgd_page(pgd)          pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
+
+/* to find an entry in a page-table-directory. */
+static inline unsigned pud_index(unsigned long address)
+{
+       return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
+}
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
+{
+       return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+       return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+       return !native_pgd_val(pgd);
+}
+#endif /* PAGETABLE_LEVELS > 3 */
+
+#endif /* __ASSEMBLY__ */
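
Editorial aside (not part of the patch): a minimal sketch of how the
per-level accessors above compose into a software page walk, assuming
PAGETABLE_LEVELS == 4 (so pgd_none()/pgd_bad() are the real versions) and
the standard pgd_offset() accessor, which is not defined in this hunk;
the function name is hypothetical:

static pte_t *lookup_pte_sketch(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;	/* 2MB mapping: the pmd acts as the pte */
	return pte_offset_kernel(pmd, address);
}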
 
 /*
  * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
@@ -476,28 +505,6 @@ static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
 
 #ifndef __ASSEMBLY__
 
-enum {
-       PG_LEVEL_NONE,
-       PG_LEVEL_4K,
-       PG_LEVEL_2M,
-       PG_LEVEL_1G,
-       PG_LEVEL_NUM
-};
-
-#ifdef CONFIG_PROC_FS
-extern void update_page_count(int level, unsigned long pages);
-#else
-static inline void update_page_count(int level, unsigned long pages) { }
-#endif
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address, unsigned int *level);
-
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 {
index 72b020deb46b9352a5365410b2011e55b4c56021..97612fc7632f0ef95ebb3afecd3886bfe1b176ae 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_PGTABLE_32_H
 #define _ASM_X86_PGTABLE_32_H
 
+#include <asm/pgtable_32_types.h>
 
 /*
  * The Linux memory management assumes a three-level page table setup. On
@@ -33,47 +34,6 @@ void paging_init(void);
 
 extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 
-/*
- * The Linux x86 paging architecture is 'compile-time dual-mode', it
- * implements both the traditional 2-level x86 page tables and the
- * newer 3-level PAE-mode page tables.
- */
-#ifdef CONFIG_X86_PAE
-# include <asm/pgtable-3level-defs.h>
-# define PMD_SIZE      (1UL << PMD_SHIFT)
-# define PMD_MASK      (~(PMD_SIZE - 1))
-#else
-# include <asm/pgtable-2level-defs.h>
-#endif
-
-#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK     (~(PGDIR_SIZE - 1))
-
-/* Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts.  That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- */
-#define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START  ((unsigned long)high_memory + VMALLOC_OFFSET)
-#ifdef CONFIG_X86_PAE
-#define LAST_PKMAP 512
-#else
-#define LAST_PKMAP 1024
-#endif
-
-#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))        \
-                   & PMD_MASK)
-
-#ifdef CONFIG_HIGHMEM
-# define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
-#else
-# define VMALLOC_END   (FIXADDR_START - 2 * PAGE_SIZE)
-#endif
-
-#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
 
 /*
  * Define this if things work differently on an i386 and an i486:
@@ -85,55 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
 /* The boot page tables (all created as a single array) */
 extern unsigned long pg0[];
 
-#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
-#define pmd_none(x)    (!(unsigned long)pmd_val((x)))
-#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pmd_bad(x) ((pmd_val(x) & (PTE_FLAGS_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
 #ifdef CONFIG_X86_PAE
 # include <asm/pgtable-3level.h>
 #else
 # include <asm/pgtable-2level.h>
 #endif
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
-
-
-static inline int pud_large(pud_t pud) { return 0; }
-
-/*
- * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
- *
- * this macro returns the index of the entry in the pmd page which would
- * control the given virtual address
- */
-#define pmd_index(address)                             \
-       (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-
-/*
- * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
- *
- * this macro returns the index of the entry in the pte page which would
- * control the given virtual address
- */
-#define pte_index(address)                                     \
-       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address)                                \
-       ((pte_t *)pmd_page_vaddr(*(dir)) +  pte_index((address)))
-
-#define pmd_page(pmd) (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_page_vaddr(pmd)                                    \
-       ((unsigned long)__va(pmd_val((pmd)) & PTE_PFN_MASK))
-
 #if defined(CONFIG_HIGHPTE)
 #define pte_offset_map(dir, address)                                   \
        ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)), KM_PTE0) +          \
@@ -176,7 +93,4 @@ do {                                         \
 #define kern_addr_valid(kaddr) (0)
 #endif
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #endif /* _ASM_X86_PGTABLE_32_H */
diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
new file mode 100644 (file)
index 0000000..2733fad
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef _ASM_X86_PGTABLE_32_DEFS_H
+#define _ASM_X86_PGTABLE_32_DEFS_H
+
+/*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+#ifdef CONFIG_X86_PAE
+# include <asm/pgtable-3level_types.h>
+# define PMD_SIZE      (1UL << PMD_SHIFT)
+# define PMD_MASK      (~(PMD_SIZE - 1))
+#else
+# include <asm/pgtable-2level_types.h>
+#endif
+
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE - 1))
+
+/* Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 8MB value just means that there will be an 8MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leave a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ */
+#define VMALLOC_OFFSET (8 * 1024 * 1024)
+
+#ifndef __ASSEMBLY__
+extern bool __vmalloc_start_set; /* set once high_memory is set */
+#endif
+
+#define VMALLOC_START  ((unsigned long)high_memory + VMALLOC_OFFSET)
+#ifdef CONFIG_X86_PAE
+#define LAST_PKMAP 512
+#else
+#define LAST_PKMAP 1024
+#endif
+
+#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE * (LAST_PKMAP + 1))        \
+                   & PMD_MASK)
+
+#ifdef CONFIG_HIGHMEM
+# define VMALLOC_END   (PKMAP_BASE - 2 * PAGE_SIZE)
+#else
+# define VMALLOC_END   (FIXADDR_START - 2 * PAGE_SIZE)
+#endif
+
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
+#endif /* _ASM_X86_PGTABLE_32_DEFS_H */
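
Editorial aside (not part of the patch): a worked example of the layout
above, assuming the common 3G/1G split (PAGE_OFFSET == 0xC0000000) and
896MB of lowmem:

	high_memory   == 0xC0000000 + 896MB           == 0xF8000000
	VMALLOC_START == high_memory + VMALLOC_OFFSET == 0xF8800000

Without PAE, the LAST_PKMAP == 1024 entries cover 4MB of pkmap space just
below FIXADDR_BOOT_START; with CONFIG_HIGHMEM, VMALLOC_END then sits two
guard pages below PKMAP_BASE.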
index ba09289accaa0322c70fb580a40f83677a900c9f..6b87bc6d50189263691c10819618bc97deb8c086 100644 (file)
@@ -2,6 +2,8 @@
 #define _ASM_X86_PGTABLE_64_H
 
 #include <linux/const.h>
+#include <asm/pgtable_64_types.h>
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -11,7 +13,6 @@
 #include <asm/processor.h>
 #include <linux/bitops.h>
 #include <linux/threads.h>
-#include <asm/pda.h>
 
 extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
@@ -26,32 +27,6 @@ extern void paging_init(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#define SHARED_KERNEL_PMD      0
-
-/*
- * PGDIR_SHIFT determines what a top-level page table entry can map
- */
-#define PGDIR_SHIFT    39
-#define PTRS_PER_PGD   512
-
-/*
- * 3rd level page
- */
-#define PUD_SHIFT      30
-#define PTRS_PER_PUD   512
-
-/*
- * PMD_SHIFT determines the size of the area a middle-level
- * page table can map
- */
-#define PMD_SHIFT      21
-#define PTRS_PER_PMD   512
-
-/*
- * entries per page directory level
- */
-#define PTRS_PER_PTE   512
-
 #ifndef __ASSEMBLY__
 
 #define pte_ERROR(e)                                   \
@@ -67,9 +42,6 @@ extern void paging_init(void);
        printk("%s:%d: bad pgd %p(%016lx).\n",          \
               __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define pgd_none(x)    (!pgd_val(x))
-#define pud_none(x)    (!pud_val(x))
-
 struct mm_struct;
 
 void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte);
@@ -134,48 +106,6 @@ static inline void native_pgd_clear(pgd_t *pgd)
        native_set_pgd(pgd, native_make_pgd(0));
 }
 
-#define pte_same(a, b)         ((a).pte == (b).pte)
-
-#endif /* !__ASSEMBLY__ */
-
-#define PMD_SIZE       (_AC(1, UL) << PMD_SHIFT)
-#define PMD_MASK       (~(PMD_SIZE - 1))
-#define PUD_SIZE       (_AC(1, UL) << PUD_SHIFT)
-#define PUD_MASK       (~(PUD_SIZE - 1))
-#define PGDIR_SIZE     (_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK     (~(PGDIR_SIZE - 1))
-
-
-#define MAXMEM          _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
-#define VMALLOC_START    _AC(0xffffc20000000000, UL)
-#define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
-#define VMEMMAP_START   _AC(0xffffe20000000000, UL)
-#define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
-#define MODULES_END      _AC(0xffffffffff000000, UL)
-#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
-
-#ifndef __ASSEMBLY__
-
-static inline int pgd_bad(pgd_t pgd)
-{
-       return (pgd_val(pgd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pud_bad(pud_t pud)
-{
-       return (pud_val(pud) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
-       return (pmd_val(pmd) & ~(PTE_PFN_MASK | _PAGE_USER)) != _KERNPG_TABLE;
-}
-
-#define pte_none(x)    (!pte_val((x)))
-#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-
-#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))   /* FIXME: is this right? */
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -184,41 +114,12 @@ static inline int pmd_bad(pmd_t pmd)
 /*
  * Level 4 access.
  */
-#define pgd_page_vaddr(pgd)                                            \
-       ((unsigned long)__va((unsigned long)pgd_val((pgd)) & PTE_PFN_MASK))
-#define pgd_page(pgd)          (pfn_to_page(pgd_val((pgd)) >> PAGE_SHIFT))
-#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
 static inline int pgd_large(pgd_t pgd) { return 0; }
 #define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
 
 /* PUD - Level3 access */
-/* to find an entry in a page-table-directory. */
-#define pud_page_vaddr(pud)                                            \
-       ((unsigned long)__va(pud_val((pud)) & PHYSICAL_PAGE_MASK))
-#define pud_page(pud)  (pfn_to_page(pud_val((pud)) >> PAGE_SHIFT))
-#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
-#define pud_offset(pgd, address)                                       \
-       ((pud_t *)pgd_page_vaddr(*(pgd)) + pud_index((address)))
-#define pud_present(pud) (pud_val((pud)) & _PAGE_PRESENT)
-
-static inline int pud_large(pud_t pte)
-{
-       return (pud_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-               (_PAGE_PSE | _PAGE_PRESENT);
-}
 
 /* PMD  - Level 2 access */
-#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val((pmd)) & PTE_PFN_MASK))
-#define pmd_page(pmd)          (pfn_to_page(pmd_val((pmd)) >> PAGE_SHIFT))
-
-#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-#define pmd_offset(dir, address) ((pmd_t *)pud_page_vaddr(*(dir)) + \
-                                 pmd_index(address))
-#define pmd_none(x)    (!pmd_val((x)))
-#define pmd_present(x) (pmd_val((x)) & _PAGE_PRESENT)
-#define pfn_pmd(nr, prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val((prot))))
-#define pmd_pfn(x)  ((pmd_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
-
 #define pte_to_pgoff(pte) ((pte_val((pte)) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
 #define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) |    \
                                            _PAGE_FILE })
@@ -226,13 +127,6 @@ static inline int pud_large(pud_t pte)
 
 /* PTE - Level 1 access. */
 
-/* page, protection -> pte */
-#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn((page)), (pgprot))
-
-#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
-                                        pte_index((address)))
-
 /* x86-64 always has all page tables mapped. */
 #define pte_offset_map(dir, address) pte_offset_kernel((dir), (address))
 #define pte_offset_map_nested(dir, address) pte_offset_kernel((dir), (address))
@@ -266,9 +160,6 @@ extern int direct_gbpages;
 extern int kern_addr_valid(unsigned long addr);
 extern void cleanup_highmap(void);
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)        \
-       remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
new file mode 100644 (file)
index 0000000..fbf42b8
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef _ASM_X86_PGTABLE_64_DEFS_H
+#define _ASM_X86_PGTABLE_64_DEFS_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef unsigned long  pteval_t;
+typedef unsigned long  pmdval_t;
+typedef unsigned long  pudval_t;
+typedef unsigned long  pgdval_t;
+typedef unsigned long  pgprotval_t;
+
+typedef struct { pteval_t pte; } pte_t;
+
+#endif /* !__ASSEMBLY__ */
+
+#define SHARED_KERNEL_PMD      0
+#define PAGETABLE_LEVELS       4
+
+/*
+ * PGDIR_SHIFT determines what a top-level page table entry can map
+ */
+#define PGDIR_SHIFT    39
+#define PTRS_PER_PGD   512
+
+/*
+ * 3rd level page
+ */
+#define PUD_SHIFT      30
+#define PTRS_PER_PUD   512
+
+/*
+ * PMD_SHIFT determines the size of the area a middle-level
+ * page table can map
+ */
+#define PMD_SHIFT      21
+#define PTRS_PER_PMD   512
+
+/*
+ * entries per page directory level
+ */
+#define PTRS_PER_PTE   512
+
+#define PMD_SIZE       (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE - 1))
+#define PUD_SIZE       (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE - 1))
+#define PGDIR_SIZE     (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE - 1))
+
+
+#define MAXMEM          _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
+#define VMALLOC_START    _AC(0xffffc20000000000, UL)
+#define VMALLOC_END      _AC(0xffffe1ffffffffff, UL)
+#define VMEMMAP_START   _AC(0xffffe20000000000, UL)
+#define MODULES_VADDR    _AC(0xffffffffa0000000, UL)
+#define MODULES_END      _AC(0xffffffffff000000, UL)
+#define MODULES_LEN   (MODULES_END - MODULES_VADDR)
+
+#endif /* _ASM_X86_PGTABLE_64_DEFS_H */
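
Editorial aside (not part of the patch): under this 4-level layout a
48-bit virtual address splits 9/9/9/9/12, and each level's reach is
PGDIR_SIZE == 512GB, PUD_SIZE == 1GB, PMD_SIZE == 2MB (512 entries of the
level below each time); 512 * 512GB covers the 256TB (2^48) space. A
hypothetical decomposition sketch, assuming PAGE_SHIFT/PAGE_SIZE from
<asm/page_types.h>:

struct va_split { unsigned pgd, pud, pmd, pte; unsigned long off; };

static inline struct va_split split_va_sketch(unsigned long addr)
{
	struct va_split s = {
		.pgd = (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1), /* bits 47..39 */
		.pud = (addr >> PUD_SHIFT)   & (PTRS_PER_PUD - 1), /* bits 38..30 */
		.pmd = (addr >> PMD_SHIFT)   & (PTRS_PER_PMD - 1), /* bits 29..21 */
		.pte = (addr >> PAGE_SHIFT)  & (PTRS_PER_PTE - 1), /* bits 20..12 */
		.off = addr & (PAGE_SIZE - 1),                     /* bits 11..0  */
	};
	return s;
}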
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
new file mode 100644 (file)
index 0000000..b8238dc
--- /dev/null
@@ -0,0 +1,329 @@
+#ifndef _ASM_X86_PGTABLE_DEFS_H
+#define _ASM_X86_PGTABLE_DEFS_H
+
+#include <linux/const.h>
+#include <asm/page_types.h>
+
+#define FIRST_USER_ADDRESS     0
+
+#define _PAGE_BIT_PRESENT      0       /* is present */
+#define _PAGE_BIT_RW           1       /* writeable */
+#define _PAGE_BIT_USER         2       /* userspace addressable */
+#define _PAGE_BIT_PWT          3       /* page write through */
+#define _PAGE_BIT_PCD          4       /* page cache disabled */
+#define _PAGE_BIT_ACCESSED     5       /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY                6       /* was written to (raised by CPU) */
+#define _PAGE_BIT_PSE          7       /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_PAT          7       /* on 4KB pages */
+#define _PAGE_BIT_GLOBAL       8       /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1      9       /* available for programmer */
+#define _PAGE_BIT_IOMAP                10      /* flag used to indicate IO mapping */
+#define _PAGE_BIT_UNUSED3      11
+#define _PAGE_BIT_PAT_LARGE    12      /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL      _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_CPA_TEST     _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_NX           63       /* No execute: only valid after cpuid check */
+
+/* If _PAGE_BIT_PRESENT is clear, we use these: */
+/* - if the user mapped it with PROT_NONE, pte_present() returns true */
+#define _PAGE_BIT_PROTNONE     _PAGE_BIT_GLOBAL
+/* - set: nonlinear file mapping, saved PTE; unset: swap */
+#define _PAGE_BIT_FILE         _PAGE_BIT_DIRTY
+
+#define _PAGE_PRESENT  (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW       (_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER     (_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT      (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD      (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY    (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE      (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL   (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_IOMAP    (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+#define _PAGE_UNUSED3  (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT      (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL  (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_NX       (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+#else
+#define _PAGE_NX       (_AT(pteval_t, 0))
+#endif
+
+#define _PAGE_FILE     (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+
+#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
+                        _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
+                        _PAGE_DIRTY)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |         \
+                        _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define _PAGE_CACHE_MASK       (_PAGE_PCD | _PAGE_PWT)
+#define _PAGE_CACHE_WB         (0)
+#define _PAGE_CACHE_WC         (_PAGE_PWT)
+#define _PAGE_CACHE_UC_MINUS   (_PAGE_PCD)
+#define _PAGE_CACHE_UC         (_PAGE_PCD | _PAGE_PWT)
+
+#define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+                                _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC       __pgprot(_PAGE_PRESENT | _PAGE_RW |     \
+                                        _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC       __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
+                                        _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC         __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
+                                        _PAGE_ACCESSED)
+#define PAGE_COPY              PAGE_COPY_NOEXEC
+#define PAGE_READONLY          __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
+                                        _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC     __pgprot(_PAGE_PRESENT | _PAGE_USER |   \
+                                        _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL_EXEC                                             \
+       (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+#define __PAGE_KERNEL          (__PAGE_KERNEL_EXEC | _PAGE_NX)
+
+#define __PAGE_KERNEL_RO               (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX               (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+#define __PAGE_KERNEL_EXEC_NOCACHE     (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_WC               (__PAGE_KERNEL | _PAGE_CACHE_WC)
+#define __PAGE_KERNEL_NOCACHE          (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_UC_MINUS         (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_VSYSCALL         (__PAGE_KERNEL_RX | _PAGE_USER)
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_LARGE            (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_NOCACHE    (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC       (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define __PAGE_KERNEL_IO               (__PAGE_KERNEL | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_NOCACHE       (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_UC_MINUS      (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_WC            (__PAGE_KERNEL_WC | _PAGE_IOMAP)
+
+#define PAGE_KERNEL                    __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO                 __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC               __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX                 __pgprot(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC                 __pgprot(__PAGE_KERNEL_WC)
+#define PAGE_KERNEL_NOCACHE            __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_UC_MINUS           __pgprot(__PAGE_KERNEL_UC_MINUS)
+#define PAGE_KERNEL_EXEC_NOCACHE       __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
+#define PAGE_KERNEL_LARGE              __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_NOCACHE      __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
+#define PAGE_KERNEL_LARGE_EXEC         __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_VSYSCALL           __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE   __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+#define PAGE_KERNEL_IO                 __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE         __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define PAGE_KERNEL_IO_UC_MINUS                __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
+#define PAGE_KERNEL_IO_WC              __pgprot(__PAGE_KERNEL_IO_WC)
+
+/*         xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * early identity mapping pte attribute macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#else
+/*
+ * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
+ * bits are combined, this will allow the user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+#define PTE_IDENT_ATTR  0x003          /* PRESENT+RW */
+#define PDE_IDENT_ATTR  0x067          /* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR  0x001          /* PRESENT (no other attributes) */
+#endif
+
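Editorial aside (not part of the patch): a cross-check of the identity
mapping constants against the _PAGE_BIT_* definitions at the top of this
file:

	PTE_IDENT_ATTR 0x003 == _PAGE_PRESENT (0x001) | _PAGE_RW (0x002)
	PDE_IDENT_ATTR 0x067 == 0x003 | _PAGE_USER (0x004)
	                              | _PAGE_ACCESSED (0x020) | _PAGE_DIRTY (0x040)
	PGD_IDENT_ATTR 0x001 == _PAGE_PRESENT (0x001)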
+#ifdef CONFIG_X86_32
+# include "pgtable_32_types.h"
+#else
+# include "pgtable_64_types.h"
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/* PTE_PFN_MASK extracts the PFN from a (pte|pmd|pud|pgd)val_t */
+#define PTE_PFN_MASK           ((pteval_t)PHYSICAL_PAGE_MASK)
+
+/* PTE_FLAGS_MASK extracts the flags from a (pte|pmd|pud|pgd)val_t */
+#define PTE_FLAGS_MASK         (~PTE_PFN_MASK)
+
+typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;
+
+typedef struct { pgdval_t pgd; } pgd_t;
+
+static inline pgd_t native_make_pgd(pgdval_t val)
+{
+       return (pgd_t) { val };
+}
+
+static inline pgdval_t native_pgd_val(pgd_t pgd)
+{
+       return pgd.pgd;
+}
+
+static inline pgdval_t pgd_flags(pgd_t pgd)
+{
+       return native_pgd_val(pgd) & PTE_FLAGS_MASK;
+}
+
+#if PAGETABLE_LEVELS > 3
+typedef struct { pudval_t pud; } pud_t;
+
+static inline pud_t native_make_pud(pudval_t val)
+{
+       return (pud_t) { val };
+}
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+       return pud.pud;
+}
+#else
+#include <asm-generic/pgtable-nopud.h>
+
+static inline pudval_t native_pud_val(pud_t pud)
+{
+       return native_pgd_val(pud.pgd);
+}
+#endif
+
+#if PAGETABLE_LEVELS > 2
+typedef struct { pmdval_t pmd; } pmd_t;
+
+static inline pmd_t native_make_pmd(pmdval_t val)
+{
+       return (pmd_t) { val };
+}
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+       return pmd.pmd;
+}
+#else
+#include <asm-generic/pgtable-nopmd.h>
+
+static inline pmdval_t native_pmd_val(pmd_t pmd)
+{
+       return native_pgd_val(pmd.pud.pgd);
+}
+#endif
+
+static inline pudval_t pud_flags(pud_t pud)
+{
+       return native_pud_val(pud) & PTE_FLAGS_MASK;
+}
+
+static inline pmdval_t pmd_flags(pmd_t pmd)
+{
+       return native_pmd_val(pmd) & PTE_FLAGS_MASK;
+}
+
+static inline pte_t native_make_pte(pteval_t val)
+{
+       return (pte_t) { .pte = val };
+}
+
+static inline pteval_t native_pte_val(pte_t pte)
+{
+       return pte.pte;
+}
+
+static inline pteval_t pte_flags(pte_t pte)
+{
+       return native_pte_val(pte) & PTE_FLAGS_MASK;
+}
+
+#define pgprot_val(x)  ((x).pgprot)
+#define __pgprot(x)    ((pgprot_t) { (x) } )
+
+
+typedef struct page *pgtable_t;
+
+extern pteval_t __supported_pte_mask;
+extern int nx_enabled;
+extern void set_nx(void);
+
+#define pgprot_writecombine    pgprot_writecombine
+extern pgprot_t pgprot_writecombine(pgprot_t prot);
+
+/* Indicate that x86 has its own track and untrack pfn vma functions */
+#define __HAVE_PFNMAP_TRACKING
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                              unsigned long size, pgprot_t vma_prot);
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+                              unsigned long size, pgprot_t *vma_prot);
+
+/* Install a pte for a particular vaddr in kernel space. */
+void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+
+#ifdef CONFIG_X86_32
+extern void native_pagetable_setup_start(pgd_t *base);
+extern void native_pagetable_setup_done(pgd_t *base);
+#else
+static inline void native_pagetable_setup_start(pgd_t *base) {}
+static inline void native_pagetable_setup_done(pgd_t *base) {}
+#endif
+
+struct seq_file;
+extern void arch_report_meminfo(struct seq_file *m);
+
+enum {
+       PG_LEVEL_NONE,
+       PG_LEVEL_4K,
+       PG_LEVEL_2M,
+       PG_LEVEL_1G,
+       PG_LEVEL_NUM
+};
+
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t, but if the pmd maps a large (PSE) page
+ * then we return it as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PGTABLE_DEFS_H */
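
Editorial aside (not part of the patch): PTE_PFN_MASK and PTE_FLAGS_MASK
partition a pte value into the physical frame and the attribute bits. A
hypothetical sketch of the split using the accessors defined above:

static inline unsigned long pte_pfn_sketch(pte_t pte)
{
	/* mask off the attribute bits, then shift down to a frame number */
	return (unsigned long)((native_pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT);
}

static inline pteval_t pte_attrs_sketch(pte_t pte)
{
	return pte_flags(pte);	/* equivalent to masking with PTE_FLAGS_MASK */
}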
index a8894647dd9aba3fd5139eb2277e39a10d17cacb..3ac5032fae092a0ba788f5b4971085ca548edd22 100644 (file)
@@ -6,8 +6,4 @@
 #define ARCH_GET_FS 0x1003
 #define ARCH_GET_GS 0x1004
 
-#ifdef CONFIG_X86_64
-extern long sys_arch_prctl(int, unsigned long);
-#endif /* CONFIG_X86_64 */
-
 #endif /* _ASM_X86_PRCTL_H */
index 3bfd5235a9eb46dbf5e1386cf8ba34b6e8765066..ae85a8d66a30601a1a22c3b6b84f3df6d3056477 100644 (file)
@@ -16,6 +16,7 @@ struct mm_struct;
 #include <asm/cpufeature.h>
 #include <asm/system.h>
 #include <asm/page.h>
+#include <asm/pgtable_types.h>
 #include <asm/percpu.h>
 #include <asm/msr.h>
 #include <asm/desc_defs.h>
@@ -73,10 +74,10 @@ struct cpuinfo_x86 {
        char                    pad0;
 #else
        /* Number of 4K pages in DTLB/ITLB combined (in pages): */
-       int                      x86_tlbsize;
+       int                     x86_tlbsize;
+#endif
        __u8                    x86_virt_bits;
        __u8                    x86_phys_bits;
-#endif
        /* CPUID returned core id bits: */
        __u8                    x86_coreid_bits;
        /* Max extended CPUID function supported: */
@@ -247,7 +248,6 @@ struct x86_hw_tss {
 #define IO_BITMAP_LONGS                        (IO_BITMAP_BYTES/sizeof(long))
 #define IO_BITMAP_OFFSET               offsetof(struct tss_struct, io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET       0x8000
-#define INVALID_IO_BITMAP_OFFSET_LAZY  0x9000
 
 struct tss_struct {
        /*
@@ -262,11 +262,6 @@ struct tss_struct {
         * be within the limit.
         */
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
-       /*
-        * Cache the current maximum and the last task that used the bitmap:
-        */
-       unsigned long           io_bitmap_max;
-       struct thread_struct    *io_bitmap_owner;
 
        /*
         * .. and then another 0x100 bytes for the emergency kernel stack:
@@ -378,9 +373,33 @@ union thread_xstate {
 
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
+
+union irq_stack_union {
+       char irq_stack[IRQ_STACK_SIZE];
+       /*
+        * GCC hardcodes the stack canary as %gs:40.  Since the
+        * irq_stack is the object at %gs:0, we reserve the bottom
+        * 48 bytes of the irq stack for the canary.
+        */
+       struct {
+               char gs_base[40];
+               unsigned long stack_canary;
+       };
+};
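
Editorial aside (not part of the patch): the layout contract in the
comment above can be checked at build time; a hypothetical assertion,
assuming BUILD_BUG_ON() and offsetof() are available in this context:

static inline void irq_stack_union_layout_check(void)
{
	/* gs_base occupies bytes 0..39, so the canary must land at %gs:40 */
	BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
}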
+
+DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
+DECLARE_INIT_PER_CPU(irq_stack_union);
+
+DECLARE_PER_CPU(char *, irq_stack_ptr);
+DECLARE_PER_CPU(unsigned int, irq_count);
+extern unsigned long kernel_eflags;
+extern asmlinkage void ignore_sysret(void);
+#else  /* X86_64 */
+#ifdef CONFIG_CC_STACKPROTECTOR
+DECLARE_PER_CPU(unsigned long, stack_canary);
 #endif
+#endif /* X86_64 */
 
-extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int xstate_size;
 extern void free_thread_xstate(struct task_struct *);
 extern struct kmem_cache *task_xstate_cachep;
@@ -752,9 +771,9 @@ extern int sysenter_setup(void);
 extern struct desc_ptr         early_gdt_descr;
 
 extern void cpu_set_gdt(int);
-extern void switch_to_new_gdt(void);
+extern void switch_to_new_gdt(int);
+extern void load_percpu_segment(int);
 extern void cpu_init(void);
-extern void init_gdt(int cpu);
 
 static inline unsigned long get_debugctlmsr(void)
 {
@@ -839,6 +858,7 @@ static inline void spin_lock_prefetch(const void *x)
  * User space process size: 3GB (default).
  */
 #define TASK_SIZE              PAGE_OFFSET
+#define TASK_SIZE_MAX          TASK_SIZE
 #define STACK_TOP              TASK_SIZE
 #define STACK_TOP_MAX          STACK_TOP
 
@@ -898,7 +918,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 /*
  * User space process size. 47bits minus one guard page.
  */
-#define TASK_SIZE64    ((1UL << 47) - PAGE_SIZE)
+#define TASK_SIZE_MAX  ((1UL << 47) - PAGE_SIZE)
 
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
@@ -907,12 +927,12 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
                                        0xc0000000 : 0xFFFFe000)
 
 #define TASK_SIZE              (test_thread_flag(TIF_IA32) ? \
-                                       IA32_PAGE_OFFSET : TASK_SIZE64)
+                                       IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 #define TASK_SIZE_OF(child)    ((test_tsk_thread_flag(child, TIF_IA32)) ? \
-                                       IA32_PAGE_OFFSET : TASK_SIZE64)
+                                       IA32_PAGE_OFFSET : TASK_SIZE_MAX)
 
 #define STACK_TOP              TASK_SIZE
-#define STACK_TOP_MAX          TASK_SIZE64
+#define STACK_TOP_MAX          TASK_SIZE_MAX
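Editorial aside (not part of the patch): with 4K pages, TASK_SIZE_MAX ==
(1UL << 47) - PAGE_SIZE == 0x00007ffffffff000, i.e. 128TB minus one guard
page, so the topmost user page never abuts the non-canonical hole.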
 
 #define INIT_THREAD  { \
        .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
index d6a22f92ba7797a1f11b77e2b6ac90cd06499e07..49fb3ecf3bb341208a45935203d121ef60eba4d0 100644 (file)
@@ -18,11 +18,7 @@ extern void syscall32_cpu_init(void);
 
 extern void check_efer(void);
 
-#ifdef CONFIG_X86_BIOS_REBOOT
 extern int reboot_force;
-#else
-static const int reboot_force = 0;
-#endif
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 
index 6d34d954c22896a6b9a26bc5bd622ede33494a8b..e304b66abeea6eeb819f9c682124b67ff27e05fb 100644 (file)
@@ -28,7 +28,7 @@ struct pt_regs {
        int  xds;
        int  xes;
        int  xfs;
-       /* int  gs; */
+       int  xgs;
        long orig_eax;
        long eip;
        int  xcs;
@@ -50,7 +50,7 @@ struct pt_regs {
        unsigned long ds;
        unsigned long es;
        unsigned long fs;
-       /* int  gs; */
+       unsigned long gs;
        unsigned long orig_ax;
        unsigned long ip;
        unsigned long cs;
diff --git a/arch/x86/include/asm/rdc321x_defs.h b/arch/x86/include/asm/rdc321x_defs.h
new file mode 100644 (file)
index 0000000..c8e9c8b
--- /dev/null
@@ -0,0 +1,12 @@
+#define PFX    "rdc321x: "
+
+/* General purpose configuration and data registers */
+#define RDC3210_CFGREG_ADDR     0x0CF8
+#define RDC3210_CFGREG_DATA     0x0CFC
+
+#define RDC321X_GPIO_CTRL_REG1 0x48
+#define RDC321X_GPIO_CTRL_REG2 0x84
+#define RDC321X_GPIO_DATA_REG1 0x4c
+#define RDC321X_GPIO_DATA_REG2 0x88
+
+#define RDC321X_MAX_GPIO       58
index 1dc1b51ac6232f12b4de181c9c343328cc7bfd07..14e0ed86a6f986c80b0ecb240482d9bcc16ac9aa 100644 (file)
@@ -61,7 +61,7 @@
  *
  *  26 - ESPFIX small SS
  *  27 - per-cpu                       [ offset to per-cpu data area ]
- *  28 - unused
+ *  28 - stack_canary-20               [ for stack protector ]
  *  29 - unused
  *  30 - unused
  *  31 - TSS for double fault handler
 #define __KERNEL_PERCPU 0
 #endif
 
+#define GDT_ENTRY_STACK_CANARY         (GDT_ENTRY_KERNEL_BASE + 16)
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __KERNEL_STACK_CANARY          (GDT_ENTRY_STACK_CANARY * 8)
+#else
+#define __KERNEL_STACK_CANARY          0
+#endif
+
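Editorial aside (not part of the patch): a segment selector is the GDT
index shifted left by 3, with the low bits holding TI and RPL. Assuming
GDT_ENTRY_KERNEL_BASE == 12 (the value implied by entry 28 in the table
above), GDT_ENTRY_STACK_CANARY == 12 + 16 == 28 and
__KERNEL_STACK_CANARY == 28 * 8 == 0xe0.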
 #define GDT_ENTRY_DOUBLEFAULT_TSS      31
 
 /*
index ebe858cdc8a3048f75ecc10526ee17753f97bc05..05c6f6b11fd5d25354ddcd7d3d07cf7cf7bf3fe5 100644 (file)
@@ -1,33 +1,19 @@
 #ifndef _ASM_X86_SETUP_H
 #define _ASM_X86_SETUP_H
 
+#ifdef __KERNEL__
+
 #define COMMAND_LINE_SIZE 2048
 
 #ifndef __ASSEMBLY__
 
-/* Interrupt control for vSMPowered x86_64 systems */
-void vsmp_init(void);
-
-
-void setup_bios_corruption_check(void);
-
-
-#ifdef CONFIG_X86_VISWS
-extern void visws_early_detect(void);
-extern int is_visws_box(void);
-#else
-static inline void visws_early_detect(void) { }
-static inline int is_visws_box(void) { return 0; }
-#endif
-
-extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
-extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
 /*
  * Any setup quirks to be performed?
  */
 struct mpc_cpu;
 struct mpc_bus;
 struct mpc_oemtable;
+
 struct x86_quirks {
        int (*arch_pre_time_init)(void);
        int (*arch_time_init)(void);
@@ -43,20 +29,19 @@ struct x86_quirks {
        void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
        void (*mpc_oem_pci_bus)(struct mpc_bus *m);
        void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
-                                    unsigned short oemsize);
+                               unsigned short oemsize);
        int (*setup_ioapic_ids)(void);
-       int (*update_genapic)(void);
 };
 
-extern struct x86_quirks *x86_quirks;
-extern unsigned long saved_video_mode;
+extern void x86_quirk_pre_intr_init(void);
+extern void x86_quirk_intr_init(void);
 
-#ifndef CONFIG_PARAVIRT
-#define paravirt_post_allocator_init() do {} while (0)
-#endif
-#endif /* __ASSEMBLY__ */
+extern void x86_quirk_trap_init(void);
 
-#ifdef __KERNEL__
+extern void x86_quirk_pre_time_init(void);
+extern void x86_quirk_time_init(void);
+
+#endif /* __ASSEMBLY__ */
 
 #ifdef __i386__
 
@@ -78,6 +63,30 @@ extern unsigned long saved_video_mode;
 #ifndef __ASSEMBLY__
 #include <asm/bootparam.h>
 
+/* Interrupt control for vSMPowered x86_64 systems */
+#ifdef CONFIG_X86_VSMP
+void vsmp_init(void);
+#else
+static inline void vsmp_init(void) { }
+#endif
+
+void setup_bios_corruption_check(void);
+
+#ifdef CONFIG_X86_VISWS
+extern void visws_early_detect(void);
+extern int is_visws_box(void);
+#else
+static inline void visws_early_detect(void) { }
+static inline int is_visws_box(void) { return 0; }
+#endif
+
+extern struct x86_quirks *x86_quirks;
+extern unsigned long saved_video_mode;
+
+#ifndef CONFIG_PARAVIRT
+#define paravirt_post_allocator_init() do {} while (0)
+#endif
+
 #ifndef _SETUP
 
 /*
@@ -100,7 +109,6 @@ extern unsigned long init_pg_tables_start;
 extern unsigned long init_pg_tables_end;
 
 #else
-void __init x86_64_init_pda(void);
 void __init x86_64_start_kernel(char *real_mode);
 void __init x86_64_start_reservations(char *real_mode_data);
 
diff --git a/arch/x86/include/asm/setup_arch.h b/arch/x86/include/asm/setup_arch.h
new file mode 100644 (file)
index 0000000..3884620
--- /dev/null
@@ -0,0 +1,3 @@
+/* Hook to call BIOS initialisation function */
+
+/* no action for generic */
index 19953df61c52005cdd08772da175c9d389a8a10d..47d0e21f2b9ec85b3bb0658c88eb2c611be8091d 100644 (file)
 #  include <asm/io_apic.h>
 # endif
 #endif
-#include <asm/pda.h>
 #include <asm/thread_info.h>
-
-#ifdef CONFIG_X86_64
-
-extern cpumask_var_t cpu_callin_mask;
-extern cpumask_var_t cpu_callout_mask;
-extern cpumask_var_t cpu_initialized_mask;
-extern cpumask_var_t cpu_sibling_setup_mask;
-
-#else /* CONFIG_X86_32 */
-
-extern cpumask_t cpu_callin_map;
-extern cpumask_t cpu_callout_map;
-extern cpumask_t cpu_initialized;
-extern cpumask_t cpu_sibling_setup_map;
-
-#define cpu_callin_mask                ((struct cpumask *)&cpu_callin_map)
-#define cpu_callout_mask       ((struct cpumask *)&cpu_callout_map)
-#define cpu_initialized_mask   ((struct cpumask *)&cpu_initialized)
-#define cpu_sibling_setup_mask ((struct cpumask *)&cpu_sibling_setup_map)
-
-#endif /* CONFIG_X86_32 */
-
-extern void (*mtrr_hook)(void);
-extern void zap_low_mappings(void);
-
-extern int __cpuinit get_local_pda(int cpu);
+#include <asm/cpumask.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
@@ -50,9 +24,7 @@ extern unsigned int num_processors;
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 DECLARE_PER_CPU(u16, cpu_llc_id);
-#ifdef CONFIG_X86_32
 DECLARE_PER_CPU(int, cpu_number);
-#endif
 
 static inline struct cpumask *cpu_sibling_mask(int cpu)
 {
@@ -167,8 +139,6 @@ void play_dead_common(void);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
-extern void prefill_possible_map(void);
-
 void smp_store_cpu_info(int id);
 #define cpu_physical_id(cpu)   per_cpu(x86_cpu_to_apicid, cpu)
 
@@ -177,10 +147,6 @@ static inline int num_booting_cpus(void)
 {
        return cpumask_weight(cpu_callout_mask);
 }
-#else
-static inline void prefill_possible_map(void)
-{
-}
 #endif /* CONFIG_SMP */
 
 extern unsigned disabled_cpus __cpuinitdata;
@@ -191,11 +157,11 @@ extern unsigned disabled_cpus __cpuinitdata;
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define raw_smp_processor_id() (x86_read_percpu(cpu_number))
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 extern int safe_smp_processor_id(void);
 
 #elif defined(CONFIG_X86_64_SMP)
-#define raw_smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() (percpu_read(cpu_number))
 
 #define stack_smp_processor_id()                                       \
 ({                                                             \
@@ -205,10 +171,6 @@ extern int safe_smp_processor_id(void);
 })
 #define safe_smp_processor_id()                smp_processor_id()
 
-#else /* !CONFIG_X86_32_SMP && !CONFIG_X86_64_SMP */
-#define cpu_physical_id(cpu)           boot_cpu_physical_apicid
-#define safe_smp_processor_id()                0
-#define stack_smp_processor_id()       0
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -220,28 +182,9 @@ static inline int logical_smp_processor_id(void)
        return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
 }
 
-#include <mach_apicdef.h>
-static inline unsigned int read_apic_id(void)
-{
-       unsigned int reg;
-
-       reg = *(u32 *)(APIC_BASE + APIC_ID);
-
-       return GET_APIC_ID(reg);
-}
 #endif
 
-
-# if defined(APIC_DEFINITION) || defined(CONFIG_X86_64)
 extern int hard_smp_processor_id(void);
-# else
-#include <mach_apicdef.h>
-static inline int hard_smp_processor_id(void)
-{
-       /* we don't want to mark this access volatile - bad code generation */
-       return read_apic_id();
-}
-# endif /* APIC_DEFINITION */
 
 #else /* CONFIG_X86_LOCAL_APIC */
 
@@ -251,11 +194,5 @@ static inline int hard_smp_processor_id(void)
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_HAS_BOOT_CPU_ID
-extern unsigned char boot_cpu_id;
-#else
-#define boot_cpu_id    0
-#endif
-
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_X86_SMP_H */
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
new file mode 100644 (file)
index 0000000..1def601
--- /dev/null
@@ -0,0 +1,61 @@
+/* Two abstractions specific to kernel/smpboot.c, mainly to cater to
+ * VISWS, which needs to alter them. */
+
+static inline void smpboot_clear_io_apic_irqs(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+       io_apic_irqs = 0;
+#endif
+}
+
+static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
+{
+       CMOS_WRITE(0xa, 0xf);
+       local_flush_tlb();
+       pr_debug("1.\n");
+       *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_high)) =
+                                                                start_eip >> 4;
+       pr_debug("2.\n");
+       *((volatile unsigned short *)phys_to_virt(apic->trampoline_phys_low)) =
+                                                        start_eip & 0xf;
+       pr_debug("3.\n");
+}
+
+static inline void smpboot_restore_warm_reset_vector(void)
+{
+       /*
+        * Install writable page 0 entry to set BIOS data area.
+        */
+       local_flush_tlb();
+
+       /*
+        * Paranoid:  Set warm reset code and vector here back
+        * to default values.
+        */
+       CMOS_WRITE(0, 0xf);
+
+       *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
+}
+
+static inline void __init smpboot_setup_io_apic(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+       /*
+        * Here we can be sure that there is an IO-APIC in the system. Let's
+        * go and set it up:
+        */
+       if (!skip_ioapic_setup && nr_ioapics)
+               setup_IO_APIC();
+       else {
+               nr_ioapics = 0;
+               localise_nmi_watchdog();
+       }
+#endif
+}
+
+static inline void smpboot_clear_io_apic(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+       nr_ioapics = 0;
+#endif
+}
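
smpboot_setup_warm_reset_vector() leans on two BIOS conventions: writing 0xa to CMOS register 0xf selects the "warm reset: jump via the pointer at 40:67" shutdown code, and that pointer is a real-mode segment:offset pair in the BIOS data area. Assuming the default trampoline_phys_low/high values of 0x467/0x469 (the code above only dereferences apic->trampoline_phys_*, so treat the constants as an assumption), the two stores reduce to:

/* Hedged sketch: default warm-reset vector location assumed. */
#define WARM_RESET_OFF	0x467	/* 16-bit offset  */
#define WARM_RESET_SEG	0x469	/* 16-bit segment */

static void demo_warm_reset_vector(unsigned long start_eip)
{
	/* seg = eip >> 4 and off = eip & 0xf, so seg:off == start_eip */
	*(volatile unsigned short *)phys_to_virt(WARM_RESET_SEG) = start_eip >> 4;
	*(volatile unsigned short *)phys_to_virt(WARM_RESET_OFF) = start_eip & 0xf;
}
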
index 8247e94ac6b18dc337f9404cffc97800660c64d8..3a569665668020755ac910307ee21114f7f7a7ca 100644 (file)
@@ -172,70 +172,8 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
        return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
 }
 
-#ifdef CONFIG_PARAVIRT
-/*
- * Define virtualization-friendly old-style lock byte lock, for use in
- * pv_lock_ops if desired.
- *
- * This differs from the pre-2.6.24 spinlock by always using xchgb
- * rather than decb to take the lock; this allows it to use a
- * zero-initialized lock structure.  It also maintains a 1-byte
- * contention counter, so that we can implement
- * __byte_spin_is_contended.
- */
-struct __byte_spinlock {
-       s8 lock;
-       s8 spinners;
-};
-
-static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       return bl->lock != 0;
-}
-
-static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       return bl->spinners != 0;
-}
-
-static inline void __byte_spin_lock(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       s8 val = 1;
-
-       asm("1: xchgb %1, %0\n"
-           "   test %1,%1\n"
-           "   jz 3f\n"
-           "   " LOCK_PREFIX "incb %2\n"
-           "2: rep;nop\n"
-           "   cmpb $1, %0\n"
-           "   je 2b\n"
-           "   " LOCK_PREFIX "decb %2\n"
-           "   jmp 1b\n"
-           "3:"
-           : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
-}
-
-static inline int __byte_spin_trylock(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       u8 old = 1;
-
-       asm("xchgb %1,%0"
-           : "+m" (bl->lock), "+q" (old) : : "memory");
+#ifndef CONFIG_PARAVIRT
 
-       return old == 0;
-}
-
-static inline void __byte_spin_unlock(raw_spinlock_t *lock)
-{
-       struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
-       smp_wmb();
-       bl->lock = 0;
-}
-#else  /* !CONFIG_PARAVIRT */
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
 {
        return __ticket_spin_is_locked(lock);
@@ -268,7 +206,7 @@ static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
        __raw_spin_lock(lock);
 }
 
-#endif /* CONFIG_PARAVIRT */
+#endif
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
 {
@@ -330,8 +268,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock)
 {
        atomic_t *count = (atomic_t *)lock;
 
-       atomic_dec(count);
-       if (atomic_read(count) >= 0)
+       if (atomic_dec_return(count) >= 0)
                return 1;
        atomic_inc(count);
        return 0;
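
The __raw_read_trylock() hunk above is a correctness fix rather than a cleanup: with separate atomic_dec() and atomic_read() calls, another CPU can change the count between the two operations, so the test may act on a stale value. atomic_dec_return() folds the decrement and the observation into one atomic read-modify-write. Side by side:

	/* old (racy): the tested value is re-read after the decrement,
	 * so a concurrent writer can slip in between */
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;

	/* new (atomic): the tested value is the result of the RMW itself */
	if (atomic_dec_return(count) >= 0)
		return 1;
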
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
new file mode 100644 (file)
index 0000000..c2d742c
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting a predefined pattern at the start
+ * of the stack frame and verifying that it hasn't been overwritten
+ * when returning from the function.  The pattern is called the stack
+ * canary and unfortunately gcc requires it to be at a fixed offset
+ * from %gs.  On x86_64, the offset is 40 bytes and on x86_32 20
+ * bytes.  x86_64 and x86_32 use segment registers differently and
+ * thus handle this requirement differently.
+ *
+ * On x86_64, %gs is shared by percpu area and stack canary.  All
+ * percpu symbols are zero based and %gs points to the base of percpu
+ * area.  The first occupant of the percpu area is always
+ * irq_stack_union which contains stack_canary at offset 40.  Userland
+ * %gs is always saved and restored on kernel entry and exit using
+ * swapgs, so stack protector doesn't add any complexity there.
+ *
+ * On x86_32, it's slightly more complicated.  As in x86_64, %gs is
+ * used for userland TLS.  Unfortunately, some processors are much
+ * slower at loading segment registers with a different value when
+ * entering and leaving the kernel, so the kernel uses %fs for percpu
+ * area and manages %gs lazily so that %gs is switched only when
+ * necessary, usually during task switch.
+ *
+ * As gcc requires the stack canary at %gs:20, %gs can't be managed
+ * lazily if stack protector is enabled, so the kernel saves and
+ * restores userland %gs on kernel entry and exit.  This behavior is
+ * controlled by CONFIG_X86_32_LAZY_GS and accessors are defined in
+ * system.h to hide the details.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+#include <asm/tsc.h>
+#include <asm/processor.h>
+#include <asm/percpu.h>
+#include <asm/system.h>
+#include <asm/desc.h>
+#include <linux/random.h>
+
+/*
+ * 24-byte read-only segment initializer for the stack canary.  The
+ * linker can't handle the address bit shifting.  The address will be
+ * set in head_32 for the boot CPU and in setup_per_cpu_areas() for
+ * the others.
+ */
+#define GDT_STACK_CANARY_INIT                                          \
+       [GDT_ENTRY_STACK_CANARY] = { { { 0x00000018, 0x00409000 } } },
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+       u64 canary;
+       u64 tsc;
+
+#ifdef CONFIG_X86_64
+       BUILD_BUG_ON(offsetof(union irq_stack_union, stack_canary) != 40);
+#endif
+       /*
+        * We use both the random pool and the current TSC as sources
+        * of randomness. The TSC only matters for very early init;
+        * there it already has some randomness on most systems. Later
+        * on during bootup the random pool has true entropy too.
+        */
+       get_random_bytes(&canary, sizeof(canary));
+       tsc = __native_read_tsc();
+       canary += tsc + (tsc << 32UL);
+
+       current->stack_canary = canary;
+#ifdef CONFIG_X86_64
+       percpu_write(irq_stack_union.stack_canary, canary);
+#else
+       percpu_write(stack_canary, canary);
+#endif
+}
+
+static inline void setup_stack_canary_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+       unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu) - 20;
+       struct desc_struct *gdt_table = get_cpu_gdt_table(cpu);
+       struct desc_struct desc;
+
+       desc = gdt_table[GDT_ENTRY_STACK_CANARY];
+       desc.base0 = canary & 0xffff;
+       desc.base1 = (canary >> 16) & 0xff;
+       desc.base2 = (canary >> 24) & 0xff;
+       write_gdt_entry(gdt_table, GDT_ENTRY_STACK_CANARY, &desc, DESCTYPE_S);
+#endif
+}
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+       asm("mov %0, %%gs" : : "r" (__KERNEL_STACK_CANARY) : "memory");
+#endif
+}
+
+#else  /* CC_STACKPROTECTOR */
+
+#define GDT_STACK_CANARY_INIT
+
+/* dummy boot_init_stack_canary() is defined in linux/stackprotector.h */
+
+static inline void setup_stack_canary_segment(int cpu)
+{ }
+
+static inline void load_stack_canary_segment(void)
+{
+#ifdef CONFIG_X86_32
+       asm volatile ("mov %0, %%gs" : : "r" (0));
+#endif
+}
+
+#endif /* CC_STACKPROTECTOR */
+#endif /* _ASM_STACKPROTECTOR_H */
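
For orientation: since gcc hard-codes the canary slot (%gs:20 on x86_32, %gs:40 on x86_64), a function built with -fstack-protector conceptually expands to the sketch below. This is illustrative pseudo-C of the compiler's output, not code from this patch:

extern void __stack_chk_fail(void);	/* gcc's runtime hook */

void demo_protected_fn(void)
{
	unsigned long canary, check;

	/* prologue: copy the per-cpu canary into this stack frame */
	asm ("mov %%gs:20, %0" : "=r" (canary));	/* 40 on x86_64 */

	/* ... function body with local buffers ... */

	/* epilogue: a mismatch means the frame was overwritten */
	asm ("mov %%gs:20, %0" : "=r" (check));
	if (check != canary)
		__stack_chk_fail();	/* does not return */
}
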
diff --git a/arch/x86/include/asm/summit/apic.h b/arch/x86/include/asm/summit/apic.h
deleted file mode 100644 (file)
index 93d2c86..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-#ifndef __ASM_SUMMIT_APIC_H
-#define __ASM_SUMMIT_APIC_H
-
-#include <asm/smp.h>
-#include <linux/gfp.h>
-
-#define esr_disable (1)
-#define NO_BALANCE_IRQ (0)
-
-/* In clustered mode, the high nibble of APIC ID is a cluster number.
- * The low nibble is a 4-bit bitmap. */
-#define XAPIC_DEST_CPUS_SHIFT  4
-#define XAPIC_DEST_CPUS_MASK   ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
-#define XAPIC_DEST_CLUSTER_MASK        (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
-
-#define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
-
-static inline const cpumask_t *target_cpus(void)
-{
-       /* CPU_MASK_ALL (0xff) has undefined behaviour with
-        * dest_LowestPrio mode logical clustered apic interrupt routing
-        * Just start on cpu 0.  IRQ balancing will spread load
-        */
-       return &cpumask_of_cpu(0);
-}
-
-#define INT_DELIVERY_MODE (dest_LowestPrio)
-#define INT_DEST_MODE 1     /* logical delivery broadcast to all procs */
-
-static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid)
-{
-       return 0;
-}
-
-/* we don't use the phys_cpu_present_map to indicate apicid presence */
-static inline unsigned long check_apicid_present(int bit)
-{
-       return 1;
-}
-
-#define apicid_cluster(apicid) ((apicid) & XAPIC_DEST_CLUSTER_MASK)
-
-extern u8 cpu_2_logical_apicid[];
-
-static inline void init_apic_ldr(void)
-{
-       unsigned long val, id;
-       int count = 0;
-       u8 my_id = (u8)hard_smp_processor_id();
-       u8 my_cluster = (u8)apicid_cluster(my_id);
-#ifdef CONFIG_SMP
-       u8 lid;
-       int i;
-
-       /* Create logical APIC IDs by counting CPUs already in cluster. */
-       for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
-               lid = cpu_2_logical_apicid[i];
-               if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
-                       ++count;
-       }
-#endif
-       /* We only have a 4 wide bitmap in cluster mode.  If a deranged
-        * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
-       BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
-       id = my_cluster | (1UL << count);
-       apic_write(APIC_DFR, APIC_DFR_VALUE);
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       val |= SET_APIC_LOGICAL_ID(id);
-       apic_write(APIC_LDR, val);
-}
-
-static inline int multi_timer_check(int apic, int irq)
-{
-       return 0;
-}
-
-static inline int apic_id_registered(void)
-{
-       return 1;
-}
-
-static inline void setup_apic_routing(void)
-{
-       printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
-                                               nr_ioapics);
-}
-
-static inline int apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-       return apicid_2_node[hard_smp_processor_id()];
-#else
-       return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-       if (cpu >= nr_cpu_ids)
-               return BAD_APICID;
-       return (int)cpu_2_logical_apicid[cpu];
-#else
-       return logical_smp_processor_id();
-#endif
-}
-
-static inline int cpu_present_to_apicid(int mps_cpu)
-{
-       if (mps_cpu < nr_cpu_ids)
-               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-       else
-               return BAD_APICID;
-}
-
-static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map)
-{
-       /* For clustered we don't have a good way to do this yet - hack */
-       return physids_promote(0x0F);
-}
-
-static inline physid_mask_t apicid_to_cpu_present(int apicid)
-{
-       return physid_mask_of_physid(0);
-}
-
-static inline void setup_portio_remap(void)
-{
-}
-
-static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
-{
-       return 1;
-}
-
-static inline void enable_apic_mode(void)
-{
-}
-
-static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
-{
-       int num_bits_set;
-       int cpus_found = 0;
-       int cpu;
-       int apicid;
-
-       num_bits_set = cpus_weight(*cpumask);
-       /* Return id to all */
-       if (num_bits_set >= nr_cpu_ids)
-               return (int) 0xFF;
-       /*
-        * The cpus in the mask must all be on the apic cluster.  If are not
-        * on the same apicid cluster return default value of TARGET_CPUS.
-        */
-       cpu = first_cpu(*cpumask);
-       apicid = cpu_to_logical_apicid(cpu);
-       while (cpus_found < num_bits_set) {
-               if (cpu_isset(cpu, *cpumask)) {
-                       int new_apicid = cpu_to_logical_apicid(cpu);
-                       if (apicid_cluster(apicid) !=
-                                       apicid_cluster(new_apicid)){
-                               printk ("%s: Not a valid mask!\n", __func__);
-                               return 0xFF;
-                       }
-                       apicid = apicid | new_apicid;
-                       cpus_found++;
-               }
-               cpu++;
-       }
-       return apicid;
-}
-
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
-                                                 const struct cpumask *andmask)
-{
-       int apicid = cpu_to_logical_apicid(0);
-       cpumask_var_t cpumask;
-
-       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
-               return apicid;
-
-       cpumask_and(cpumask, inmask, andmask);
-       cpumask_and(cpumask, cpumask, cpu_online_mask);
-       apicid = cpu_mask_to_apicid(cpumask);
-
-       free_cpumask_var(cpumask);
-       return apicid;
-}
-
-/* cpuid returns the value latched in the HW at reset, not the APIC ID
- * register's value.  For any box whose BIOS changes APIC IDs, like
- * clustered APIC systems, we must use hard_smp_processor_id.
- *
- * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
- */
-static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
-{
-       return hard_smp_processor_id() >> index_msb;
-}
-
-#endif /* __ASM_SUMMIT_APIC_H */
diff --git a/arch/x86/include/asm/summit/apicdef.h b/arch/x86/include/asm/summit/apicdef.h
deleted file mode 100644 (file)
index f3fbca1..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __ASM_SUMMIT_APICDEF_H
-#define __ASM_SUMMIT_APICDEF_H
-
-#define                APIC_ID_MASK            (0xFF<<24)
-
-static inline unsigned get_apic_id(unsigned long x)
-{
-       return (x>>24)&0xFF;
-}
-
-#define                GET_APIC_ID(x)  get_apic_id(x)
-
-#endif
diff --git a/arch/x86/include/asm/summit/ipi.h b/arch/x86/include/asm/summit/ipi.h
deleted file mode 100644 (file)
index a8a2c24..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#ifndef __ASM_SUMMIT_IPI_H
-#define __ASM_SUMMIT_IPI_H
-
-void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
-void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
-
-static inline void send_IPI_mask(const cpumask_t *mask, int vector)
-{
-       send_IPI_mask_sequence(mask, vector);
-}
-
-static inline void send_IPI_allbutself(int vector)
-{
-       cpumask_t mask = cpu_online_map;
-       cpu_clear(smp_processor_id(), mask);
-
-       if (!cpus_empty(mask))
-               send_IPI_mask(&mask, vector);
-}
-
-static inline void send_IPI_all(int vector)
-{
-       send_IPI_mask(&cpu_online_map, vector);
-}
-
-#endif /* __ASM_SUMMIT_IPI_H */
diff --git a/arch/x86/include/asm/summit/mpparse.h b/arch/x86/include/asm/summit/mpparse.h
deleted file mode 100644 (file)
index 380e86c..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-#ifndef __ASM_SUMMIT_MPPARSE_H
-#define __ASM_SUMMIT_MPPARSE_H
-
-#include <asm/tsc.h>
-
-extern int use_cyclone;
-
-#ifdef CONFIG_X86_SUMMIT_NUMA
-extern void setup_summit(void);
-#else
-#define setup_summit() {}
-#endif
-
-static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
-               char *productid)
-{
-       if (!strncmp(oem, "IBM ENSW", 8) &&
-                       (!strncmp(productid, "VIGIL SMP", 9)
-                        || !strncmp(productid, "EXA", 3)
-                        || !strncmp(productid, "RUTHLESS SMP", 12))){
-               mark_tsc_unstable("Summit based system");
-               use_cyclone = 1; /*enable cyclone-timer*/
-               setup_summit();
-               return 1;
-       }
-       return 0;
-}
-
-/* Hook from generic ACPI tables.c */
-static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       if (!strncmp(oem_id, "IBM", 3) &&
-           (!strncmp(oem_table_id, "SERVIGIL", 8)
-            || !strncmp(oem_table_id, "EXA", 3))){
-               mark_tsc_unstable("Summit based system");
-               use_cyclone = 1; /*enable cyclone-timer*/
-               setup_summit();
-               return 1;
-       }
-       return 0;
-}
-
-struct rio_table_hdr {
-       unsigned char version;      /* Version number of this data structure           */
-                                   /* Version 3 adds chassis_num & WP_index           */
-       unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil)   */
-       unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs)   */
-} __attribute__((packed));
-
-struct scal_detail {
-       unsigned char node_id;      /* Scalability Node ID                             */
-       unsigned long CBAR;         /* Address of 1MB register space                   */
-       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
-       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char port1node;    /* Node ID port connected to: 0xFF = None          */
-       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char port2node;    /* Node ID port connected to: 0xFF = None          */
-       unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node)          */
-} __attribute__((packed));
-
-struct rio_detail {
-       unsigned char node_id;      /* RIO Node ID                                     */
-       unsigned long BBAR;         /* Address of 1MB register space                   */
-       unsigned char type;         /* Type of device                                  */
-       unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
-                                   /* For CYC:  Node ID of Twister that owns this CYC */
-       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
-       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char port1node;    /* Node ID port connected to: 0xFF=None            */
-       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
-       unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG    */
-                                   /* For CYC:  0                                     */
-       unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used         */
-                                   /*                 = 0 : the XAPIC is not used, ie:*/
-                                   /*                     ints fwded to another XAPIC */
-                                   /*           Bits1:7 Reserved                      */
-                                   /* For CYC:  Bits0:7 Reserved                      */
-       unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
-                                   /*           lower slot numbers/PCI bus numbers    */
-                                   /* For CYC:  No meaning                            */
-       unsigned char chassis_num;  /* 1 based Chassis number                          */
-                                   /* For LookOut WPEGs this field indicates the      */
-                                   /* Expansion Chassis #, enumerated from Boot       */
-                                   /* Node WPEG external port, then Boot Node CYC     */
-                                   /* external port, then Next Vigil chassis WPEG     */
-                                   /* external port, etc.                             */
-                                   /* Shared Lookouts have only 1 chassis number (the */
-                                   /* first one assigned)                             */
-} __attribute__((packed));
-
-
-typedef enum {
-       CompatTwister = 0,  /* Compatibility Twister               */
-       AltTwister    = 1,  /* Alternate Twister of internal 8-way */
-       CompatCyclone = 2,  /* Compatibility Cyclone               */
-       AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
-       CompatWPEG    = 4,  /* Compatibility WPEG                  */
-       AltWPEG       = 5,  /* Second Planar WPEG                  */
-       LookOutAWPEG  = 6,  /* LookOut WPEG                        */
-       LookOutBWPEG  = 7,  /* LookOut WPEG                        */
-} node_type;
-
-static inline int is_WPEG(struct rio_detail *rio){
-       return (rio->type == CompatWPEG || rio->type == AltWPEG ||
-               rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
-}
-
-#endif /* __ASM_SUMMIT_MPPARSE_H */
index c0b0bda754eeece404b25e3fbe206e790abc87de..7043408f6904a9f2d94c817b3488f80fbb4976df 100644 (file)
@@ -29,21 +29,21 @@ asmlinkage int sys_get_thread_area(struct user_desc __user *);
 /* X86_32 only */
 #ifdef CONFIG_X86_32
 /* kernel/process_32.c */
-asmlinkage int sys_fork(struct pt_regs);
-asmlinkage int sys_clone(struct pt_regs);
-asmlinkage int sys_vfork(struct pt_regs);
-asmlinkage int sys_execve(struct pt_regs);
+int sys_fork(struct pt_regs *);
+int sys_clone(struct pt_regs *);
+int sys_vfork(struct pt_regs *);
+int sys_execve(struct pt_regs *);
 
 /* kernel/signal_32.c */
 asmlinkage int sys_sigsuspend(int, int, old_sigset_t);
 asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
                             struct old_sigaction __user *);
-asmlinkage int sys_sigaltstack(unsigned long);
-asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(unsigned long);
+int sys_sigaltstack(struct pt_regs *);
+unsigned long sys_sigreturn(struct pt_regs *);
+long sys_rt_sigreturn(struct pt_regs *);
 
 /* kernel/ioport.c */
-asmlinkage long sys_iopl(unsigned long);
+long sys_iopl(struct pt_regs *);
 
 /* kernel/sys_i386_32.c */
 asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
@@ -59,8 +59,8 @@ struct oldold_utsname;
 asmlinkage int sys_olduname(struct oldold_utsname __user *);
 
 /* kernel/vm86_32.c */
-asmlinkage int sys_vm86old(struct pt_regs);
-asmlinkage int sys_vm86(struct pt_regs);
+int sys_vm86old(struct pt_regs *);
+int sys_vm86(struct pt_regs *);
 
 #else /* CONFIG_X86_32 */
 
@@ -74,6 +74,7 @@ asmlinkage long sys_vfork(struct pt_regs *);
 asmlinkage long sys_execve(char __user *, char __user * __user *,
                           char __user * __user *,
                           struct pt_regs *);
+long sys_arch_prctl(int, unsigned long);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
@@ -81,7 +82,7 @@ asmlinkage long sys_iopl(unsigned int, struct pt_regs *);
 /* kernel/signal_64.c */
 asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
                                struct pt_regs *);
-asmlinkage long sys_rt_sigreturn(struct pt_regs *);
+long sys_rt_sigreturn(struct pt_regs *);
 
 /* kernel/sys_x86_64.c */
 asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
index 8e626ea33a1a64d1565c10a0784200ef92ec9a31..643c59b4bc6ea133aa19244ce02bd00be342d7d1 100644 (file)
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 struct task_struct *__switch_to(struct task_struct *prev,
                                struct task_struct *next);
+struct tss_struct;
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+                     struct tss_struct *tss);
 
 #ifdef CONFIG_X86_32
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary                                                        \
+       "movl %P[task_canary](%[next]), %%ebx\n\t"                      \
+       "movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
+#define __switch_canary_oparam                                         \
+       , [stack_canary] "=m" (per_cpu_var(stack_canary))
+#define __switch_canary_iparam                                         \
+       , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else  /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
 /*
  * Saving eflags is important. It switches not only IOPL between tasks,
  * it also protects other tasks from NT leaking through sysenter etc.
@@ -44,6 +61,7 @@ do {                                                                  \
                     "movl %[next_sp],%%esp\n\t"        /* restore ESP   */ \
                     "movl $1f,%[prev_ip]\n\t"  /* save    EIP   */     \
                     "pushl %[next_ip]\n\t"     /* restore EIP   */     \
+                    __switch_canary                                    \
                     "jmp __switch_to\n"        /* regparm call  */     \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"           /* restore EBP   */     \
@@ -58,6 +76,8 @@ do {                                                                  \
                       "=b" (ebx), "=c" (ecx), "=d" (edx),              \
                       "=S" (esi), "=D" (edi)                           \
                                                                        \
+                      __switch_canary_oparam                           \
+                                                                       \
                       /* input parameters: */                          \
                     : [next_sp]  "m" (next->thread.sp),                \
                       [next_ip]  "m" (next->thread.ip),                \
@@ -66,6 +86,8 @@ do {                                                                  \
                       [prev]     "a" (prev),                           \
                       [next]     "d" (next)                            \
                                                                        \
+                      __switch_canary_iparam                           \
+                                                                       \
                     : /* reloaded segment registers */                 \
                        "memory");                                      \
 } while (0)
@@ -86,27 +108,44 @@ do {                                                                       \
        , "rcx", "rbx", "rdx", "r8", "r9", "r10", "r11", \
          "r12", "r13", "r14", "r15"
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#define __switch_canary                                                          \
+       "movq %P[task_canary](%%rsi),%%r8\n\t"                            \
+       "movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+#define __switch_canary_oparam                                           \
+       , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+#define __switch_canary_iparam                                           \
+       , [task_canary] "i" (offsetof(struct task_struct, stack_canary))
+#else  /* CC_STACKPROTECTOR */
+#define __switch_canary
+#define __switch_canary_oparam
+#define __switch_canary_iparam
+#endif /* CC_STACKPROTECTOR */
+
 /* Save restore flags to clear handle leaking NT */
 #define switch_to(prev, next, last) \
-       asm volatile(SAVE_CONTEXT                                                   \
+       asm volatile(SAVE_CONTEXT                                         \
             "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
             "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
             "call __switch_to\n\t"                                       \
             ".globl thread_return\n"                                     \
             "thread_return:\n\t"                                         \
-            "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                       \
+            "movq "__percpu_arg([current_task])",%%rsi\n\t"              \
+            __switch_canary                                              \
             "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
-            LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"        \
             "movq %%rax,%%rdi\n\t"                                       \
-            "jc   ret_from_fork\n\t"                                     \
+            "testl  %[_tif_fork],%P[ti_flags](%%r8)\n\t"         \
+            "jnz   ret_from_fork\n\t"                                    \
             RESTORE_CONTEXT                                              \
             : "=a" (last)                                                \
+              __switch_canary_oparam                                     \
             : [next] "S" (next), [prev] "D" (prev),                      \
               [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
               [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
-              [tif_fork] "i" (TIF_FORK),                                 \
+              [_tif_fork] "i" (_TIF_FORK),                               \
               [thread_info] "i" (offsetof(struct task_struct, stack)),   \
-              [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
+              [current_task] "m" (per_cpu_var(current_task))             \
+              __switch_canary_iparam                                     \
             : "memory", "cc" __EXTRA_CLOBBER)
 #endif
 
@@ -165,6 +204,25 @@ extern void native_load_gs_index(unsigned);
 #define savesegment(seg, value)                                \
        asm("mov %%" #seg ",%0":"=r" (value) : : "memory")
 
+/*
+ * x86_32 user gs accessors.
+ */
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_32_LAZY_GS
+#define get_user_gs(regs)      (u16)({unsigned long v; savesegment(gs, v); v;})
+#define set_user_gs(regs, v)   loadsegment(gs, (unsigned long)(v))
+#define task_user_gs(tsk)      ((tsk)->thread.gs)
+#define lazy_save_gs(v)                savesegment(gs, (v))
+#define lazy_load_gs(v)                loadsegment(gs, (v))
+#else  /* X86_32_LAZY_GS */
+#define get_user_gs(regs)      (u16)((regs)->gs)
+#define set_user_gs(regs, v)   do { (regs)->gs = (v); } while (0)
+#define task_user_gs(tsk)      (task_pt_regs(tsk)->gs)
+#define lazy_save_gs(v)                do { } while (0)
+#define lazy_load_gs(v)                do { } while (0)
+#endif /* X86_32_LAZY_GS */
+#endif /* X86_32 */
+
 static inline unsigned long get_limit(unsigned long segment)
 {
        unsigned long __limit;
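
The accessor layer above exists so that callers never test CONFIG_X86_32_LAZY_GS themselves: in the lazy case get_user_gs()/set_user_gs() touch the live segment register, otherwise they use the pt_regs gs slot added earlier in this series. A hedged sketch of a hypothetical caller, identical in both modes:

/* Sketch: demo_clear_user_gs() is hypothetical. */
static void demo_clear_user_gs(struct pt_regs *regs)
{
	if (get_user_gs(regs) != 0)	/* live %gs or regs->gs */
		set_user_gs(regs, 0);
}
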
index 98789647baa9b8a6d1e414f9e40c95c70f704a11..df9d5f78385e442ae8c92b8556928e86c270021c 100644 (file)
@@ -40,6 +40,7 @@ struct thread_info {
                                                */
        __u8                    supervisor_stack[0];
 #endif
+       int                     uaccess_err;
 };
 
 #define INIT_THREAD_INFO(tsk)                  \
@@ -194,25 +195,21 @@ static inline struct thread_info *current_thread_info(void)
 
 #else /* X86_32 */
 
-#include <asm/pda.h>
+#include <asm/percpu.h>
+#define KERNEL_STACK_OFFSET (5*8)
 
 /*
  * macros/functions for gaining access to the thread information structure
  * preempt_count needs to be 1 initially, until the scheduler is functional.
  */
 #ifndef __ASSEMBLY__
-static inline struct thread_info *current_thread_info(void)
-{
-       struct thread_info *ti;
-       ti = (void *)(read_pda(kernelstack) + PDA_STACKOFFSET - THREAD_SIZE);
-       return ti;
-}
+DECLARE_PER_CPU(unsigned long, kernel_stack);
 
-/* do not use in interrupt context */
-static inline struct thread_info *stack_thread_info(void)
+static inline struct thread_info *current_thread_info(void)
 {
        struct thread_info *ti;
-       asm("andq %%rsp,%0; " : "=r" (ti) : "0" (~(THREAD_SIZE - 1)));
+       ti = (void *)(percpu_read(kernel_stack) +
+                     KERNEL_STACK_OFFSET - THREAD_SIZE);
        return ti;
 }
 
@@ -220,8 +217,8 @@ static inline struct thread_info *stack_thread_info(void)
 
 /* how to get the thread information struct from ASM */
 #define GET_THREAD_INFO(reg) \
-       movq %gs:pda_kernelstack,reg ; \
-       subq $(THREAD_SIZE-PDA_STACKOFFSET),reg
+       movq PER_CPU_VAR(kernel_stack),reg ; \
+       subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
 
 #endif
 
index 2bb6a835c453c0ced2d9b7e78e998e7fde5d8c3e..a81195eaa2b3dc3d4378d523ee79c256ca752e52 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/percpu.h>
+#include <linux/interrupt.h>
 
 #define TICK_SIZE (tick_nsec / 1000)
 
@@ -12,6 +13,7 @@ unsigned long native_calibrate_tsc(void);
 #ifdef CONFIG_X86_32
 extern int timer_ack;
 extern int recalibrate_cpu_khz(void);
+extern irqreturn_t timer_interrupt(int irq, void *dev_id);
 #endif /* CONFIG_X86_32 */
 
 extern int no_timer_check;
index 0e7bbb5491169f69c4b0b0e2d0e98a8445d95fcd..d3539f998f88c71dd6c9a784628c7d084d7521bc 100644 (file)
@@ -113,7 +113,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
                __flush_tlb();
 }
 
-static inline void native_flush_tlb_others(const cpumask_t *cpumask,
+static inline void native_flush_tlb_others(const struct cpumask *cpumask,
                                           struct mm_struct *mm,
                                           unsigned long va)
 {
@@ -142,31 +142,28 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        flush_tlb_mm(vma->vm_mm);
 }
 
-void native_flush_tlb_others(const cpumask_t *cpumask, struct mm_struct *mm,
-                            unsigned long va);
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va);
 
 #define TLBSTATE_OK    1
 #define TLBSTATE_LAZY  2
 
-#ifdef CONFIG_X86_32
 struct tlb_state {
        struct mm_struct *active_mm;
        int state;
-       char __cacheline_padding[L1_CACHE_BYTES-8];
 };
 DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
 
-void reset_lazy_tlbstate(void);
-#else
 static inline void reset_lazy_tlbstate(void)
 {
+       percpu_write(cpu_tlbstate.state, 0);
+       percpu_write(cpu_tlbstate.active_mm, &init_mm);
 }
-#endif
 
 #endif /* SMP */
 
 #ifndef CONFIG_PARAVIRT
-#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(&mask, mm, va)
+#define flush_tlb_others(mask, mm, va) native_flush_tlb_others(mask, mm, va)
 #endif
 
 static inline void flush_tlb_kernel_range(unsigned long start,
@@ -175,4 +172,6 @@ static inline void flush_tlb_kernel_range(unsigned long start,
        flush_tlb_all();
 }
 
+extern void zap_low_mappings(void);
+
 #endif /* _ASM_X86_TLBFLUSH_H */
index 4e2f2e0aab27b277eaff75a285b60d495cd2e1db..77cfb2cfb386d5b1a1727807b2a7c84ee8fc4c6c 100644 (file)
@@ -74,6 +74,8 @@ static inline const struct cpumask *cpumask_of_node(int node)
        return &node_to_cpumask_map[node];
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 #else /* CONFIG_X86_64 */
 
 /* Mappings between node number and cpus on that node. */
@@ -83,7 +85,8 @@ extern cpumask_t *node_to_cpumask_map;
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
 /* Returns the number of the current Node. */
-#define numa_node_id()         read_pda(nodenumber)
+DECLARE_PER_CPU(int, node_number);
+#define numa_node_id()         percpu_read(node_number)
 
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 extern int cpu_to_node(int cpu);
@@ -102,10 +105,7 @@ static inline int cpu_to_node(int cpu)
 /* Same function but used if called before per_cpu areas are setup */
 static inline int early_cpu_to_node(int cpu)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       return per_cpu(x86_cpu_to_node_map, cpu);
+       return early_per_cpu(x86_cpu_to_node_map, cpu);
 }
 
 /* Returns a pointer to the cpumask of CPUs on Node 'node'. */
@@ -122,6 +122,8 @@ static inline cpumask_t node_to_cpumask(int node)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
+extern void setup_node_to_cpumask_map(void);
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
@@ -192,9 +194,20 @@ extern int __node_distance(int, int);
 
 #else /* !CONFIG_NUMA */
 
-#define numa_node_id()         0
-#define        cpu_to_node(cpu)        0
-#define        early_cpu_to_node(cpu)  0
+static inline int numa_node_id(void)
+{
+       return 0;
+}
+
+static inline int cpu_to_node(int cpu)
+{
+       return 0;
+}
+
+static inline int early_cpu_to_node(int cpu)
+{
+       return 0;
+}
 
 static inline const cpumask_t *cpumask_of_node(int node)
 {
@@ -209,6 +222,8 @@ static inline int node_to_first_cpu(int node)
        return first_cpu(cpu_online_map);
 }
 
+static inline void setup_node_to_cpumask_map(void) { }
+
 /*
  * Replace default node_to_cpumask_ptr with optimized version
  * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
index 780ba0ab94f908da84b44d71f1740d47ac2677de..90f06c25221d792dab33f662c25012b7d0c39f59 100644 (file)
@@ -13,6 +13,7 @@ extern unsigned char *trampoline_base;
 
 extern unsigned long init_rsp;
 extern unsigned long initial_code;
+extern unsigned long initial_gs;
 
 #define TRAMPOLINE_SIZE roundup(trampoline_end - trampoline_data, PAGE_SIZE)
 #define TRAMPOLINE_BASE 0x6000
index cf3bb053da0b83cb51909168893c35726bc8290a..0d5342515b8696970ec533572055df8b98ef74a9 100644 (file)
@@ -41,7 +41,7 @@ dotraplinkage void do_int3(struct pt_regs *, long);
 dotraplinkage void do_overflow(struct pt_regs *, long);
 dotraplinkage void do_bounds(struct pt_regs *, long);
 dotraplinkage void do_invalid_op(struct pt_regs *, long);
-dotraplinkage void do_device_not_available(struct pt_regs);
+dotraplinkage void do_device_not_available(struct pt_regs *, long);
 dotraplinkage void do_coprocessor_segment_overrun(struct pt_regs *, long);
 dotraplinkage void do_invalid_TSS(struct pt_regs *, long);
 dotraplinkage void do_segment_not_present(struct pt_regs *, long);
index 4340055b755918fb8fd777df62f811ba5e26820c..b685ece89d5cdcf7cca51fabb9dfdce5dd3e021d 100644 (file)
@@ -121,7 +121,7 @@ extern int __get_user_bad(void);
 
 #define __get_user_x(size, ret, x, ptr)                      \
        asm volatile("call __get_user_" #size         \
-                    : "=a" (ret),"=d" (x)            \
+                    : "=a" (ret), "=d" (x)           \
                     : "0" (ptr))                     \
 
 /* Careful: we have to cast the result to the type of the pointer
@@ -181,12 +181,12 @@ extern int __get_user_bad(void);
 
 #define __put_user_x(size, x, ptr, __ret_pu)                   \
        asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
-                    :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+                    : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 
 
 
 #ifdef CONFIG_X86_32
-#define __put_user_u64(x, addr, err)                                   \
+#define __put_user_asm_u64(x, addr, err, errret)                       \
        asm volatile("1:        movl %%eax,0(%2)\n"                     \
                     "2:        movl %%edx,4(%2)\n"                     \
                     "3:\n"                                             \
@@ -197,14 +197,24 @@ extern int __get_user_bad(void);
                     _ASM_EXTABLE(1b, 4b)                               \
                     _ASM_EXTABLE(2b, 4b)                               \
                     : "=r" (err)                                       \
-                    : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+                    : "A" (x), "r" (addr), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex_u64(x, addr)                                 \
+       asm volatile("1:        movl %%eax,0(%1)\n"                     \
+                    "2:        movl %%edx,4(%1)\n"                     \
+                    "3:\n"                                             \
+                    _ASM_EXTABLE(1b, 2b - 1b)                          \
+                    _ASM_EXTABLE(2b, 3b - 2b)                          \
+                    : : "A" (x), "r" (addr))
 
 #define __put_user_x8(x, ptr, __ret_pu)                                \
        asm volatile("call __put_user_8" : "=a" (__ret_pu)      \
                     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
 #else
-#define __put_user_u64(x, ptr, retval) \
-       __put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_asm_u64(x, ptr, retval, errret) \
+       __put_user_asm(x, ptr, retval, "q", "", "Zr", errret)
+#define __put_user_asm_ex_u64(x, addr) \
+       __put_user_asm_ex(x, addr, "q", "", "Zr")
 #define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
 #endif
 
@@ -276,10 +286,32 @@ do {                                                                      \
                __put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
                break;                                                  \
        case 4:                                                         \
-               __put_user_asm(x, ptr, retval, "l", "k",  "ir", errret);\
+               __put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
                break;                                                  \
        case 8:                                                         \
-               __put_user_u64((__typeof__(*ptr))(x), ptr, retval);     \
+               __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,  \
+                                  errret);                             \
+               break;                                                  \
+       default:                                                        \
+               __put_user_bad();                                       \
+       }                                                               \
+} while (0)
+
+#define __put_user_size_ex(x, ptr, size)                               \
+do {                                                                   \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               __put_user_asm_ex(x, ptr, "b", "b", "iq");              \
+               break;                                                  \
+       case 2:                                                         \
+               __put_user_asm_ex(x, ptr, "w", "w", "ir");              \
+               break;                                                  \
+       case 4:                                                         \
+               __put_user_asm_ex(x, ptr, "l", "k", "ir");              \
+               break;                                                  \
+       case 8:                                                         \
+               __put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);      \
                break;                                                  \
        default:                                                        \
                __put_user_bad();                                       \
@@ -311,9 +343,12 @@ do {                                                                       \
 
 #ifdef CONFIG_X86_32
 #define __get_user_asm_u64(x, ptr, retval, errret)     (x) = __get_user_bad()
+#define __get_user_asm_ex_u64(x, ptr)                  (x) = __get_user_bad()
 #else
 #define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_ex_u64(x, ptr) \
+        __get_user_asm_ex(x, ptr, "q", "", "=r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)                  \
@@ -350,6 +385,33 @@ do {                                                                       \
                     : "=r" (err), ltype(x)                             \
                     : "m" (__m(addr)), "i" (errret), "0" (err))
 
+#define __get_user_size_ex(x, ptr, size)                               \
+do {                                                                   \
+       __chk_user_ptr(ptr);                                            \
+       switch (size) {                                                 \
+       case 1:                                                         \
+               __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
+               break;                                                  \
+       case 2:                                                         \
+               __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
+               break;                                                  \
+       case 4:                                                         \
+               __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
+               break;                                                  \
+       case 8:                                                         \
+               __get_user_asm_ex_u64(x, ptr);                          \
+               break;                                                  \
+       default:                                                        \
+               (x) = __get_user_bad();                                 \
+       }                                                               \
+} while (0)
+
+#define __get_user_asm_ex(x, addr, itype, rtype, ltype)                        \
+       asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
+                    "2:\n"                                             \
+                    _ASM_EXTABLE(1b, 2b - 1b)                          \
+                    : ltype(x) : "m" (__m(addr)))
+
 #define __put_user_nocheck(x, ptr, size)                       \
 ({                                                             \
        int __pu_err;                                           \
@@ -385,6 +447,26 @@ struct __large_struct { unsigned long buf[100]; };
                     _ASM_EXTABLE(1b, 3b)                               \
                     : "=r"(err)                                        \
                     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_asm_ex(x, addr, itype, rtype, ltype)                        \
+       asm volatile("1:        mov"itype" %"rtype"0,%1\n"              \
+                    "2:\n"                                             \
+                    _ASM_EXTABLE(1b, 2b - 1b)                          \
+                    : : ltype(x), "m" (__m(addr)))
+
+/*
+ * uaccess_try and catch
+ */
+#define uaccess_try    do {                                            \
+       int prev_err = current_thread_info()->uaccess_err;              \
+       current_thread_info()->uaccess_err = 0;                         \
+       barrier();
+
+#define uaccess_catch(err)                                             \
+       (err) |= current_thread_info()->uaccess_err;                    \
+       current_thread_info()->uaccess_err = prev_err;                  \
+} while (0)
+
 /**
  * __get_user: - Get a simple variable from user space, with less checking.
  * @x:   Variable to store result.
@@ -408,6 +490,7 @@ struct __large_struct { unsigned long buf[100]; };
 
 #define __get_user(x, ptr)                                             \
        __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+
 /**
  * __put_user: - Write a simple value into user space, with less checking.
  * @x:   Value to copy to user space.
@@ -434,6 +517,45 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
+/*
+ * {get|put}_user_try and catch
+ *
+ * get_user_try {
+ *     get_user_ex(...);
+ * } get_user_catch(err)
+ */
+#define get_user_try           uaccess_try
+#define get_user_catch(err)    uaccess_catch(err)
+
+#define get_user_ex(x, ptr)    do {                                    \
+       unsigned long __gue_val;                                        \
+       __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));       \
+       (x) = (__force __typeof__(*(ptr)))__gue_val;                    \
+} while (0)
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+#define put_user_try           uaccess_try
+#define put_user_catch(err)    uaccess_catch(err)
+
+#define put_user_ex(x, ptr)                                            \
+       __put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#else /* !CONFIG_X86_WP_WORKS_OK */
+
+#define put_user_try           do {            \
+       int __uaccess_err = 0;
+
+#define put_user_catch(err)                    \
+       (err) |= __uaccess_err;                 \
+} while (0)
+
+#define put_user_ex(x, ptr)    do {            \
+       __uaccess_err |= __put_user(x, ptr);    \
+} while (0)
+
+#endif /* CONFIG_X86_WP_WORKS_OK */
+
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
  */
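
The *_try/*_ex/*_catch family above batches several user accesses under a single error check: each _ex access carries only an exception-table entry whose "2b - 1b" delta resumes execution at the next instruction, the fault path records the failure in thread_info->uaccess_err, and the catch block folds it into err. A hedged usage sketch with a hypothetical structure:

struct demo_pair { int a, b; };	/* hypothetical */

static int demo_get_pair(struct demo_pair __user *u, struct demo_pair *k)
{
	int err = 0;

	get_user_try {
		get_user_ex(k->a, &u->a);	/* no per-access branch */
		get_user_ex(k->b, &u->b);
	} get_user_catch(err);

	return err;	/* non-zero if any access faulted */
}
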
index 84210c479fca83524c6cef4c6bc069bcff76e272..8cc687326eb80f20726852c9ba66c68837e8176a 100644 (file)
@@ -188,16 +188,16 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 extern long __copy_user_nocache(void *dst, const void __user *src,
                                unsigned size, int zerorest);
 
-static inline int __copy_from_user_nocache(void *dst, const void __user *src,
-                                          unsigned size)
+static inline int
+__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
 {
        might_sleep();
        return __copy_user_nocache(dst, src, size, 1);
 }
 
-static inline int __copy_from_user_inatomic_nocache(void *dst,
-                                                   const void __user *src,
-                                                   unsigned size)
+static inline int
+__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
+                                 unsigned size)
 {
        return __copy_user_nocache(dst, src, size, 0);
 }
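After the reflow, the two wrappers differ only in the might_sleep() check and the final zerorest flag: the plain variant may sleep and zero-fills the uncopied tail on a fault, while the _inatomic one does neither. A hedged caller sketch (dst, ubuf and len are assumptions):

        /* non-zero return: some bytes were not copied */
        if (__copy_from_user_nocache(dst, ubuf, len))
                return -EFAULT;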
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
new file mode 100644 (file)
index 0000000..c0a01b5
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _ASM_X86_UV_UV_H
+#define _ASM_X86_UV_UV_H
+
+enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
+
+struct cpumask;
+struct mm_struct;
+
+#ifdef CONFIG_X86_UV
+
+extern enum uv_system_type get_uv_system_type(void);
+extern int is_uv_system(void);
+extern void uv_cpu_init(void);
+extern void uv_system_init(void);
+extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+                                                struct mm_struct *mm,
+                                                unsigned long va,
+                                                unsigned int cpu);
+
+#else  /* X86_UV */
+
+static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
+static inline int is_uv_system(void)   { return 0; }
+static inline void uv_cpu_init(void)   { }
+static inline void uv_system_init(void)        { }
+static inline const struct cpumask *
+uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
+                   unsigned long va, unsigned int cpu)
+{ return cpumask; }
+
+#endif /* X86_UV */
+
+#endif /* _ASM_X86_UV_UV_H */
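The #else stubs are what let generic code call these entry points unconditionally: with CONFIG_X86_UV off, is_uv_system() is a constant 0 and the whole UV path compiles away. A minimal sketch under that assumption (maybe_setup_uv() is an illustrative name, not from the patch):

#include <asm/uv/uv.h>

static void __init maybe_setup_uv(void)
{
        if (!is_uv_system())
                return;                 /* constant 0 when !CONFIG_X86_UV */

        uv_system_init();
        uv_cpu_init();
}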
index 50423c7b56b2eed9ddce519fac35c0a478dd5f81..9b0e61bf7a88dc2a68521384ea68950cd900478e 100644 (file)
@@ -325,7 +325,6 @@ static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
 #define cpubit_isset(cpu, bau_local_cpumask) \
        test_bit((cpu), (bau_local_cpumask).bits)
 
-extern int uv_flush_tlb_others(cpumask_t *, struct mm_struct *, unsigned long);
 extern void uv_bau_message_intr1(void);
 extern void uv_bau_timeout_intr1(void);
 
index 777327ef05c19f0887c0b9b089a6f71135395e94..9f4dfba33b2899a17ee672c19a4891b09ac816dc 100644 (file)
@@ -199,6 +199,10 @@ DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
 #define SCIR_CPU_ACTIVITY      0x02    /* not idle */
 #define SCIR_CPU_HB_INTERVAL   (HZ)    /* once per second */
 
+/* Loop through all installed blades */
+#define for_each_possible_blade(bid)           \
+       for ((bid) = 0; (bid) < uv_num_possible_blades(); (bid)++)
+
 /*
  * Macros for converting between kernel virtual addresses, socket local physical
  * addresses, and UV global physical addresses.
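The new iterator walks blade ids 0 .. uv_num_possible_blades()-1. An illustrative use (uv_blade_nr_possible_cpus() is assumed to come from the same uv_hub.h header):

        int bid;

        for_each_possible_blade(bid)
                printk(KERN_INFO "UV: blade %d: %d possible cpus\n",
                       bid, uv_blade_nr_possible_cpus(bid));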
diff --git a/arch/x86/include/asm/vic.h b/arch/x86/include/asm/vic.h
deleted file mode 100644 (file)
index 53100f3..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (C) 1999,2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * Standard include definitions for the NCR Voyager Interrupt Controller */
-
-/* The eight CPI vectors.  To activate a CPI, you write a bit mask
- * corresponding to the processor set to be interrupted into the
- * relevant register.  That set of CPUs will then be interrupted with
- * the CPI */
-static const int VIC_CPI_Registers[] =
-       {0xFC00, 0xFC01, 0xFC08, 0xFC09,
-        0xFC10, 0xFC11, 0xFC18, 0xFC19 };
-
-#define VIC_PROC_WHO_AM_I              0xfc29
-#      define  QUAD_IDENTIFIER         0xC0
-#      define  EIGHT_SLOT_IDENTIFIER   0xE0
-#define QIC_EXTENDED_PROCESSOR_SELECT  0xFC72
-#define VIC_CPI_BASE_REGISTER          0xFC41
-#define VIC_PROCESSOR_ID               0xFC21
-#      define VIC_CPU_MASQUERADE_ENABLE 0x8
-
-#define VIC_CLAIM_REGISTER_0           0xFC38
-#define VIC_CLAIM_REGISTER_1           0xFC39
-#define VIC_REDIRECT_REGISTER_0                0xFC60
-#define VIC_REDIRECT_REGISTER_1                0xFC61
-#define VIC_PRIORITY_REGISTER          0xFC20
-
-#define VIC_PRIMARY_MC_BASE            0xFC48
-#define VIC_SECONDARY_MC_BASE          0xFC49
-
-#define QIC_PROCESSOR_ID               0xFC71
-#      define  QIC_CPUID_ENABLE        0x08
-
-#define QIC_VIC_CPI_BASE_REGISTER      0xFC79
-#define QIC_CPI_BASE_REGISTER          0xFC7A
-
-#define QIC_MASK_REGISTER0             0xFC80
-/* NOTE: these are masked high, enabled low */
-#      define QIC_PERF_TIMER           0x01
-#      define QIC_LPE                  0x02
-#      define QIC_SYS_INT              0x04
-#      define QIC_CMN_INT              0x08
-/* at the moment, just enable CMN_INT, disable SYS_INT */
-#      define QIC_DEFAULT_MASK0        (~(QIC_CMN_INT /* | VIC_SYS_INT */))
-#define QIC_MASK_REGISTER1             0xFC81
-#      define QIC_BOOT_CPI_MASK        0xFE
-/* Enable CPI's 1-6 inclusive */
-#      define QIC_CPI_ENABLE           0x81
-
-#define QIC_INTERRUPT_CLEAR0           0xFC8A
-#define QIC_INTERRUPT_CLEAR1           0xFC8B
-
-/* this is where we place the CPI vectors */
-#define VIC_DEFAULT_CPI_BASE           0xC0
-/* this is where we place the QIC CPI vectors */
-#define QIC_DEFAULT_CPI_BASE           0xD0
-
-#define VIC_BOOT_INTERRUPT_MASK                0xfe
-
-extern void smp_vic_timer_interrupt(void);
diff --git a/arch/x86/include/asm/voyager.h b/arch/x86/include/asm/voyager.h
deleted file mode 100644 (file)
index b3e6473..0000000
+++ /dev/null
@@ -1,529 +0,0 @@
-/* Copyright (C) 1999,2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * Standard include definitions for the NCR Voyager system */
-
-#undef VOYAGER_DEBUG
-#undef VOYAGER_CAT_DEBUG
-
-#ifdef VOYAGER_DEBUG
-#define VDEBUG(x)      printk x
-#else
-#define VDEBUG(x)
-#endif
-
-/* There are three levels of voyager machine: 3, 4 and 5. The rule is:
- * if it's less than 3435 it's a Level 3, except for a 3360, which is
- * a Level 4.  A 3435 or above is a Level 5 */
-#define VOYAGER_LEVEL5_AND_ABOVE       0x3435
-#define VOYAGER_LEVEL4                 0x3360
-
-/* The L4 DINO ASIC */
-#define VOYAGER_DINO                   0x43
-
-/* voyager ports in standard I/O space */
-#define VOYAGER_MC_SETUP       0x96
-
-
-#define        VOYAGER_CAT_CONFIG_PORT                 0x97
-#      define VOYAGER_CAT_DESELECT             0xff
-#define VOYAGER_SSPB_RELOCATION_PORT           0x98
-
-/* Valid CAT controller commands */
-/* start instruction register cycle */
-#define VOYAGER_CAT_IRCYC                      0x01
-/* start data register cycle */
-#define VOYAGER_CAT_DRCYC                      0x02
-/* move to execute state */
-#define VOYAGER_CAT_RUN                                0x0F
-/* end operation */
-#define VOYAGER_CAT_END                                0x80
-/* hold in idle state */
-#define VOYAGER_CAT_HOLD                       0x90
-/* single step an "intest" vector */
-#define VOYAGER_CAT_STEP                       0xE0
-/* return cat controller to CLEMSON mode */
-#define VOYAGER_CAT_CLEMSON                    0xFF
-
-/* the default cat command header */
-#define VOYAGER_CAT_HEADER                     0x7F
-
-/* the range of possible CAT module ids in the system */
-#define VOYAGER_MIN_MODULE                     0x10
-#define VOYAGER_MAX_MODULE                     0x1f
-
-/* The voyager registers per asic */
-#define VOYAGER_ASIC_ID_REG                    0x00
-#define VOYAGER_ASIC_TYPE_REG                  0x01
-/* the sub address registers can be made auto incrementing on reads */
-#define VOYAGER_AUTO_INC_REG                   0x02
-#      define VOYAGER_AUTO_INC                 0x04
-#      define VOYAGER_NO_AUTO_INC              0xfb
-#define VOYAGER_SUBADDRDATA                    0x03
-#define VOYAGER_SCANPATH                       0x05
-#      define VOYAGER_CONNECT_ASIC             0x01
-#      define VOYAGER_DISCONNECT_ASIC          0xfe
-#define VOYAGER_SUBADDRLO                      0x06
-#define VOYAGER_SUBADDRHI                      0x07
-#define VOYAGER_SUBMODSELECT                   0x08
-#define VOYAGER_SUBMODPRESENT                  0x09
-
-#define VOYAGER_SUBADDR_LO                     0xff
-#define VOYAGER_SUBADDR_HI                     0xffff
-
-/* the maximum size of a scan path -- used to form instructions */
-#define VOYAGER_MAX_SCAN_PATH                  0x100
-/* the biggest possible register size (in bytes) */
-#define VOYAGER_MAX_REG_SIZE                   4
-
-/* Total number of possible modules (including submodules) */
-#define VOYAGER_MAX_MODULES                    16
-/* Largest number of asics per module */
-#define VOYAGER_MAX_ASICS_PER_MODULE           7
-
-/* the CAT asic of each module is always the first one */
-#define VOYAGER_CAT_ID                         0
-#define VOYAGER_PSI                            0x1a
-
-/* voyager instruction operations and registers */
-#define VOYAGER_READ_CONFIG                    0x1
-#define VOYAGER_WRITE_CONFIG                   0x2
-#define VOYAGER_BYPASS                         0xff
-
-typedef struct voyager_asic {
-       __u8    asic_addr;      /* ASIC address; Level 4 */
-       __u8    asic_type;      /* ASIC type */
-       __u8    asic_id;        /* ASIC id */
-       __u8    jtag_id[4];     /* JTAG id */
-       __u8    asic_location;  /* Location within scan path; start w/ 0 */
-       __u8    bit_location;   /* Location within bit stream; start w/ 0 */
-       __u8    ireg_length;    /* Instruction register length */
-       __u16   subaddr;        /* Amount of sub address space */
-       struct voyager_asic *next;      /* Next asic in linked list */
-} voyager_asic_t;
-
-typedef struct voyager_module {
-       __u8    module_addr;            /* Module address */
-       __u8    scan_path_connected;    /* Scan path connected */
-       __u16   ee_size;                /* Size of the EEPROM */
-       __u16   num_asics;              /* Number of Asics */
-       __u16   inst_bits;              /* Instruction bits in the scan path */
-       __u16   largest_reg;            /* Largest register in the scan path */
-       __u16   smallest_reg;           /* Smallest register in the scan path */
-       voyager_asic_t   *asic;         /* First ASIC in scan path (CAT_I) */
-       struct   voyager_module *submodule;     /* Submodule pointer */
-       struct   voyager_module *next;          /* Next module in linked list */
-} voyager_module_t;
-
-typedef struct voyager_eeprom_hdr {
-        __u8  module_id[4];
-        __u8  version_id;
-        __u8  config_id;
-        __u16 boundry_id;      /* boundary scan id */
-        __u16 ee_size;         /* size of EEPROM */
-        __u8  assembly[11];    /* assembly # */
-        __u8  assembly_rev;    /* assembly rev */
-        __u8  tracer[4];       /* tracer number */
-        __u16 assembly_cksum;  /* asm checksum */
-        __u16 power_consump;   /* pwr requirements */
-        __u16 num_asics;       /* number of asics */
-        __u16 bist_time;       /* min. bist time */
-        __u16 err_log_offset;  /* error log offset */
-        __u16 scan_path_offset;/* scan path offset */
-        __u16 cct_offset;
-        __u16 log_length;      /* length of err log */
-        __u16 xsum_end;        /* offset to end of
-                                  checksum */
-        __u8  reserved[4];
-        __u8  sflag;           /* starting sentinel */
-        __u8  part_number[13]; /* prom part number */
-        __u8  version[10];     /* version number */
-        __u8  signature[8];
-        __u16 eeprom_chksum;
-        __u32  data_stamp_offset;
-        __u8  eflag;            /* ending sentinel */
-} __attribute__((packed)) voyager_eprom_hdr_t;
-
-
-
-#define VOYAGER_EPROM_SIZE_OFFSET                              \
-       ((__u16)(&(((voyager_eprom_hdr_t *)0)->ee_size)))
-#define VOYAGER_XSUM_END_OFFSET                0x2a
-
-/* the following three definitions are for internal table layouts
- * in the module EPROMs.  We really only care about the IDs and
- * offsets */
-typedef struct voyager_sp_table {
-       __u8 asic_id;
-       __u8 bypass_flag;
-       __u16 asic_data_offset;
-       __u16 config_data_offset;
-} __attribute__((packed)) voyager_sp_table_t;
-
-typedef struct voyager_jtag_table {
-       __u8 icode[4];
-       __u8 runbist[4];
-       __u8 intest[4];
-       __u8 samp_preld[4];
-       __u8 ireg_len;
-} __attribute__((packed)) voyager_jtt_t;
-
-typedef struct voyager_asic_data_table {
-       __u8 jtag_id[4];
-       __u16 length_bsr;
-       __u16 length_bist_reg;
-       __u32 bist_clk;
-       __u16 subaddr_bits;
-       __u16 seed_bits;
-       __u16 sig_bits;
-       __u16 jtag_offset;
-} __attribute__((packed)) voyager_at_t;
-
-/* Voyager Interrupt Controller (VIC) registers */
-
-/* Base to add to Cross Processor Interrupts (CPIs) when triggering
- * the CPU IRQ line */
-/* register defines for the WCBICs (one per processor) */
-#define VOYAGER_WCBIC0 0x41            /* bus A node P1 processor 0 */
-#define VOYAGER_WCBIC1 0x49            /* bus A node P1 processor 1 */
-#define VOYAGER_WCBIC2 0x51            /* bus A node P2 processor 0 */
-#define VOYAGER_WCBIC3 0x59            /* bus A node P2 processor 1 */
-#define VOYAGER_WCBIC4 0x61            /* bus B node P1 processor 0 */
-#define VOYAGER_WCBIC5 0x69            /* bus B node P1 processor 1 */
-#define VOYAGER_WCBIC6 0x71            /* bus B node P2 processor 0 */
-#define VOYAGER_WCBIC7 0x79            /* bus B node P2 processor 1 */
-
-
-/* top of memory registers */
-#define VOYAGER_WCBIC_TOM_L    0x4
-#define VOYAGER_WCBIC_TOM_H    0x5
-
-/* register defines for Voyager Memory Control (VMC)
- * these are present on L4 machines only */
-#define        VOYAGER_VMC1            0x81
-#define VOYAGER_VMC2           0x91
-#define VOYAGER_VMC3           0xa1
-#define VOYAGER_VMC4           0xb1
-
-/* VMC Ports */
-#define VOYAGER_VMC_MEMORY_SETUP       0x9
-#      define VMC_Interleaving         0x01
-#      define VMC_4Way                 0x02
-#      define VMC_EvenCacheLines       0x04
-#      define VMC_HighLine             0x08
-#      define VMC_Start0_Enable        0x20
-#      define VMC_Start1_Enable        0x40
-#      define VMC_Vremap               0x80
-#define VOYAGER_VMC_BANK_DENSITY       0xa
-#      define  VMC_BANK_EMPTY          0
-#      define  VMC_BANK_4MB            1
-#      define  VMC_BANK_16MB           2
-#      define  VMC_BANK_64MB           3
-#      define  VMC_BANK0_MASK          0x03
-#      define  VMC_BANK1_MASK          0x0C
-#      define  VMC_BANK2_MASK          0x30
-#      define  VMC_BANK3_MASK          0xC0
-
-/* Magellan Memory Controller (MMC) defines - present on L5 */
-#define VOYAGER_MMC_ASIC_ID            1
-/* the two memory modules corresponding to memory cards in the system */
-#define VOYAGER_MMC_MEMORY0_MODULE     0x14
-#define VOYAGER_MMC_MEMORY1_MODULE     0x15
-/* the Magellan Memory Address (MMA) defines */
-#define VOYAGER_MMA_ASIC_ID            2
-
-/* Submodule number for the Quad Baseboard */
-#define VOYAGER_QUAD_BASEBOARD         1
-
-/* ASIC defines for the Quad Baseboard */
-#define VOYAGER_QUAD_QDATA0            1
-#define VOYAGER_QUAD_QDATA1            2
-#define VOYAGER_QUAD_QABC              3
-
-/* Useful areas in extended CMOS */
-#define VOYAGER_PROCESSOR_PRESENT_MASK 0x88a
-#define VOYAGER_MEMORY_CLICKMAP                0xa23
-#define VOYAGER_DUMP_LOCATION          0xb1a
-
-/* SUS In Control bit - used to tell SUS that we don't need to be
- * babysat anymore */
-#define VOYAGER_SUS_IN_CONTROL_PORT    0x3ff
-#      define VOYAGER_IN_CONTROL_FLAG  0x80
-
-/* Voyager PSI defines */
-#define VOYAGER_PSI_STATUS_REG         0x08
-#      define PSI_DC_FAIL              0x01
-#      define PSI_MON                  0x02
-#      define PSI_FAULT                0x04
-#      define PSI_ALARM                0x08
-#      define PSI_CURRENT              0x10
-#      define PSI_DVM                  0x20
-#      define PSI_PSCFAULT             0x40
-#      define PSI_STAT_CHG             0x80
-
-#define VOYAGER_PSI_SUPPLY_REG         0x8000
-       /* read */
-#      define PSI_FAIL_DC              0x01
-#      define PSI_FAIL_AC              0x02
-#      define PSI_MON_INT              0x04
-#      define PSI_SWITCH_OFF           0x08
-#      define PSI_HX_OFF               0x10
-#      define PSI_SECURITY             0x20
-#      define PSI_CMOS_BATT_LOW        0x40
-#      define PSI_CMOS_BATT_FAIL       0x80
-       /* write */
-#      define PSI_CLR_SWITCH_OFF       0x13
-#      define PSI_CLR_HX_OFF           0x14
-#      define PSI_CLR_CMOS_BATT_FAIL   0x17
-
-#define VOYAGER_PSI_MASK               0x8001
-#      define PSI_MASK_MASK            0x10
-
-#define VOYAGER_PSI_AC_FAIL_REG                0x8004
-#define        AC_FAIL_STAT_CHANGE             0x80
-
-#define VOYAGER_PSI_GENERAL_REG                0x8007
-       /* read */
-#      define PSI_SWITCH_ON            0x01
-#      define PSI_SWITCH_ENABLED       0x02
-#      define PSI_ALARM_ENABLED        0x08
-#      define PSI_SECURE_ENABLED       0x10
-#      define PSI_COLD_RESET           0x20
-#      define PSI_COLD_START           0x80
-       /* write */
-#      define PSI_POWER_DOWN           0x10
-#      define PSI_SWITCH_DISABLE       0x01
-#      define PSI_SWITCH_ENABLE        0x11
-#      define PSI_CLEAR                0x12
-#      define PSI_ALARM_DISABLE        0x03
-#      define PSI_ALARM_ENABLE         0x13
-#      define PSI_CLEAR_COLD_RESET     0x05
-#      define PSI_SET_COLD_RESET       0x15
-#      define PSI_CLEAR_COLD_START     0x07
-#      define PSI_SET_COLD_START       0x17
-
-
-
-struct voyager_bios_info {
-       __u8    len;
-       __u8    major;
-       __u8    minor;
-       __u8    debug;
-       __u8    num_classes;
-       __u8    class_1;
-       __u8    class_2;
-};
-
-/* The following structures and definitions are for the Kernel/SUS
- * interface; these are needed to find out how SUS initialised any Quad
- * boards in the system */
-
-#define        NUMBER_OF_MC_BUSSES     2
-#define SLOTS_PER_MC_BUS       8
-#define MAX_CPUS                16      /* 16 way CPU system */
-#define MAX_PROCESSOR_BOARDS   4       /* 4 processor slot system */
-#define MAX_CACHE_LEVELS       4       /* # of cache levels supported */
-#define MAX_SHARED_CPUS                4       /* # of CPUs that can share a LARC */
-#define NUMBER_OF_POS_REGS     8
-
-typedef struct {
-       __u8    MC_Slot;
-       __u8    POS_Values[NUMBER_OF_POS_REGS];
-} __attribute__((packed)) MC_SlotInformation_t;
-
-struct QuadDescription {
-       __u8  Type;     /* for type 0 (DYADIC or MONADIC) all fields
-                        * will be zero except for slot */
-       __u8 StructureVersion;
-       __u32 CPI_BaseAddress;
-       __u32  LARC_BankSize;
-       __u32 LocalMemoryStateBits;
-       __u8  Slot; /* Processor slots 1 - 4 */
-} __attribute__((packed));
-
-struct ProcBoardInfo {
-       __u8 Type;
-       __u8 StructureVersion;
-       __u8 NumberOfBoards;
-       struct QuadDescription QuadData[MAX_PROCESSOR_BOARDS];
-} __attribute__((packed));
-
-struct CacheDescription {
-       __u8 Level;
-       __u32 TotalSize;
-       __u16 LineSize;
-       __u8  Associativity;
-       __u8  CacheType;
-       __u8  WriteType;
-       __u8  Number_CPUs_SharedBy;
-       __u8  Shared_CPUs_Hardware_IDs[MAX_SHARED_CPUS];
-
-} __attribute__((packed));
-
-struct CPU_Description {
-       __u8 CPU_HardwareId;
-       char *FRU_String;
-       __u8 NumberOfCacheLevels;
-       struct CacheDescription CacheLevelData[MAX_CACHE_LEVELS];
-} __attribute__((packed));
-
-struct CPU_Info {
-       __u8 Type;
-       __u8 StructureVersion;
-       __u8 NumberOf_CPUs;
-       struct CPU_Description CPU_Data[MAX_CPUS];
-} __attribute__((packed));
-
-
-/*
- * This structure will be used by SUS and the OS.
- * The assumption about this structure is that no blank space is
- * packed in it by our friend the compiler.
- */
-typedef struct {
-       __u8    Mailbox_SUS;            /* Written to by SUS to give
-                                          commands/response to the OS */
-       __u8    Mailbox_OS;             /* Written to by the OS to give
-                                          commands/response to SUS */
-       __u8    SUS_MailboxVersion;     /* Tells the OS which iteration of the
-                                          interface SUS supports */
-       __u8    OS_MailboxVersion;      /* Tells SUS which iteration of the
-                                          interface the OS supports */
-       __u32   OS_Flags;               /* Flags set by the OS as info for
-                                          SUS */
-       __u32   SUS_Flags;              /* Flags set by SUS as info
-                                          for the OS */
-       __u32   WatchDogPeriod;         /* Watchdog period (in seconds) which
-                                          the DP uses to see if the OS
-                                          is dead */
-       __u32   WatchDogCount;          /* Updated by the OS on every tick. */
-       __u32   MemoryFor_SUS_ErrorLog; /* Flat 32 bit address which tells SUS
-                                          where to stuff the SUS error log
-                                          on a dump */
-       MC_SlotInformation_t MC_SlotInfo[NUMBER_OF_MC_BUSSES*SLOTS_PER_MC_BUS];
-                                       /* Storage for MCA POS data */
-       /* All new SECOND_PASS_INTERFACE fields added from this point */
-       struct ProcBoardInfo    *BoardData;
-       struct CPU_Info         *CPU_Data;
-       /* All new fields must be added from this point */
-} Voyager_KernelSUS_Mbox_t;
-
-/* structure for finding the right memory address to send a QIC CPI to */
-struct voyager_qic_cpi {
-       /* Each cache line (32 bytes) can trigger a cpi.  The cpi
-        * read/write may occur anywhere in the cache line---pick the
-        * middle to be safe */
-       struct  {
-               __u32 pad1[3];
-               __u32 cpi;
-               __u32 pad2[4];
-       } qic_cpi[8];
-};
-
-struct voyager_status {
-       __u32   power_fail:1;
-       __u32   switch_off:1;
-       __u32   request_from_kernel:1;
-};
-
-struct voyager_psi_regs {
-       __u8 cat_id;
-       __u8 cat_dev;
-       __u8 cat_control;
-       __u8 subaddr;
-       __u8 dummy4;
-       __u8 checkbit;
-       __u8 subaddr_low;
-       __u8 subaddr_high;
-       __u8 intstatus;
-       __u8 stat1;
-       __u8 stat3;
-       __u8 fault;
-       __u8 tms;
-       __u8 gen;
-       __u8 sysconf;
-       __u8 dummy15;
-};
-
-struct voyager_psi_subregs {
-       __u8 supply;
-       __u8 mask;
-       __u8 present;
-       __u8 DCfail;
-       __u8 ACfail;
-       __u8 fail;
-       __u8 UPSfail;
-       __u8 genstatus;
-};
-
-struct voyager_psi {
-       struct voyager_psi_regs regs;
-       struct voyager_psi_subregs subregs;
-};
-
-struct voyager_SUS {
-#define        VOYAGER_DUMP_BUTTON_NMI         0x1
-#define VOYAGER_SUS_VALID              0x2
-#define VOYAGER_SYSINT_COMPLETE                0x3
-       __u8    SUS_mbox;
-#define VOYAGER_NO_COMMAND             0x0
-#define VOYAGER_IGNORE_DUMP            0x1
-#define VOYAGER_DO_DUMP                        0x2
-#define VOYAGER_SYSINT_HANDSHAKE       0x3
-#define VOYAGER_DO_MEM_DUMP            0x4
-#define VOYAGER_SYSINT_WAS_RECOVERED   0x5
-       __u8    kernel_mbox;
-#define        VOYAGER_MAILBOX_VERSION         0x10
-       __u8    SUS_version;
-       __u8    kernel_version;
-#define VOYAGER_OS_HAS_SYSINT          0x1
-#define VOYAGER_OS_IN_PROGRESS         0x2
-#define VOYAGER_UPDATING_WDPERIOD      0x4
-       __u32   kernel_flags;
-#define VOYAGER_SUS_BOOTING            0x1
-#define VOYAGER_SUS_IN_PROGRESS                0x2
-       __u32   SUS_flags;
-       __u32   watchdog_period;
-       __u32   watchdog_count;
-       __u32   SUS_errorlog;
-       /* lots of system configuration stuff under here */
-};
-
-/* Variables exported by voyager_smp */
-extern __u32 voyager_extended_vic_processors;
-extern __u32 voyager_allowed_boot_processors;
-extern __u32 voyager_quad_processors;
-extern struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS];
-extern struct voyager_SUS *voyager_SUS;
-
-/* variables exported always */
-extern struct task_struct *voyager_thread;
-extern int voyager_level;
-extern struct voyager_status voyager_status;
-
-/* functions exported by the voyager and voyager_smp modules */
-extern int voyager_cat_readb(__u8 module, __u8 asic, int reg);
-extern void voyager_cat_init(void);
-extern void voyager_detect(struct voyager_bios_info *);
-extern void voyager_trap_init(void);
-extern void voyager_setup_irqs(void);
-extern int voyager_memory_detect(int region, __u32 *addr, __u32 *length);
-extern void voyager_smp_intr_init(void);
-extern __u8 voyager_extended_cmos_read(__u16 cmos_address);
-extern void voyager_smp_dump(void);
-extern void voyager_timer_interrupt(void);
-extern void smp_local_timer_interrupt(void);
-extern void voyager_power_off(void);
-extern void smp_voyager_power_off(void *dummy);
-extern void voyager_restart(void);
-extern void voyager_cat_power_off(void);
-extern void voyager_cat_do_common_interrupt(void);
-extern void voyager_handle_nmi(void);
-extern void voyager_smp_intr_init(void);
-/* Command codes for voyager_cat_psi() below */
-#define        VOYAGER_PSI_READ        0
-#define VOYAGER_PSI_WRITE      1
-#define VOYAGER_PSI_SUBREAD    2
-#define VOYAGER_PSI_SUBWRITE   3
-extern void voyager_cat_psi(__u8, __u16, __u8 *);
index 19144184983aba9ba73bf022453a8ef85e6665db..1df35417c412d2fb7dde775b7cc54dde7468dfa4 100644 (file)
@@ -15,10 +15,4 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
        return raw_irqs_disabled_flags(regs->flags);
 }
 
-static inline void xen_do_IRQ(int irq, struct pt_regs *regs)
-{
-       regs->orig_ax = ~irq;
-       do_IRQ(regs);
-}
-
 #endif /* _ASM_X86_XEN_EVENTS_H */
index 81fbd735aec47e3edbed57a5d83eb142f3c3571e..d5b7e90c0edfd3ca1bf7d05e3f4c4e642f8e6fb7 100644 (file)
@@ -38,22 +38,30 @@ extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
 enum xen_domain_type {
-       XEN_NATIVE,
-       XEN_PV_DOMAIN,
-       XEN_HVM_DOMAIN,
+       XEN_NATIVE,             /* running on bare hardware    */
+       XEN_PV_DOMAIN,          /* running in a PV domain      */
+       XEN_HVM_DOMAIN,         /* running in a Xen hvm domain */
 };
 
-extern enum xen_domain_type xen_domain_type;
-
 #ifdef CONFIG_XEN
-#define xen_domain()           (xen_domain_type != XEN_NATIVE)
+extern enum xen_domain_type xen_domain_type;
 #else
-#define xen_domain()           (0)
+#define xen_domain_type                XEN_NATIVE
 #endif
 
-#define xen_pv_domain()                (xen_domain() && xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()       (xen_domain() && xen_domain_type == XEN_HVM_DOMAIN)
+#define xen_domain()           (xen_domain_type != XEN_NATIVE)
+#define xen_pv_domain()                (xen_domain() &&                        \
+                                xen_domain_type == XEN_PV_DOMAIN)
+#define xen_hvm_domain()       (xen_domain() &&                        \
+                                xen_domain_type == XEN_HVM_DOMAIN)
+
+#ifdef CONFIG_XEN_DOM0
+#include <xen/interface/xen.h>
 
-#define xen_initial_domain()   (xen_pv_domain() && xen_start_info->flags & SIF_INITDOMAIN)
+#define xen_initial_domain()   (xen_pv_domain() && \
+                                xen_start_info->flags & SIF_INITDOMAIN)
+#else  /* !CONFIG_XEN_DOM0 */
+#define xen_initial_domain()   (0)
+#endif /* CONFIG_XEN_DOM0 */
 
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
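Moving the xen_domain_type declaration under CONFIG_XEN and defining it to XEN_NATIVE otherwise turns each of these predicates into a compile-time constant on non-Xen builds, so guarded code is discarded rather than merely skipped at runtime. A sketch of the effect (the callee name is hypothetical):

        if (xen_pv_domain())            /* !CONFIG_XEN: folds to 0 */
                xen_pv_setup_hook();    /* dead code, eliminated entirely */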
index 4bd990ee43df840a99ffb1a283cb2b6a664af7f2..1a918dde46b503272b26e6070d198460f825f0c0 100644 (file)
@@ -164,6 +164,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
+unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
index d364df03c1d6419ce4473e23b420d86167343775..339ce35648e60708b147ca24d09bdb44a469b269 100644 (file)
@@ -23,11 +23,12 @@ nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o   := $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o          := $(nostackp)
 CFLAGS_tsc.o           := $(nostackp)
+CFLAGS_paravirt.o      := $(nostackp)
 
 obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y                  += time_$(BITS).o ioport.o ldt.o dumpstack.o
-obj-y                  += setup.o i8259.o irqinit_$(BITS).o setup_percpu.o
+obj-y                  += setup.o i8259.o irqinit_$(BITS).o
 obj-$(CONFIG_X86_VISWS)        += visws_quirks.o
 obj-$(CONFIG_X86_32)   += probe_roms_32.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
@@ -49,31 +50,27 @@ obj-y                               += step.o
 obj-$(CONFIG_STACKTRACE)       += stacktrace.o
 obj-y                          += cpu/
 obj-y                          += acpi/
-obj-$(CONFIG_X86_BIOS_REBOOT)  += reboot.o
+obj-y                          += reboot.o
 obj-$(CONFIG_MCA)              += mca_32.o
 obj-$(CONFIG_X86_MSR)          += msr.o
 obj-$(CONFIG_X86_CPUID)                += cpuid.o
 obj-$(CONFIG_PCI)              += early-quirks.o
 apm-y                          := apm_32.o
 obj-$(CONFIG_APM)              += apm.o
-obj-$(CONFIG_X86_SMP)          += smp.o
-obj-$(CONFIG_X86_SMP)          += smpboot.o tsc_sync.o ipi.o tlb_$(BITS).o
-obj-$(CONFIG_X86_32_SMP)       += smpcommon.o
-obj-$(CONFIG_X86_64_SMP)       += tsc_sync.o smpcommon.o
+obj-$(CONFIG_SMP)              += smp.o
+obj-$(CONFIG_SMP)              += smpboot.o tsc_sync.o
+obj-$(CONFIG_SMP)              += setup_percpu.o
+obj-$(CONFIG_X86_64_SMP)       += tsc_sync.o
 obj-$(CONFIG_X86_TRAMPOLINE)   += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)      += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o nmi.o
-obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
+obj-y                          += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
-obj-$(CONFIG_X86_NUMAQ)                += numaq_32.o
-obj-$(CONFIG_X86_ES7000)       += es7000_32.o
-obj-$(CONFIG_X86_SUMMIT_NUMA)  += summit_32.o
-obj-y                          += vsmp_64.o
+obj-$(CONFIG_X86_VSMP)         += vsmp_64.o
 obj-$(CONFIG_KPROBES)          += kprobes.o
 obj-$(CONFIG_MODULES)          += module_$(BITS).o
 obj-$(CONFIG_EFI)              += efi.o efi_$(BITS).o efi_stub_$(BITS).o
@@ -114,16 +111,13 @@ obj-$(CONFIG_SWIOTLB)                     += pci-swiotlb_64.o # NB rename without _64
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
-        obj-y                          += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
-       obj-y                           += bios_uv.o uv_irq.o uv_sysfs.o
-        obj-y                          += genx2apic_cluster.o
-        obj-y                          += genx2apic_phys.o
-        obj-$(CONFIG_X86_PM_TIMER)     += pmtimer_64.o
-        obj-$(CONFIG_AUDIT)            += audit_64.o
-
-        obj-$(CONFIG_GART_IOMMU)       += pci-gart_64.o aperture_64.o
-        obj-$(CONFIG_CALGARY_IOMMU)    += pci-calgary_64.o tce_64.o
-        obj-$(CONFIG_AMD_IOMMU)                += amd_iommu_init.o amd_iommu.o
-
-        obj-$(CONFIG_PCI_MMCONFIG)     += mmconf-fam10h_64.o
+       obj-$(CONFIG_X86_UV)            += tlb_uv.o bios_uv.o uv_irq.o uv_sysfs.o uv_time.o
+       obj-$(CONFIG_X86_PM_TIMER)      += pmtimer_64.o
+       obj-$(CONFIG_AUDIT)             += audit_64.o
+
+       obj-$(CONFIG_GART_IOMMU)        += pci-gart_64.o aperture_64.o
+       obj-$(CONFIG_CALGARY_IOMMU)     += pci-calgary_64.o tce_64.o
+       obj-$(CONFIG_AMD_IOMMU)         += amd_iommu_init.o amd_iommu.o
+
+       obj-$(CONFIG_PCI_MMCONFIG)      += mmconf-fam10h_64.o
 endif
index 7678f10c45686e1113381ccf5af41fb2c4ad3906..a18eb7ce223643c8c9d6f40303c8180fa7d59e47 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
-#include <asm/genapic.h>
 #include <asm/io.h>
 #include <asm/mpspec.h>
 #include <asm/smp.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-# include <mach_apic.h>
-#endif
-
 static int __initdata acpi_force = 0;
 u32 acpi_rsdt_forced;
 #ifdef CONFIG_ACPI
@@ -56,16 +51,7 @@ int acpi_disabled = 1;
 EXPORT_SYMBOL(acpi_disabled);
 
 #ifdef CONFIG_X86_64
-
-#include <asm/proto.h>
-
-#else                          /* X86 */
-
-#ifdef CONFIG_X86_LOCAL_APIC
-#include <mach_apic.h>
-#include <mach_mpparse.h>
-#endif                         /* CONFIG_X86_LOCAL_APIC */
-
+# include <asm/proto.h>
 #endif                         /* X86 */
 
 #define BAD_MADT_ENTRY(entry, end) (                                       \
@@ -121,35 +107,18 @@ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
  */
 char *__init __acpi_map_table(unsigned long phys, unsigned long size)
 {
-       unsigned long base, offset, mapped_size;
-       int idx;
 
        if (!phys || !size)
                return NULL;
 
-       if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
-               return __va(phys);
-
-       offset = phys & (PAGE_SIZE - 1);
-       mapped_size = PAGE_SIZE - offset;
-       clear_fixmap(FIX_ACPI_END);
-       set_fixmap(FIX_ACPI_END, phys);
-       base = fix_to_virt(FIX_ACPI_END);
-
-       /*
-        * Most cases can be covered by the below.
-        */
-       idx = FIX_ACPI_END;
-       while (mapped_size < size) {
-               if (--idx < FIX_ACPI_BEGIN)
-                       return NULL;    /* cannot handle this */
-               phys += PAGE_SIZE;
-               clear_fixmap(idx);
-               set_fixmap(idx, phys);
-               mapped_size += PAGE_SIZE;
-       }
+       return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(char *map, unsigned long size)
+{
+       if (!map || !size)
+               return;
 
-       return ((unsigned char *)base + offset);
+       early_iounmap(map, size);
 }
 
 #ifdef CONFIG_PCI_MMCONFIG
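__acpi_map_table() is reduced to a thin wrapper, and the new __acpi_unmap_table() makes the mapping lifetime explicit: every early_ioremap() must be balanced by an early_iounmap() of the same size, since early fixmap slots are a scarce resource. A hedged caller sketch (the physical address and length are illustrative):

        char *tbl = __acpi_map_table(tbl_phys, tbl_len);

        if (tbl) {
                /* ... parse the table ... */
                __acpi_unmap_table(tbl, tbl_len);
        }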
@@ -239,7 +208,8 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
                       madt->address);
        }
 
-       acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
+       default_acpi_madt_oem_check(madt->header.oem_id,
+                                   madt->header.oem_table_id);
 
        return 0;
 }
@@ -884,7 +854,7 @@ static struct {
        DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
 } mp_ioapic_routing[MAX_IO_APICS];
 
-static int mp_find_ioapic(int gsi)
+int mp_find_ioapic(int gsi)
 {
        int i = 0;
 
@@ -899,6 +869,16 @@ static int mp_find_ioapic(int gsi)
        return -1;
 }
 
+int mp_find_ioapic_pin(int ioapic, int gsi)
+{
+       if (WARN_ON(ioapic == -1))
+               return -1;
+       if (WARN_ON(gsi > mp_ioapic_routing[ioapic].gsi_end))
+               return -1;
+
+       return gsi - mp_ioapic_routing[ioapic].gsi_base;
+}
+
 static u8 __init uniq_ioapic_id(u8 id)
 {
 #ifdef CONFIG_X86_32
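Un-staticing mp_find_ioapic() and adding mp_find_ioapic_pin() gives callers the full GSI-to-(ioapic, pin) translation with range checking; the pin is simply the offset from that IOAPIC's gsi_base, as the rewritten callers later in this file show. The combined lookup, in sketch form ('gsi' is the input):

        int ioapic = mp_find_ioapic(gsi);

        if (ioapic >= 0) {
                int pin = mp_find_ioapic_pin(ioapic, gsi);
                /* pin is the INTIN# on that IOAPIC, or -1 on a bad gsi */
        }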
@@ -912,8 +892,8 @@ static u8 __init uniq_ioapic_id(u8 id)
        DECLARE_BITMAP(used, 256);
        bitmap_zero(used, 256);
        for (i = 0; i < nr_ioapics; i++) {
-               struct mp_config_ioapic *ia = &mp_ioapics[i];
-               __set_bit(ia->mp_apicid, used);
+               struct mpc_ioapic *ia = &mp_ioapics[i];
+               __set_bit(ia->apicid, used);
        }
        if (!test_bit(id, used))
                return id;
@@ -945,29 +925,29 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
 
        idx = nr_ioapics;
 
-       mp_ioapics[idx].mp_type = MP_IOAPIC;
-       mp_ioapics[idx].mp_flags = MPC_APIC_USABLE;
-       mp_ioapics[idx].mp_apicaddr = address;
+       mp_ioapics[idx].type = MP_IOAPIC;
+       mp_ioapics[idx].flags = MPC_APIC_USABLE;
+       mp_ioapics[idx].apicaddr = address;
 
        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-       mp_ioapics[idx].mp_apicid = uniq_ioapic_id(id);
+       mp_ioapics[idx].apicid = uniq_ioapic_id(id);
 #ifdef CONFIG_X86_32
-       mp_ioapics[idx].mp_apicver = io_apic_get_version(idx);
+       mp_ioapics[idx].apicver = io_apic_get_version(idx);
 #else
-       mp_ioapics[idx].mp_apicver = 0;
+       mp_ioapics[idx].apicver = 0;
 #endif
        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
-       mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mp_apicid;
+       mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
        mp_ioapic_routing[idx].gsi_base = gsi_base;
        mp_ioapic_routing[idx].gsi_end = gsi_base +
            io_apic_get_redir_entries(idx);
 
-       printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
-              "GSI %d-%d\n", idx, mp_ioapics[idx].mp_apicid,
-              mp_ioapics[idx].mp_apicver, mp_ioapics[idx].mp_apicaddr,
+       printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
+              "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
+              mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
               mp_ioapic_routing[idx].gsi_base, mp_ioapic_routing[idx].gsi_end);
 
        nr_ioapics++;
@@ -996,19 +976,19 @@ int __init acpi_probe_gsi(void)
        return max_gsi + 1;
 }
 
-static void assign_to_mp_irq(struct mp_config_intsrc *m,
-                                   struct mp_config_intsrc *mp_irq)
+static void assign_to_mp_irq(struct mpc_intsrc *m,
+                                   struct mpc_intsrc *mp_irq)
 {
-       memcpy(mp_irq, m, sizeof(struct mp_config_intsrc));
+       memcpy(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static int mp_irq_cmp(struct mp_config_intsrc *mp_irq,
-                               struct mp_config_intsrc *m)
+static int mp_irq_cmp(struct mpc_intsrc *mp_irq,
+                               struct mpc_intsrc *m)
 {
-       return memcmp(mp_irq, m, sizeof(struct mp_config_intsrc));
+       return memcmp(mp_irq, m, sizeof(struct mpc_intsrc));
 }
 
-static void save_mp_irq(struct mp_config_intsrc *m)
+static void save_mp_irq(struct mpc_intsrc *m)
 {
        int i;
 
@@ -1026,7 +1006,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
 {
        int ioapic;
        int pin;
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
 
        /*
         * Convert 'gsi' to 'ioapic.pin'.
@@ -1034,7 +1014,7 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
        ioapic = mp_find_ioapic(gsi);
        if (ioapic < 0)
                return;
-       pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       pin = mp_find_ioapic_pin(ioapic, gsi);
 
        /*
         * TBD: This check is for faulty timer entries, where the override
@@ -1044,13 +1024,13 @@ void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
        if ((bus_irq == 0) && (trigger == 3))
                trigger = 1;
 
-       mp_irq.mp_type = MP_INTSRC;
-       mp_irq.mp_irqtype = mp_INT;
-       mp_irq.mp_irqflag = (trigger << 2) | polarity;
-       mp_irq.mp_srcbus = MP_ISA_BUS;
-       mp_irq.mp_srcbusirq = bus_irq;  /* IRQ */
-       mp_irq.mp_dstapic = mp_ioapics[ioapic].mp_apicid; /* APIC ID */
-       mp_irq.mp_dstirq = pin; /* INTIN# */
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (trigger << 2) | polarity;
+       mp_irq.srcbus = MP_ISA_BUS;
+       mp_irq.srcbusirq = bus_irq;     /* IRQ */
+       mp_irq.dstapic = mp_ioapics[ioapic].apicid; /* APIC ID */
+       mp_irq.dstirq = pin;    /* INTIN# */
 
        save_mp_irq(&mp_irq);
 }
@@ -1060,7 +1040,7 @@ void __init mp_config_acpi_legacy_irqs(void)
        int i;
        int ioapic;
        unsigned int dstapic;
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
 
 #if defined (CONFIG_MCA) || defined (CONFIG_EISA)
        /*
@@ -1085,7 +1065,7 @@ void __init mp_config_acpi_legacy_irqs(void)
        ioapic = mp_find_ioapic(0);
        if (ioapic < 0)
                return;
-       dstapic = mp_ioapics[ioapic].mp_apicid;
+       dstapic = mp_ioapics[ioapic].apicid;
 
        /*
         * Use the default configuration for the IRQs 0-15.  Unless
@@ -1095,16 +1075,14 @@ void __init mp_config_acpi_legacy_irqs(void)
                int idx;
 
                for (idx = 0; idx < mp_irq_entries; idx++) {
-                       struct mp_config_intsrc *irq = mp_irqs + idx;
+                       struct mpc_intsrc *irq = mp_irqs + idx;
 
                        /* Do we already have a mapping for this ISA IRQ? */
-                       if (irq->mp_srcbus == MP_ISA_BUS
-                           && irq->mp_srcbusirq == i)
+                       if (irq->srcbus == MP_ISA_BUS && irq->srcbusirq == i)
                                break;
 
                        /* Do we already have a mapping for this IOAPIC pin */
-                       if (irq->mp_dstapic == dstapic &&
-                           irq->mp_dstirq == i)
+                       if (irq->dstapic == dstapic && irq->dstirq == i)
                                break;
                }
 
@@ -1113,13 +1091,13 @@ void __init mp_config_acpi_legacy_irqs(void)
                        continue;       /* IRQ already used */
                }
 
-               mp_irq.mp_type = MP_INTSRC;
-               mp_irq.mp_irqflag = 0;  /* Conforming */
-               mp_irq.mp_srcbus = MP_ISA_BUS;
-               mp_irq.mp_dstapic = dstapic;
-               mp_irq.mp_irqtype = mp_INT;
-               mp_irq.mp_srcbusirq = i; /* Identity mapped */
-               mp_irq.mp_dstirq = i;
+               mp_irq.type = MP_INTSRC;
+               mp_irq.irqflag = 0;     /* Conforming */
+               mp_irq.srcbus = MP_ISA_BUS;
+               mp_irq.dstapic = dstapic;
+               mp_irq.irqtype = mp_INT;
+               mp_irq.srcbusirq = i; /* Identity mapped */
+               mp_irq.dstirq = i;
 
                save_mp_irq(&mp_irq);
        }
@@ -1156,7 +1134,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
                return gsi;
        }
 
-       ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       ioapic_pin = mp_find_ioapic_pin(ioapic, gsi);
 
 #ifdef CONFIG_X86_32
        if (ioapic_renumber_irq)
@@ -1230,22 +1208,22 @@ int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
                        u32 gsi, int triggering, int polarity)
 {
 #ifdef CONFIG_X86_MPPARSE
-       struct mp_config_intsrc mp_irq;
+       struct mpc_intsrc mp_irq;
        int ioapic;
 
        if (!acpi_ioapic)
                return 0;
 
        /* printing of the entry should match the mptable path exactly */
-       mp_irq.mp_type = MP_INTSRC;
-       mp_irq.mp_irqtype = mp_INT;
-       mp_irq.mp_irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
+       mp_irq.type = MP_INTSRC;
+       mp_irq.irqtype = mp_INT;
+       mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
                                (polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
-       mp_irq.mp_srcbus = number;
-       mp_irq.mp_srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
+       mp_irq.srcbus = number;
+       mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
        ioapic = mp_find_ioapic(gsi);
-       mp_irq.mp_dstapic = mp_ioapic_routing[ioapic].apic_id;
-       mp_irq.mp_dstirq = gsi - mp_ioapic_routing[ioapic].gsi_base;
+       mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
+       mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
 
        save_mp_irq(&mp_irq);
 #endif
@@ -1372,7 +1350,7 @@ static void __init acpi_process_madt(void)
                if (!error) {
                        acpi_lapic = 1;
 
-#ifdef CONFIG_X86_GENERICARCH
+#ifdef CONFIG_X86_BIGSMP
                        generic_bigsmp_probe();
 #endif
                        /*
@@ -1384,9 +1362,8 @@ static void __init acpi_process_madt(void)
                                acpi_ioapic = 1;
 
                                smp_found_config = 1;
-#ifdef CONFIG_X86_32
-                               setup_apic_routing();
-#endif
+                               if (apic->setup_apic_routing)
+                                       apic->setup_apic_routing();
                        }
                }
                if (error == -EINVAL) {
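The unconditional 32-bit setup_apic_routing() call becomes an optional method on the unified apic driver, so only drivers that need routing setup have to supply it. A hedged sketch of a driver providing the hook (the field names follow struct apic as used above; the rest is illustrative, and the many mandatory callbacks are omitted):

static void example_setup_apic_routing(void)
{
        /* model-specific routing decisions would go here */
}

static struct apic apic_example = {
        .name                   = "example",
        .setup_apic_routing     = example_setup_apic_routing,
        /* ... remaining mandatory callbacks elided ... */
};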
index 3355973b12ac6f8ca11ea0f5486a6643373d8042..580b4e29601005abf25adc6dfc011ce6fb6ef1bd 100644 (file)
@@ -3,8 +3,8 @@
  */
 #include <asm/segment.h>
 #include <asm/msr-index.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
 #include <asm/processor-flags.h>
 
        .code16
index a60c1f3bcb87826ebdfffc15f2ca78488308ba2f..7c243a2c5115dfa6653d7a4fec05d6521eb5ef4f 100644 (file)
@@ -101,6 +101,7 @@ int acpi_save_state_mem(void)
        stack_start.sp = temp_stack + sizeof(temp_stack);
        early_gdt_descr.address =
                        (unsigned long)get_cpu_gdt_table(smp_processor_id());
+       initial_gs = per_cpu_offset(smp_processor_id());
 #endif
        initial_code = (unsigned long)wakeup_long64;
        saved_magic = 0x123456789abcdef0;
index a12e6a9fb65963f12ad7697b0f830a85cb69ff64..8ded418b05939f1fc2f9ef6512fd21281bd2b10a 100644 (file)
@@ -1,7 +1,7 @@
        .section .text.page_aligned
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 
 # Copyright 2003, 2008 Pavel Machek <pavel@suse.cz>, distribute under GPLv2
 
index 96258d9dc9743c094e0d42af3ff6201d960a6abb..8ea5164cbd0451a27b9048699f60c2420057edb3 100644 (file)
@@ -1,8 +1,8 @@
 .text
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
 #include <asm/msr.h>
 #include <asm/asm-offsets.h>
 
index a84ac7b570e6bb4582788da74a4c570e28fd65e9..4c80f15574335d6b02bc93a7ed797c088c1da6c2 100644 (file)
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
           that might execute the to be patched code.
           Other CPUs are not running. */
        stop_nmi();
-#ifdef CONFIG_X86_MCE
-       stop_mce();
-#endif
+
+       /*
+        * Don't stop machine check exceptions while patching.
+        * MCEs only happen when something got corrupted, and in that
+        * case we must do something about the corruption.
+        * Ignoring it is worse than an unlikely patching race.
+        * Also, machine checks tend to be broadcast, and if one CPU
+        * goes into machine check the others follow quickly, so we don't
+        * expect a machine check to cause undue problems during code
+        * patching.
+        */
 
        apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
                                (unsigned long)__smp_locks_end);
 
        restart_nmi();
-#ifdef CONFIG_X86_MCE
-       restart_mce();
-#endif
 }
 
 /**
@@ -498,12 +503,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
  */
 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
 {
-       unsigned long flags;
        char *vaddr;
        int nr_pages = 2;
        struct page *pages[2];
        int i;
 
+       might_sleep();
        if (!core_kernel_text((unsigned long)addr)) {
                pages[0] = vmalloc_to_page(addr);
                pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +522,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
                nr_pages = 1;
        vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
        BUG_ON(!vaddr);
-       local_irq_save(flags);
+       local_irq_disable();
        memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
-       local_irq_restore(flags);
+       local_irq_enable();
        vunmap(vaddr);
        sync_core();
        /* Could also do a CLFLUSH here to speed up CPU recovery; but
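With the might_sleep() annotation and the vmap()-based aliasing, text_poke() is now explicitly a process-context API; since a sleeping function cannot be entered with interrupts off, the plain local_irq_disable()/local_irq_enable() pair replaces the save/restore variant. Hedged usage sketch (the address is an assumption and must be valid, instruction-boundary-aligned kernel text):

        static const unsigned char int3 = 0xcc; /* x86 breakpoint opcode */

        text_poke(addr, &int3, 1);              /* 'addr' is illustrative */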
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
deleted file mode 100644 (file)
index 570f36e..0000000
+++ /dev/null
@@ -1,2223 +0,0 @@
-/*
- *     Local APIC handling, local APIC timers
- *
- *     (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
- *
- *     Fixes
- *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
- *                                     thanks to Eric Gilmore
- *                                     and Rolf G. Tews
- *                                     for testing these extensively.
- *     Maciej W. Rozycki       :       Various updates and fixes.
- *     Mikael Pettersson       :       Power Management for UP-APIC.
- *     Pavel Machek and
- *     Mikael Pettersson       :       PM converted to driver model.
- */
-
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/interrupt.h>
-#include <linux/mc146818rtc.h>
-#include <linux/kernel_stat.h>
-#include <linux/sysdev.h>
-#include <linux/ioport.h>
-#include <linux/cpu.h>
-#include <linux/clockchips.h>
-#include <linux/acpi_pmtmr.h>
-#include <linux/module.h>
-#include <linux/dmi.h>
-#include <linux/dmar.h>
-#include <linux/ftrace.h>
-#include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/timex.h>
-
-#include <asm/atomic.h>
-#include <asm/mtrr.h>
-#include <asm/mpspec.h>
-#include <asm/desc.h>
-#include <asm/arch_hooks.h>
-#include <asm/hpet.h>
-#include <asm/pgalloc.h>
-#include <asm/i8253.h>
-#include <asm/idle.h>
-#include <asm/proto.h>
-#include <asm/apic.h>
-#include <asm/i8259.h>
-#include <asm/smp.h>
-
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-#include <mach_ipi.h>
-
-/*
- * Sanity check
- */
-#if ((SPURIOUS_APIC_VECTOR & 0x0F) != 0x0F)
-# error SPURIOUS_APIC_VECTOR definition error
-#endif
-
-#ifdef CONFIG_X86_32
-/*
- * Knob to control our willingness to enable the local APIC.
- *
- * +1=force-enable
- */
-static int force_enable_local_apic;
-/*
- * APIC command line parameters
- */
-static int __init parse_lapic(char *arg)
-{
-       force_enable_local_apic = 1;
-       return 0;
-}
-early_param("lapic", parse_lapic);
-/* Local APIC was disabled by the BIOS and enabled by the kernel */
-static int enabled_via_apicbase;
-
-#endif
-
-#ifdef CONFIG_X86_64
-static int apic_calibrate_pmtmr __initdata;
-static __init int setup_apicpmtimer(char *s)
-{
-       apic_calibrate_pmtmr = 1;
-       notsc_setup(NULL);
-       return 0;
-}
-__setup("apicpmtimer", setup_apicpmtimer);
-#endif
-
-#ifdef CONFIG_X86_64
-#define HAVE_X2APIC
-#endif
-
-#ifdef HAVE_X2APIC
-int x2apic;
-/* x2apic enabled before OS handover */
-static int x2apic_preenabled;
-static int disable_x2apic;
-static __init int setup_nox2apic(char *str)
-{
-       disable_x2apic = 1;
-       setup_clear_cpu_cap(X86_FEATURE_X2APIC);
-       return 0;
-}
-early_param("nox2apic", setup_nox2apic);
-#endif
-
-unsigned long mp_lapic_addr;
-int disable_apic;
-/* Disable local APIC timer from the kernel commandline or via dmi quirk */
-static int disable_apic_timer __cpuinitdata;
-/* Local APIC timer works in C2 */
-int local_apic_timer_c2_ok;
-EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
-
-int first_system_vector = 0xfe;
-
-/*
- * Debug level, exported for io_apic.c
- */
-unsigned int apic_verbosity;
-
-int pic_mode;
-
-/* Have we found an MP table */
-int smp_found_config;
-
-static struct resource lapic_resource = {
-       .name = "Local APIC",
-       .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
-};
-
-static unsigned int calibration_result;
-
-static int lapic_next_event(unsigned long delta,
-                           struct clock_event_device *evt);
-static void lapic_timer_setup(enum clock_event_mode mode,
-                             struct clock_event_device *evt);
-static void lapic_timer_broadcast(const struct cpumask *mask);
-static void apic_pm_activate(void);
-
-/*
- * The local apic timer can be used for any function which is CPU local.
- */
-static struct clock_event_device lapic_clockevent = {
-       .name           = "lapic",
-       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
-                       | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
-       .shift          = 32,
-       .set_mode       = lapic_timer_setup,
-       .set_next_event = lapic_next_event,
-       .broadcast      = lapic_timer_broadcast,
-       .rating         = 100,
-       .irq            = -1,
-};
-static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
-
-static unsigned long apic_phys;
-
-/*
- * Get the LAPIC version
- */
-static inline int lapic_get_version(void)
-{
-       return GET_APIC_VERSION(apic_read(APIC_LVR));
-}
-
-/*
- * Check if the APIC is integrated or a separate chip
- */
-static inline int lapic_is_integrated(void)
-{
-#ifdef CONFIG_X86_64
-       return 1;
-#else
-       return APIC_INTEGRATED(lapic_get_version());
-#endif
-}
-
-/*
- * Check whether this is a modern or a first-generation APIC
- */
-static int modern_apic(void)
-{
-       /* AMD systems use old APIC versions, so check the CPU */
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-           boot_cpu_data.x86 >= 0xf)
-               return 1;
-       return lapic_get_version() >= 0x14;
-}
-
-/*
- * Paravirt kernels also might be using these below ops. So we still
- * use generic apic_read()/apic_write(), which might be pointing to different
- * ops in PARAVIRT case.
- */
-void xapic_wait_icr_idle(void)
-{
-       while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
-               cpu_relax();
-}
-
-u32 safe_xapic_wait_icr_idle(void)
-{
-       u32 send_status;
-       int timeout;
-
-       timeout = 0;
-       do {
-               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
-               if (!send_status)
-                       break;
-               udelay(100);
-       } while (timeout++ < 1000);
-
-       return send_status;
-}
-
-void xapic_icr_write(u32 low, u32 id)
-{
-       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
-       apic_write(APIC_ICR, low);
-}
-
-static u64 xapic_icr_read(void)
-{
-       u32 icr1, icr2;
-
-       icr2 = apic_read(APIC_ICR2);
-       icr1 = apic_read(APIC_ICR);
-
-       return icr1 | ((u64)icr2 << 32);
-}
-
-static struct apic_ops xapic_ops = {
-       .read = native_apic_mem_read,
-       .write = native_apic_mem_write,
-       .icr_read = xapic_icr_read,
-       .icr_write = xapic_icr_write,
-       .wait_icr_idle = xapic_wait_icr_idle,
-       .safe_wait_icr_idle = safe_xapic_wait_icr_idle,
-};
-
-struct apic_ops __read_mostly *apic_ops = &xapic_ops;
-EXPORT_SYMBOL_GPL(apic_ops);
-
-#ifdef HAVE_X2APIC
-static void x2apic_wait_icr_idle(void)
-{
-       /* no need to wait for icr idle in x2apic */
-       return;
-}
-
-static u32 safe_x2apic_wait_icr_idle(void)
-{
-       /* no need to wait for icr idle in x2apic */
-       return 0;
-}
-
-void x2apic_icr_write(u32 low, u32 id)
-{
-       wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((__u64) id) << 32 | low);
-}
-
-static u64 x2apic_icr_read(void)
-{
-       unsigned long val;
-
-       rdmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), val);
-       return val;
-}
-
-static struct apic_ops x2apic_ops = {
-       .read = native_apic_msr_read,
-       .write = native_apic_msr_write,
-       .icr_read = x2apic_icr_read,
-       .icr_write = x2apic_icr_write,
-       .wait_icr_idle = x2apic_wait_icr_idle,
-       .safe_wait_icr_idle = safe_x2apic_wait_icr_idle,
-};
-#endif
-
-/**
- * enable_NMI_through_LVT0 - enable NMI through local vector table 0
- */
-void __cpuinit enable_NMI_through_LVT0(void)
-{
-       unsigned int v;
-
-       /* unmask and set to NMI */
-       v = APIC_DM_NMI;
-
-       /* Level triggered for 82489DX (32bit mode) */
-       if (!lapic_is_integrated())
-               v |= APIC_LVT_LEVEL_TRIGGER;
-
-       apic_write(APIC_LVT0, v);
-}
-
-#ifdef CONFIG_X86_32
-/**
- * get_physical_broadcast - Get number of physical broadcast IDs
- */
-int get_physical_broadcast(void)
-{
-       return modern_apic() ? 0xff : 0xf;
-}
-#endif
-
-/**
- * lapic_get_maxlvt - get the maximum number of local vector table entries
- */
-int lapic_get_maxlvt(void)
-{
-       unsigned int v;
-
-       v = apic_read(APIC_LVR);
-       /*
-        * - the APIC is always integrated in 64bit mode
-        * - 82489DXs do not report the number of LVT entries
-        */
-       return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
-}
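-
-/*
- * Typical values (an assumption about common hardware, not guaranteed by
- * this code): a P4-era integrated APIC reports maxlvt 5, covering the
- * timer, thermal, perfctr, LINT0, LINT1 and error entries, which is why
- * the thermal code elsewhere in this file checks for maxlvt >= 5.
- */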
-
-/*
- * Local APIC timer
- */
-
-/* Clock divisor */
-#define APIC_DIVISOR 16
-
-/*
- * This function sets up the local APIC timer, with a timeout of
- * 'clocks' APIC bus clocks. During calibration we actually call
- * this function twice on the boot CPU, once with a bogus timeout
- * value, and a second time for real. The other (non-calibrating) CPUs
- * call this function only once, with the real, calibrated value.
- *
- * We do reads before writes even if unnecessary, to get around the
- * P5 APIC double write bug.
- */
-static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
-{
-       unsigned int lvtt_value, tmp_value;
-
-       lvtt_value = LOCAL_TIMER_VECTOR;
-       if (!oneshot)
-               lvtt_value |= APIC_LVT_TIMER_PERIODIC;
-       if (!lapic_is_integrated())
-               lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
-
-       if (!irqen)
-               lvtt_value |= APIC_LVT_MASKED;
-
-       apic_write(APIC_LVTT, lvtt_value);
-
-       /*
-        * Divide PICLK by 16
-        */
-       tmp_value = apic_read(APIC_TDCR);
-       apic_write(APIC_TDCR,
-               (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
-               APIC_TDR_DIV_16);
-
-       if (!oneshot)
-               apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
-}
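-
-/*
- * For illustration, the two-call calibration sequence described above
- * looks like this (see calibrate_APIC_clock() and lapic_timer_setup()
- * below):
- *
- *	__setup_APIC_LVTT(0xffffffff, 0, 0);	// bogus timeout, irq masked
- *	// ... measure against the external timer ...
- *	__setup_APIC_LVTT(calibration_result,
- *			  mode != CLOCK_EVT_MODE_PERIODIC, 1);
- */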
-
-/*
- * Setup extended LVT, AMD specific (K8, family 10h)
- *
- * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
- * MCE interrupts are supported. Thus MCE offset must be set to 0.
- *
- * If mask=1, the LVT entry does not generate interrupts while mask=0
- * enables the vector. See also the BKDGs.
- */
-
-#define APIC_EILVT_LVTOFF_MCE 0
-#define APIC_EILVT_LVTOFF_IBS 1
-
-static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
-{
-       unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
-       unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
-
-       apic_write(reg, v);
-}
-
-u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
-{
-       setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
-       return APIC_EILVT_LVTOFF_MCE;
-}
-
-u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
-{
-       setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
-       return APIC_EILVT_LVTOFF_IBS;
-}
-EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
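-
-/*
- * Hypothetical caller, modelled on the oprofile IBS code (illustration
- * only): an IBS driver would unmask its NMI-delivered vector with
- *
- *	u8 offset = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
- *
- * (the vector is ignored for NMI delivery) and mask it again by calling
- * the same helper with mask == 1.
- */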
-
-/*
- * Program the next event, relative to now
- */
-static int lapic_next_event(unsigned long delta,
-                           struct clock_event_device *evt)
-{
-       apic_write(APIC_TMICT, delta);
-       return 0;
-}
-
-/*
- * Setup the lapic timer in periodic or oneshot mode
- */
-static void lapic_timer_setup(enum clock_event_mode mode,
-                             struct clock_event_device *evt)
-{
-       unsigned long flags;
-       unsigned int v;
-
-       /* Lapic used as dummy for broadcast ? */
-       if (evt->features & CLOCK_EVT_FEAT_DUMMY)
-               return;
-
-       local_irq_save(flags);
-
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-       case CLOCK_EVT_MODE_ONESHOT:
-               __setup_APIC_LVTT(calibration_result,
-                                 mode != CLOCK_EVT_MODE_PERIODIC, 1);
-               break;
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               v = apic_read(APIC_LVTT);
-               v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-               apic_write(APIC_LVTT, v);
-               apic_write(APIC_TMICT, 0xffffffff);
-               break;
-       case CLOCK_EVT_MODE_RESUME:
-               /* Nothing to do here */
-               break;
-       }
-
-       local_irq_restore(flags);
-}
-
-/*
- * Local APIC timer broadcast function
- */
-static void lapic_timer_broadcast(const struct cpumask *mask)
-{
-#ifdef CONFIG_SMP
-       send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
-#endif
-}
-
-/*
- * Set up the local APIC timer for this CPU. Copy the initialized values
- * from the boot CPU and register the clock event in the framework.
- */
-static void __cpuinit setup_APIC_timer(void)
-{
-       struct clock_event_device *levt = &__get_cpu_var(lapic_events);
-
-       memcpy(levt, &lapic_clockevent, sizeof(*levt));
-       levt->cpumask = cpumask_of(smp_processor_id());
-
-       clockevents_register_device(levt);
-}
-
-/*
- * In this function we calibrate the APIC bus clocks to the external timer.
- *
- * We want to do the calibration only once, since we want the local timer
- * irqs to be in sync. CPUs connected to the same APIC bus have the very
- * same bus frequency.
- *
- * This was previously done by reading the PIT/HPET and waiting for a wrap
- * around to find out that a tick has elapsed. I have a box where the PIT
- * readout is broken, so it never gets out of the wait loop again. This was
- * also reported by others.
- *
- * Monitoring the jiffies value is inaccurate and the clockevents
- * infrastructure allows us to do a simple substitution of the interrupt
- * handler.
- *
- * The calibration routine also uses the pm_timer when possible, as the PIT
- * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
- * back to normal later in the boot process).
- */
-
-#define LAPIC_CAL_LOOPS                (HZ/10)
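-
-/*
- * For illustration: with HZ=1000 this is 100 ticks of the external timer,
- * i.e. a 100 ms measurement window, the same 100 ms that the PM-timer
- * cross-check in calibrate_by_pmtimer() compares against.
- */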
-
-static __initdata int lapic_cal_loops = -1;
-static __initdata long lapic_cal_t1, lapic_cal_t2;
-static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
-static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
-static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
-
-/*
- * Temporary interrupt handler.
- */
-static void __init lapic_cal_handler(struct clock_event_device *dev)
-{
-       unsigned long long tsc = 0;
-       long tapic = apic_read(APIC_TMCCT);
-       unsigned long pm = acpi_pm_read_early();
-
-       if (cpu_has_tsc)
-               rdtscll(tsc);
-
-       switch (lapic_cal_loops++) {
-       case 0:
-               lapic_cal_t1 = tapic;
-               lapic_cal_tsc1 = tsc;
-               lapic_cal_pm1 = pm;
-               lapic_cal_j1 = jiffies;
-               break;
-
-       case LAPIC_CAL_LOOPS:
-               lapic_cal_t2 = tapic;
-               lapic_cal_tsc2 = tsc;
-               if (pm < lapic_cal_pm1)
-                       pm += ACPI_PM_OVRRUN;
-               lapic_cal_pm2 = pm;
-               lapic_cal_j2 = jiffies;
-               break;
-       }
-}
-
-static int __init calibrate_by_pmtimer(long deltapm, long *delta)
-{
-       const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
-       const long pm_thresh = pm_100ms / 100;
-       unsigned long mult;
-       u64 res;
-
-#ifndef CONFIG_X86_PM_TIMER
-       return -1;
-#endif
-
-       apic_printk(APIC_VERBOSE, "... PM timer delta = %ld\n", deltapm);
-
-       /* Check whether the PM timer is available */
-       if (!deltapm)
-               return -1;
-
-       mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
-
-       if (deltapm > (pm_100ms - pm_thresh) &&
-           deltapm < (pm_100ms + pm_thresh)) {
-               apic_printk(APIC_VERBOSE, "... PM timer result ok\n");
-       } else {
-               res = (((u64)deltapm) *  mult) >> 22;
-               do_div(res, 1000000);
-               pr_warning("APIC calibration not consistent "
-                       "with PM Timer: %ldms instead of 100ms\n",
-                       (long)res);
-               /* Correct the lapic counter value */
-               res = (((u64)(*delta)) * pm_100ms);
-               do_div(res, deltapm);
-               pr_info("APIC delta adjusted to PM-Timer: "
-                       "%lu (%ld)\n", (unsigned long)res, *delta);
-               *delta = (long)res;
-       }
-
-       return 0;
-}
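-
-/*
- * Worked example (assuming the standard 3.579545 MHz ACPI PM timer):
- * pm_100ms is 357954 ticks and pm_thresh is 3579 ticks (1%). A measured
- * deltapm of ~715909 ticks would be reported as ~200ms instead of 100ms,
- * and the lapic delta would be scaled down by a factor of two.
- */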
-
-static int __init calibrate_APIC_clock(void)
-{
-       struct clock_event_device *levt = &__get_cpu_var(lapic_events);
-       void (*real_handler)(struct clock_event_device *dev);
-       unsigned long deltaj;
-       long delta;
-       int pm_referenced = 0;
-
-       local_irq_disable();
-
-       /* Replace the global interrupt handler */
-       real_handler = global_clock_event->event_handler;
-       global_clock_event->event_handler = lapic_cal_handler;
-
-       /*
-        * Setup the APIC counter to maximum. There is no way the lapic
-        * can underflow in the 100ms detection time frame
-        */
-       __setup_APIC_LVTT(0xffffffff, 0, 0);
-
-       /* Let the interrupts run */
-       local_irq_enable();
-
-       while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-               cpu_relax();
-
-       local_irq_disable();
-
-       /* Restore the real event handler */
-       global_clock_event->event_handler = real_handler;
-
-       /* Build delta t1-t2 as apic timer counts down */
-       delta = lapic_cal_t1 - lapic_cal_t2;
-       apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
-
-       /* we trust the PM based calibration if possible */
-       pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
-                                       &delta);
-
-       /* Calculate the scaled math multiplication factor */
-       lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
-                                      lapic_clockevent.shift);
-       lapic_clockevent.max_delta_ns =
-               clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
-       lapic_clockevent.min_delta_ns =
-               clockevent_delta2ns(0xF, &lapic_clockevent);
-
-       calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
-
-       apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
-       apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
-       apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
-                   calibration_result);
-
-       if (cpu_has_tsc) {
-               delta = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
-               apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
-                           "%ld.%04ld MHz.\n",
-                           (delta / LAPIC_CAL_LOOPS) / (1000000 / HZ),
-                           (delta / LAPIC_CAL_LOOPS) % (1000000 / HZ));
-       }
-
-       apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
-                   "%u.%04u MHz.\n",
-                   calibration_result / (1000000 / HZ),
-                   calibration_result % (1000000 / HZ));
-
-       /*
-        * Do a sanity check on the APIC calibration result
-        */
-       if (calibration_result < (1000000 / HZ)) {
-               local_irq_enable();
-               pr_warning("APIC frequency too slow, disabling apic timer\n");
-               return -1;
-       }
-
-       levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
-
-       /*
-        * PM timer calibration failed or not turned on,
-        * so let's try APIC-timer-based calibration
-        */
-       if (!pm_referenced) {
-               apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
-
-               /*
-                * Setup the apic timer manually
-                */
-               levt->event_handler = lapic_cal_handler;
-               lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
-               lapic_cal_loops = -1;
-
-               /* Let the interrupts run */
-               local_irq_enable();
-
-               while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-                       cpu_relax();
-
-               /* Stop the lapic timer */
-               lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
-
-               /* Jiffies delta */
-               deltaj = lapic_cal_j2 - lapic_cal_j1;
-               apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
-
-               /* Check whether the jiffies result is consistent */
-               if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
-                       apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
-               else
-                       levt->features |= CLOCK_EVT_FEAT_DUMMY;
-       } else
-               local_irq_enable();
-
-       if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
-               pr_warning("APIC timer disabled due to verification failure\n");
-               return -1;
-       }
-
-       return 0;
-}
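-
-/*
- * Worked example (hypothetical numbers): with HZ=250 and a 200 MHz bus,
- * the timer counts at 12.5 MHz (divider 16), so delta over the 100 ms
- * window is ~1250000 counts. calibration_result = 1250000 * 16 / 25 =
- * 800000 clocks per tick, and the printout above reports
- * 800000 / 4000 = 200.0000 MHz host bus clock.
- */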
-
-/*
- * Setup the boot APIC
- *
- * Calibrate and verify the result.
- */
-void __init setup_boot_APIC_clock(void)
-{
-       /*
-        * The local apic timer can be disabled via the kernel
-        * commandline or from the CPU detection code. Register the lapic
-        * timer as a dummy clock event source on SMP systems, so the
-        * broadcast mechanism is used. On UP systems simply ignore it.
-        */
-       if (disable_apic_timer) {
-               pr_info("Disabling APIC timer\n");
-               /* No broadcast on UP! */
-               if (num_possible_cpus() > 1) {
-                       lapic_clockevent.mult = 1;
-                       setup_APIC_timer();
-               }
-               return;
-       }
-
-       apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
-                   "calibrating APIC timer ...\n");
-
-       if (calibrate_APIC_clock()) {
-               /* No broadcast on UP! */
-               if (num_possible_cpus() > 1)
-                       setup_APIC_timer();
-               return;
-       }
-
-       /*
-        * If nmi_watchdog is set to IO_APIC, we need the
-        * PIT/HPET going.  Otherwise register lapic as a dummy
-        * device.
-        */
-       if (nmi_watchdog != NMI_IO_APIC)
-               lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
-       else
-               pr_warning("APIC timer registered as dummy,"
-                       " due to nmi_watchdog=%d!\n", nmi_watchdog);
-
-       /* Setup the lapic or request the broadcast */
-       setup_APIC_timer();
-}
-
-void __cpuinit setup_secondary_APIC_clock(void)
-{
-       setup_APIC_timer();
-}
-
-/*
- * The guts of the apic timer interrupt
- */
-static void local_apic_timer_interrupt(void)
-{
-       int cpu = smp_processor_id();
-       struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
-
-       /*
-        * Normally we should not be here till the LAPIC has been initialized,
-        * but in some cases like kdump it's possible that a pending LAPIC
-        * timer interrupt from the previous kernel's context is delivered in
-        * the new kernel the moment interrupts are enabled.
-        *
-        * Interrupts are enabled early and the LAPIC is set up much later,
-        * hence it's possible that when we get here evt->event_handler is NULL.
-        * Check for event_handler being NULL and discard the interrupt as
-        * spurious.
-        */
-       if (!evt->event_handler) {
-               pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
-               /* Switch it off */
-               lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
-               return;
-       }
-
-       /*
-        * the NMI deadlock-detector uses this.
-        */
-       inc_irq_stat(apic_timer_irqs);
-
-       evt->event_handler(evt);
-}
-
-/*
- * Local APIC timer interrupt. This is the most natural way for doing
- * local interrupts, but local timer interrupts can be emulated by
- * broadcast interrupts too. [in case the hw doesn't support APIC timers]
- *
- * [ if a single-CPU system runs an SMP kernel then we call the local
- *   interrupt as well. Thus we cannot inline the local irq ... ]
- */
-void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-
-       /*
-        * NOTE! We'd better ACK the irq immediately,
-        * because timer handling can be slow.
-        */
-       ack_APIC_irq();
-       /*
-        * update_process_times() expects us to have done irq_enter().
-        * Besides, if we don't, timer interrupts ignore the global
-        * interrupt lock, which is the WrongThing (tm) to do.
-        */
-       exit_idle();
-       irq_enter();
-       local_apic_timer_interrupt();
-       irq_exit();
-
-       set_irq_regs(old_regs);
-}
-
-int setup_profiling_timer(unsigned int multiplier)
-{
-       return -EINVAL;
-}
-
-/*
- * Local APIC start and shutdown
- */
-
-/**
- * clear_local_APIC - shutdown the local APIC
- *
- * This is called, when a CPU is disabled and before rebooting, so the state of
- * the local APIC has no dangling leftovers. Also used to clean out any BIOS
- * leftovers during boot.
- */
-void clear_local_APIC(void)
-{
-       int maxlvt;
-       u32 v;
-
-       /* APIC hasn't been mapped yet */
-       if (!apic_phys)
-               return;
-
-       maxlvt = lapic_get_maxlvt();
-       /*
-        * Masking an LVT entry can trigger a local APIC error
-        * if the vector is zero. Mask LVTERR first to prevent this.
-        */
-       if (maxlvt >= 3) {
-               v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
-               apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
-       }
-       /*
-        * Careful: we have to set masks only first to deassert
-        * any level-triggered sources.
-        */
-       v = apic_read(APIC_LVTT);
-       apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
-       v = apic_read(APIC_LVT0);
-       apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-       v = apic_read(APIC_LVT1);
-       apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
-       if (maxlvt >= 4) {
-               v = apic_read(APIC_LVTPC);
-               apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
-       }
-
-       /* let's not touch this if we didn't frob it */
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
-       if (maxlvt >= 5) {
-               v = apic_read(APIC_LVTTHMR);
-               apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
-       }
-#endif
-       /*
-        * Clean APIC state for other OSs:
-        */
-       apic_write(APIC_LVTT, APIC_LVT_MASKED);
-       apic_write(APIC_LVT0, APIC_LVT_MASKED);
-       apic_write(APIC_LVT1, APIC_LVT_MASKED);
-       if (maxlvt >= 3)
-               apic_write(APIC_LVTERR, APIC_LVT_MASKED);
-       if (maxlvt >= 4)
-               apic_write(APIC_LVTPC, APIC_LVT_MASKED);
-
-       /* Integrated APIC (!82489DX) ? */
-       if (lapic_is_integrated()) {
-               if (maxlvt > 3)
-                       /* Clear ESR due to Pentium errata 3AP and 11AP */
-                       apic_write(APIC_ESR, 0);
-               apic_read(APIC_ESR);
-       }
-}
-
-/**
- * disable_local_APIC - clear and disable the local APIC
- */
-void disable_local_APIC(void)
-{
-       unsigned int value;
-
-       /* APIC hasn't been mapped yet */
-       if (!apic_phys)
-               return;
-
-       clear_local_APIC();
-
-       /*
-        * Disable APIC (implies clearing of registers
-        * for 82489DX!).
-        */
-       value = apic_read(APIC_SPIV);
-       value &= ~APIC_SPIV_APIC_ENABLED;
-       apic_write(APIC_SPIV, value);
-
-#ifdef CONFIG_X86_32
-       /*
-        * When LAPIC was disabled by the BIOS and enabled by the kernel,
-        * restore the disabled state.
-        */
-       if (enabled_via_apicbase) {
-               unsigned int l, h;
-
-               rdmsr(MSR_IA32_APICBASE, l, h);
-               l &= ~MSR_IA32_APICBASE_ENABLE;
-               wrmsr(MSR_IA32_APICBASE, l, h);
-       }
-#endif
-}
-
-/*
- * If Linux enabled the LAPIC against the BIOS default, disable it before
- * re-entering the BIOS on shutdown.  Otherwise the BIOS may get confused and
- * not power off.  Additionally, clear all LVT entries before
- * disable_local_APIC for the case where Linux didn't enable the LAPIC.
- */
-void lapic_shutdown(void)
-{
-       unsigned long flags;
-
-       if (!cpu_has_apic)
-               return;
-
-       local_irq_save(flags);
-
-#ifdef CONFIG_X86_32
-       if (!enabled_via_apicbase)
-               clear_local_APIC();
-       else
-#endif
-               disable_local_APIC();
-
-
-       local_irq_restore(flags);
-}
-
-/*
- * This is to verify that we're looking at a real local APIC.
- * Check these against your board if the CPUs aren't getting
- * started, for no apparent reason.
- */
-int __init verify_local_APIC(void)
-{
-       unsigned int reg0, reg1;
-
-       /*
-        * The version register is read-only in a real APIC.
-        */
-       reg0 = apic_read(APIC_LVR);
-       apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
-       apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
-       reg1 = apic_read(APIC_LVR);
-       apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
-
-       /*
-        * The two version reads above should print the same
-        * numbers.  If the second one is different, then we
- * are poking at a non-APIC.
-        */
-       if (reg1 != reg0)
-               return 0;
-
-       /*
-        * Check if the version looks reasonable.
-        */
-       reg1 = GET_APIC_VERSION(reg0);
-       if (reg1 == 0x00 || reg1 == 0xff)
-               return 0;
-       reg1 = lapic_get_maxlvt();
-       if (reg1 < 0x02 || reg1 == 0xff)
-               return 0;
-
-       /*
-        * The ID register is read/write in a real APIC.
-        */
-       reg0 = apic_read(APIC_ID);
-       apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
-       apic_write(APIC_ID, reg0 ^ APIC_ID_MASK);
-       reg1 = apic_read(APIC_ID);
-       apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
-       apic_write(APIC_ID, reg0);
-       if (reg1 != (reg0 ^ APIC_ID_MASK))
-               return 0;
-
-       /*
-        * The next two are just to see if we have sane values.
-        * They're only really relevant if we're in Virtual Wire
-        * compatibility mode, but most boxes are these days.
-        */
-       reg0 = apic_read(APIC_LVT0);
-       apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
-       reg1 = apic_read(APIC_LVT1);
-       apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
-
-       return 1;
-}
-
-/**
- * sync_Arb_IDs - synchronize APIC bus arbitration IDs
- */
-void __init sync_Arb_IDs(void)
-{
-       /*
-        * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Not
-        * needed on AMD.
-        */
-       if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-               return;
-
-       /*
-        * Wait for idle.
-        */
-       apic_wait_icr_idle();
-
-       apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
-       apic_write(APIC_ICR, APIC_DEST_ALLINC |
-                       APIC_INT_LEVELTRIG | APIC_DM_INIT);
-}
-
-/*
- * An initial setup of the virtual wire mode.
- */
-void __init init_bsp_APIC(void)
-{
-       unsigned int value;
-
-       /*
-        * Don't do the setup now if we have an SMP BIOS as the
-        * through-I/O-APIC virtual wire mode might be active.
-        */
-       if (smp_found_config || !cpu_has_apic)
-               return;
-
-       /*
-        * Do not trust the local APIC being empty at bootup.
-        */
-       clear_local_APIC();
-
-       /*
-        * Enable APIC.
-        */
-       value = apic_read(APIC_SPIV);
-       value &= ~APIC_VECTOR_MASK;
-       value |= APIC_SPIV_APIC_ENABLED;
-
-#ifdef CONFIG_X86_32
-       /* This bit is reserved on P4/Xeon and should be cleared */
-       if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
-           (boot_cpu_data.x86 == 15))
-               value &= ~APIC_SPIV_FOCUS_DISABLED;
-       else
-#endif
-               value |= APIC_SPIV_FOCUS_DISABLED;
-       value |= SPURIOUS_APIC_VECTOR;
-       apic_write(APIC_SPIV, value);
-
-       /*
-        * Set up the virtual wire mode.
-        */
-       apic_write(APIC_LVT0, APIC_DM_EXTINT);
-       value = APIC_DM_NMI;
-       if (!lapic_is_integrated())             /* 82489DX */
-               value |= APIC_LVT_LEVEL_TRIGGER;
-       apic_write(APIC_LVT1, value);
-}
-
-static void __cpuinit lapic_setup_esr(void)
-{
-       unsigned int oldvalue, value, maxlvt;
-
-       if (!lapic_is_integrated()) {
-               pr_info("No ESR for 82489DX.\n");
-               return;
-       }
-
-       if (esr_disable) {
-               /*
-                * Something untraceable is creating bad interrupts on
-                * secondary quads ... for the moment, just leave the
-                * ESR disabled - we can't do anything useful with the
-                * errors anyway - mbligh
-                */
-               pr_info("Leaving ESR disabled.\n");
-               return;
-       }
-
-       maxlvt = lapic_get_maxlvt();
-       if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
-               apic_write(APIC_ESR, 0);
-       oldvalue = apic_read(APIC_ESR);
-
-       /* enables sending errors */
-       value = ERROR_APIC_VECTOR;
-       apic_write(APIC_LVTERR, value);
-
-       /*
-        * The spec says to clear errors after enabling the vector.
-        */
-       if (maxlvt > 3)
-               apic_write(APIC_ESR, 0);
-       value = apic_read(APIC_ESR);
-       if (value != oldvalue)
-               apic_printk(APIC_VERBOSE, "ESR value before enabling "
-                       "vector: 0x%08x  after: 0x%08x\n",
-                       oldvalue, value);
-}
-
-
-/**
- * setup_local_APIC - setup the local APIC
- */
-void __cpuinit setup_local_APIC(void)
-{
-       unsigned int value;
-       int i, j;
-
-#ifdef CONFIG_X86_32
-       /* Pound the ESR really hard over the head with a big hammer - mbligh */
-       if (lapic_is_integrated() && esr_disable) {
-               apic_write(APIC_ESR, 0);
-               apic_write(APIC_ESR, 0);
-               apic_write(APIC_ESR, 0);
-               apic_write(APIC_ESR, 0);
-       }
-#endif
-
-       preempt_disable();
-
-       /*
-        * Double-check whether this APIC is really registered.
-        * This is meaningless in clustered apic mode, so we skip it.
-        */
-       if (!apic_id_registered())
-               BUG();
-
-       /*
-        * Intel recommends to set DFR, LDR and TPR before enabling
-        * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
-        * document number 292116).  So here it goes...
-        */
-       init_apic_ldr();
-
-       /*
-        * Set Task Priority to 'accept all'. We never change this
-        * later on.
-        */
-       value = apic_read(APIC_TASKPRI);
-       value &= ~APIC_TPRI_MASK;
-       apic_write(APIC_TASKPRI, value);
-
-       /*
-        * After a crash, we no longer service the interrupts and a pending
-        * interrupt from the previous kernel might still have its ISR bit set.
-        *
-        * Most probably by now the CPU has serviced that pending interrupt,
-        * but it might not have done the ack_APIC_irq() because it thought the
-        * interrupt came from the i8259 as ExtInt. The LAPIC did not get an
-        * EOI, so it does not clear the ISR bit and the CPU thinks it has
-        * already serviced the interrupt. Hence a vector might get locked. This
-        * was noticed for the timer irq (vector 0x31). Issue an extra EOI to
-        * clear the ISR.
-        */
-       for (i = APIC_ISR_NR - 1; i >= 0; i--) {
-               value = apic_read(APIC_ISR + i*0x10);
-               for (j = 31; j >= 0; j--) {
-                       if (value & (1<<j))
-                               ack_APIC_irq();
-               }
-       }
-
-       /*
-        * Now that we are all set up, enable the APIC
-        */
-       value = apic_read(APIC_SPIV);
-       value &= ~APIC_VECTOR_MASK;
-       /*
-        * Enable APIC
-        */
-       value |= APIC_SPIV_APIC_ENABLED;
-
-#ifdef CONFIG_X86_32
-       /*
-        * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
-        * certain networking cards. If high frequency interrupts are
-        * happening on a particular IOAPIC pin, plus the IOAPIC routing
-        * entry is masked/unmasked at a high rate as well then sooner or
-        * later IOAPIC line gets 'stuck', no more interrupts are received
-        * from the device. If focus CPU is disabled then the hang goes
-        * away, oh well :-(
-        *
-        * [ This bug can be reproduced easily with a level-triggered
-        *   PCI Ne2000 networking cards and PII/PIII processors, dual
-        *   BX chipset. ]
-        */
-       /*
-        * Actually, disabling the focus CPU check just makes the hang less
-        * frequent, as it makes the interrupt distribution model more
-        * like LRU than MRU (the short-term load is more even across CPUs).
-        * See also the comment in end_level_ioapic_irq().  --macro
-        */
-
-       /*
-        * - enable the focus processor (bit==0)
-        * - 64bit mode always uses processor focus,
-        *   so there is no need to set it
-        */
-       value &= ~APIC_SPIV_FOCUS_DISABLED;
-#endif
-
-       /*
-        * Set spurious IRQ vector
-        */
-       value |= SPURIOUS_APIC_VECTOR;
-       apic_write(APIC_SPIV, value);
-
-       /*
-        * Set up LVT0, LVT1:
-        *
-        * set up through-local-APIC on the BP's LINT0. This is not
-        * strictly necessary in pure symmetric-IO mode, but sometimes
-        * we delegate interrupts to the 8259A.
-        */
-       /*
-        * TODO: set up through-local-APIC from through-I/O-APIC? --macro
-        */
-       value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
-       if (!smp_processor_id() && (pic_mode || !value)) {
-               value = APIC_DM_EXTINT;
-               apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
-                               smp_processor_id());
-       } else {
-               value = APIC_DM_EXTINT | APIC_LVT_MASKED;
-               apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
-                               smp_processor_id());
-       }
-       apic_write(APIC_LVT0, value);
-
-       /*
-        * only the BP should see the LINT1 NMI signal, obviously.
-        */
-       if (!smp_processor_id())
-               value = APIC_DM_NMI;
-       else
-               value = APIC_DM_NMI | APIC_LVT_MASKED;
-       if (!lapic_is_integrated())             /* 82489DX */
-               value |= APIC_LVT_LEVEL_TRIGGER;
-       apic_write(APIC_LVT1, value);
-
-       preempt_enable();
-}
-
-void __cpuinit end_local_APIC_setup(void)
-{
-       lapic_setup_esr();
-
-#ifdef CONFIG_X86_32
-       {
-               unsigned int value;
-               /* Disable the local apic timer */
-               value = apic_read(APIC_LVTT);
-               value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
-               apic_write(APIC_LVTT, value);
-       }
-#endif
-
-       setup_apic_nmi_watchdog(NULL);
-       apic_pm_activate();
-}
-
-#ifdef HAVE_X2APIC
-void check_x2apic(void)
-{
-       int msr, msr2;
-
-       rdmsr(MSR_IA32_APICBASE, msr, msr2);
-
-       if (msr & X2APIC_ENABLE) {
-               pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
-               x2apic_preenabled = x2apic = 1;
-               apic_ops = &x2apic_ops;
-       }
-}
-
-void enable_x2apic(void)
-{
-       int msr, msr2;
-
-       rdmsr(MSR_IA32_APICBASE, msr, msr2);
-       if (!(msr & X2APIC_ENABLE)) {
-               pr_info("Enabling x2apic\n");
-               wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
-       }
-}
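-
-/*
- * For reference: X2APIC_ENABLE is bit 10 of the IA32_APIC_BASE MSR (0x1b);
- * together with the global enable bit (bit 11, MSR_IA32_APICBASE_ENABLE)
- * it switches the APIC from MMIO (xapic) to MSR (x2apic) register access.
- */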
-
-void __init enable_IR_x2apic(void)
-{
-#ifdef CONFIG_INTR_REMAP
-       int ret;
-       unsigned long flags;
-
-       if (!cpu_has_x2apic)
-               return;
-
-       if (!x2apic_preenabled && disable_x2apic) {
-               pr_info("Skipped enabling x2apic and Interrupt-remapping "
-                       "because of nox2apic\n");
-               return;
-       }
-
-       if (x2apic_preenabled && disable_x2apic)
-               panic("BIOS already enabled x2apic, can't enforce nox2apic");
-
-       if (!x2apic_preenabled && skip_ioapic_setup) {
-               pr_info("Skipped enabling x2apic and Interrupt-remapping "
-                       "because of skipping io-apic setup\n");
-               return;
-       }
-
-       ret = dmar_table_init();
-       if (ret) {
-               pr_info("dmar_table_init() failed with %d\n", ret);
-
-               if (x2apic_preenabled)
-                       panic("x2apic enabled by BIOS, but IR enabling failed");
-               else
-                       pr_info("Not enabling x2apic, Intr-remapping\n");
-               return;
-       }
-
-       local_irq_save(flags);
-       mask_8259A();
-
-       ret = save_mask_IO_APIC_setup();
-       if (ret) {
-               pr_info("Saving IO-APIC state failed: %d\n", ret);
-               goto end;
-       }
-
-       ret = enable_intr_remapping(1);
-
-       if (ret && x2apic_preenabled) {
-               local_irq_restore(flags);
-               panic("x2apic enabled by BIOS, but IR enabling failed");
-       }
-
-       if (ret)
-               goto end_restore;
-
-       if (!x2apic) {
-               x2apic = 1;
-               apic_ops = &x2apic_ops;
-               enable_x2apic();
-       }
-
-end_restore:
-       if (ret)
-               /*
-                * IR enabling failed
-                */
-               restore_IO_APIC_setup();
-       else
-               reinit_intr_remapped_IO_APIC(x2apic_preenabled);
-
-end:
-       unmask_8259A();
-       local_irq_restore(flags);
-
-       if (!ret) {
-               if (!x2apic_preenabled)
-                       pr_info("Enabled x2apic and interrupt-remapping\n");
-               else
-                       pr_info("Enabled Interrupt-remapping\n");
-       } else
-               pr_err("Failed to enable Interrupt-remapping and x2apic\n");
-#else
-       if (!cpu_has_x2apic)
-               return;
-
-       if (x2apic_preenabled)
-               panic("x2apic enabled prior to OS handover,"
-                     " enable CONFIG_INTR_REMAP");
-
-       pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping"
-               " and x2apic\n");
-#endif
-
-       return;
-}
-#endif /* HAVE_X2APIC */
-
-#ifdef CONFIG_X86_64
-/*
- * Detect and enable local APICs on non-SMP boards.
- * Original code written by Keir Fraser.
- * On AMD64 we trust the BIOS - if it says no APIC it is likely
- * not correctly set up (usually the APIC timer won't work etc.)
- */
-static int __init detect_init_APIC(void)
-{
-       if (!cpu_has_apic) {
-               pr_info("No local APIC present\n");
-               return -1;
-       }
-
-       mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-       boot_cpu_physical_apicid = 0;
-       return 0;
-}
-#else
-/*
- * Detect and initialize APIC
- */
-static int __init detect_init_APIC(void)
-{
-       u32 h, l, features;
-
-       /* Disabled by kernel option? */
-       if (disable_apic)
-               return -1;
-
-       switch (boot_cpu_data.x86_vendor) {
-       case X86_VENDOR_AMD:
-               if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
-                   (boot_cpu_data.x86 >= 15))
-                       break;
-               goto no_apic;
-       case X86_VENDOR_INTEL:
-               if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
-                   (boot_cpu_data.x86 == 5 && cpu_has_apic))
-                       break;
-               goto no_apic;
-       default:
-               goto no_apic;
-       }
-
-       if (!cpu_has_apic) {
-               /*
-                * Override the BIOS and try to enable the local APIC only if
-                * "lapic" was specified.
-                */
-               if (!force_enable_local_apic) {
-                       pr_info("Local APIC disabled by BIOS -- "
-                               "you can enable it with \"lapic\"\n");
-                       return -1;
-               }
-               /*
-                * Some BIOSes disable the local APIC in the APIC_BASE
-                * MSR. This can only be done in software for Intel P6 or later
-                * and AMD K7 (Model > 1) or later.
-                */
-               rdmsr(MSR_IA32_APICBASE, l, h);
-               if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-                       pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-                       l &= ~MSR_IA32_APICBASE_BASE;
-                       l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
-                       wrmsr(MSR_IA32_APICBASE, l, h);
-                       enabled_via_apicbase = 1;
-               }
-       }
-       /*
-        * The APIC feature bit should now be enabled
-        * in `cpuid'
-        */
-       features = cpuid_edx(1);
-       if (!(features & (1 << X86_FEATURE_APIC))) {
-               pr_warning("Could not enable APIC!\n");
-               return -1;
-       }
-       set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
-       mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
-
-       /* The BIOS may have set up the APIC at some other address */
-       rdmsr(MSR_IA32_APICBASE, l, h);
-       if (l & MSR_IA32_APICBASE_ENABLE)
-               mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
-
-       pr_info("Found and enabled local APIC!\n");
-
-       apic_pm_activate();
-
-       return 0;
-
-no_apic:
-       pr_info("No local APIC present or hardware disabled\n");
-       return -1;
-}
-#endif
-
-#ifdef CONFIG_X86_64
-void __init early_init_lapic_mapping(void)
-{
-       unsigned long phys_addr;
-
-       /*
-        * If no local APIC can be found then bail out:
-        * it means there is no MP table and no MADT
-        */
-       if (!smp_found_config)
-               return;
-
-       phys_addr = mp_lapic_addr;
-
-       set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
-       apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
-                   APIC_BASE, phys_addr);
-
-       /*
-        * Fetch the APIC ID of the BSP in case we have a
-        * default configuration (or the MP table is broken).
-        */
-       boot_cpu_physical_apicid = read_apic_id();
-}
-#endif
-
-/**
- * init_apic_mappings - initialize APIC mappings
- */
-void __init init_apic_mappings(void)
-{
-#ifdef HAVE_X2APIC
-       if (x2apic) {
-               boot_cpu_physical_apicid = read_apic_id();
-               return;
-       }
-#endif
-
-       /*
-        * If no local APIC can be found then set up a fake all
-        * zeroes page to simulate the local APIC and another
-        * one for the IO-APIC.
-        */
-       if (!smp_found_config && detect_init_APIC()) {
-               apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
-               apic_phys = __pa(apic_phys);
-       } else
-               apic_phys = mp_lapic_addr;
-
-       set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
-       apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
-                               APIC_BASE, apic_phys);
-
-       /*
-        * Fetch the APIC ID of the BSP in case we have a
-        * default configuration (or the MP table is broken).
-        */
-       if (boot_cpu_physical_apicid == -1U)
-               boot_cpu_physical_apicid = read_apic_id();
-}
-
-/*
- * This initializes the IO-APIC and APIC hardware if this is
- * a UP kernel.
- */
-int apic_version[MAX_APICS];
-
-int __init APIC_init_uniprocessor(void)
-{
-#ifdef CONFIG_X86_64
-       if (disable_apic) {
-               pr_info("Apic disabled\n");
-               return -1;
-       }
-       if (!cpu_has_apic) {
-               disable_apic = 1;
-               pr_info("Apic disabled by BIOS\n");
-               return -1;
-       }
-#else
-       if (!smp_found_config && !cpu_has_apic)
-               return -1;
-
-       /*
-        * Complain if the BIOS pretends there is one.
-        */
-       if (!cpu_has_apic &&
-           APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
-               pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
-                       boot_cpu_physical_apicid);
-               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
-               return -1;
-       }
-#endif
-
-#ifdef HAVE_X2APIC
-       enable_IR_x2apic();
-#endif
-#ifdef CONFIG_X86_64
-       setup_apic_routing();
-#endif
-
-       verify_local_APIC();
-       connect_bsp_APIC();
-
-#ifdef CONFIG_X86_64
-       apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
-#else
-       /*
-        * Hack: In case of kdump, after a crash, kernel might be booting
-        * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid
-        * might be zero if read from MP tables. Get it from LAPIC.
-        */
-# ifdef CONFIG_CRASH_DUMP
-       boot_cpu_physical_apicid = read_apic_id();
-# endif
-#endif
-       physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
-       setup_local_APIC();
-
-#ifdef CONFIG_X86_64
-       /*
-        * Now enable IO-APICs, actually call clear_IO_APIC
-        * We need clear_IO_APIC before enabling vector on BP
-        */
-       if (!skip_ioapic_setup && nr_ioapics)
-               enable_IO_APIC();
-#endif
-
-#ifdef CONFIG_X86_IO_APIC
-       if (!smp_found_config || skip_ioapic_setup || !nr_ioapics)
-#endif
-               localise_nmi_watchdog();
-       end_local_APIC_setup();
-
-#ifdef CONFIG_X86_IO_APIC
-       if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
-               setup_IO_APIC();
-# ifdef CONFIG_X86_64
-       else
-               nr_ioapics = 0;
-# endif
-#endif
-
-#ifdef CONFIG_X86_64
-       setup_boot_APIC_clock();
-       check_nmi_watchdog();
-#else
-       setup_boot_clock();
-#endif
-
-       return 0;
-}
-
-/*
- * Local APIC interrupts
- */
-
-/*
- * This interrupt should _never_ happen with our APIC/SMP architecture
- */
-void smp_spurious_interrupt(struct pt_regs *regs)
-{
-       u32 v;
-
-       exit_idle();
-       irq_enter();
-       /*
-        * Check if this really is a spurious interrupt and ACK it
-        * if it is a vectored one.  Just in case...
-        * Spurious interrupts should not be ACKed.
-        */
-       v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-       if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
-               ack_APIC_irq();
-
-       inc_irq_stat(irq_spurious_count);
-
-       /* see sw-dev-man vol 3, chapter 7.4.13.5 */
-       pr_info("spurious APIC interrupt on CPU#%d, "
-               "should never happen.\n", smp_processor_id());
-       irq_exit();
-}
-
-/*
- * This interrupt should never happen with our APIC/SMP architecture
- */
-void smp_error_interrupt(struct pt_regs *regs)
-{
-       u32 v, v1;
-
-       exit_idle();
-       irq_enter();
-       /* First tickle the hardware, only then report what went on. -- REW */
-       v = apic_read(APIC_ESR);
-       apic_write(APIC_ESR, 0);
-       v1 = apic_read(APIC_ESR);
-       ack_APIC_irq();
-       atomic_inc(&irq_err_count);
-
-       /*
-        * Here is what the APIC error bits mean:
-        * 0: Send CS error
-        * 1: Receive CS error
-        * 2: Send accept error
-        * 3: Receive accept error
-        * 4: Reserved
-        * 5: Send illegal vector
-        * 6: Received illegal vector
-        * 7: Illegal register address
-        */
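-       /* For example, a read-back value of 0x04 would report a send accept error. */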
-       pr_debug("APIC error on CPU%d: %02x(%02x)\n",
-               smp_processor_id(), v, v1);
-       irq_exit();
-}
-
-/**
- * connect_bsp_APIC - attach the APIC to the interrupt system
- */
-void __init connect_bsp_APIC(void)
-{
-#ifdef CONFIG_X86_32
-       if (pic_mode) {
-               /*
-                * Do not trust the local APIC being empty at bootup.
-                */
-               clear_local_APIC();
-               /*
-                * PIC mode, enable APIC mode in the IMCR, i.e.  connect BSP's
-                * local APIC to INT and NMI lines.
-                */
-               apic_printk(APIC_VERBOSE, "leaving PIC mode, "
-                               "enabling APIC mode.\n");
-               outb(0x70, 0x22);
-               outb(0x01, 0x23);
-       }
-#endif
-       enable_apic_mode();
-}
-
-/**
- * disconnect_bsp_APIC - detach the APIC from the interrupt system
- * @virt_wire_setup:   indicates whether virtual wire mode is selected
- *
- * Virtual wire mode is necessary to deliver legacy interrupts even when the
- * APIC is disabled.
- */
-void disconnect_bsp_APIC(int virt_wire_setup)
-{
-       unsigned int value;
-
-#ifdef CONFIG_X86_32
-       if (pic_mode) {
-               /*
-                * Put the board back into PIC mode (has an effect only on
-                * certain older boards).  Note that APIC interrupts, including
-                * IPIs, won't work beyond this point!  The only exception are
-                * INIT IPIs.
-                */
-               apic_printk(APIC_VERBOSE, "disabling APIC mode, "
-                               "entering PIC mode.\n");
-               outb(0x70, 0x22);
-               outb(0x00, 0x23);
-               return;
-       }
-#endif
-
-       /* Go back to Virtual Wire compatibility mode */
-
-       /* For the spurious interrupt use vector F, and enable it */
-       value = apic_read(APIC_SPIV);
-       value &= ~APIC_VECTOR_MASK;
-       value |= APIC_SPIV_APIC_ENABLED;
-       value |= 0xf;
-       apic_write(APIC_SPIV, value);
-
-       if (!virt_wire_setup) {
-               /*
-                * For LVT0 make it edge triggered, active high,
-                * external and enabled
-                */
-               value = apic_read(APIC_LVT0);
-               value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
-                       APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
-                       APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
-               value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
-               value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
-               apic_write(APIC_LVT0, value);
-       } else {
-               /* Disable LVT0 */
-               apic_write(APIC_LVT0, APIC_LVT_MASKED);
-       }
-
-       /*
-        * For LVT1 make it edge triggered, active high,
-        * nmi and enabled
-        */
-       value = apic_read(APIC_LVT1);
-       value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
-                       APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
-                       APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
-       value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
-       value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
-       apic_write(APIC_LVT1, value);
-}
-
-void __cpuinit generic_processor_info(int apicid, int version)
-{
-       int cpu;
-
-       /*
-        * Validate version
-        */
-       if (version == 0x0) {
-               pr_warning("BIOS bug, APIC version is 0 for CPU with apicid "
-                          "0x%x! fixing up to 0x10. (tell your hw vendor)\n",
-                          apicid);
-               version = 0x10;
-       }
-       apic_version[apicid] = version;
-
-       if (num_processors >= nr_cpu_ids) {
-               int max = nr_cpu_ids;
-               int thiscpu = max + disabled_cpus;
-
-               pr_warning(
-                       "ACPI: NR_CPUS/possible_cpus limit of %i reached."
-                       "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
-
-               disabled_cpus++;
-               return;
-       }
-
-       num_processors++;
-       cpu = cpumask_next_zero(-1, cpu_present_mask);
-
-       if (version != apic_version[boot_cpu_physical_apicid])
-               WARN_ONCE(1,
-                       "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
-                       apic_version[boot_cpu_physical_apicid], cpu, version);
-
-       physid_set(apicid, phys_cpu_present_map);
-       if (apicid == boot_cpu_physical_apicid) {
-               /*
-                * x86_bios_cpu_apicid is required to have processors listed
-                * in the same order as the logical cpu numbers. Hence the
-                * first entry is the BSP, and so on.
-                */
-               cpu = 0;
-       }
-       if (apicid > max_physical_apicid)
-               max_physical_apicid = apicid;
-
-#ifdef CONFIG_X86_32
-       /*
-        * It would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y,
-        * but we need to work out other dependencies like SMP_SUSPEND etc.
-        * before this can be done without some confusion.
-        * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
-        *       - Ashok Raj <ashok.raj@intel.com>
-        */
-       if (max_physical_apicid >= 8) {
-               switch (boot_cpu_data.x86_vendor) {
-               case X86_VENDOR_INTEL:
-                       if (!APIC_XAPIC(version)) {
-                               def_to_bigsmp = 0;
-                               break;
-                       }
-                       /* If P4 or above, fall through */
-               case X86_VENDOR_AMD:
-                       def_to_bigsmp = 1;
-               }
-       }
-#endif
-
-#if defined(CONFIG_X86_SMP) || defined(CONFIG_X86_64)
-       /* are we being called early in kernel startup? */
-       if (early_per_cpu_ptr(x86_cpu_to_apicid)) {
-               u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
-               u16 *bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-
-               cpu_to_apicid[cpu] = apicid;
-               bios_cpu_apicid[cpu] = apicid;
-       } else {
-               per_cpu(x86_cpu_to_apicid, cpu) = apicid;
-               per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
-       }
-#endif
-
-       set_cpu_possible(cpu, true);
-       set_cpu_present(cpu, true);
-}
-
-#ifdef CONFIG_X86_64
-int hard_smp_processor_id(void)
-{
-       return read_apic_id();
-}
-#endif
-
-/*
- * Power management
- */
-#ifdef CONFIG_PM
-
-static struct {
-       /*
-        * 'active' is true if the local APIC was enabled by us and
-        * not the BIOS; this signifies that we are also responsible
-        * for disabling it before entering apm/acpi suspend
-        */
-       int active;
-       /* r/w apic fields */
-       unsigned int apic_id;
-       unsigned int apic_taskpri;
-       unsigned int apic_ldr;
-       unsigned int apic_dfr;
-       unsigned int apic_spiv;
-       unsigned int apic_lvtt;
-       unsigned int apic_lvtpc;
-       unsigned int apic_lvt0;
-       unsigned int apic_lvt1;
-       unsigned int apic_lvterr;
-       unsigned int apic_tmict;
-       unsigned int apic_tdcr;
-       unsigned int apic_thmr;
-} apic_pm_state;
-
-static int lapic_suspend(struct sys_device *dev, pm_message_t state)
-{
-       unsigned long flags;
-       int maxlvt;
-
-       if (!apic_pm_state.active)
-               return 0;
-
-       maxlvt = lapic_get_maxlvt();
-
-       apic_pm_state.apic_id = apic_read(APIC_ID);
-       apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
-       apic_pm_state.apic_ldr = apic_read(APIC_LDR);
-       apic_pm_state.apic_dfr = apic_read(APIC_DFR);
-       apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
-       apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
-       if (maxlvt >= 4)
-               apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
-       apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
-       apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
-       apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
-       apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
-       apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
-       if (maxlvt >= 5)
-               apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
-#endif
-
-       local_irq_save(flags);
-       disable_local_APIC();
-       local_irq_restore(flags);
-       return 0;
-}
-
-static int lapic_resume(struct sys_device *dev)
-{
-       unsigned int l, h;
-       unsigned long flags;
-       int maxlvt;
-
-       if (!apic_pm_state.active)
-               return 0;
-
-       maxlvt = lapic_get_maxlvt();
-
-       local_irq_save(flags);
-
-#ifdef HAVE_X2APIC
-       if (x2apic)
-               enable_x2apic();
-       else
-#endif
-       {
-               /*
-                * Make sure the APICBASE points to the right address
-                *
-                * FIXME! This will be wrong if we ever support suspend on
-                * SMP! We'll need to do this as part of the CPU restore!
-                */
-               rdmsr(MSR_IA32_APICBASE, l, h);
-               l &= ~MSR_IA32_APICBASE_BASE;
-               l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-               wrmsr(MSR_IA32_APICBASE, l, h);
-       }
-
-       apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
-       apic_write(APIC_ID, apic_pm_state.apic_id);
-       apic_write(APIC_DFR, apic_pm_state.apic_dfr);
-       apic_write(APIC_LDR, apic_pm_state.apic_ldr);
-       apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
-       apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
-       apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
-       apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
-#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
-       if (maxlvt >= 5)
-               apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
-#endif
-       if (maxlvt >= 4)
-               apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
-       apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
-       apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
-       apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
-       apic_write(APIC_ESR, 0);
-       apic_read(APIC_ESR);
-       apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
-       apic_write(APIC_ESR, 0);
-       apic_read(APIC_ESR);
-
-       local_irq_restore(flags);
-
-       return 0;
-}
-
-/*
- * This device has no shutdown method - fully functioning local APICs
- * are needed on every CPU up until machine_halt/restart/poweroff.
- */
-
-static struct sysdev_class lapic_sysclass = {
-       .name           = "lapic",
-       .resume         = lapic_resume,
-       .suspend        = lapic_suspend,
-};
-
-static struct sys_device device_lapic = {
-       .id     = 0,
-       .cls    = &lapic_sysclass,
-};
-
-static void __cpuinit apic_pm_activate(void)
-{
-       apic_pm_state.active = 1;
-}
-
-static int __init init_lapic_sysfs(void)
-{
-       int error;
-
-       if (!cpu_has_apic)
-               return 0;
-       /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
-
-       error = sysdev_class_register(&lapic_sysclass);
-       if (!error)
-               error = sysdev_register(&device_lapic);
-       return error;
-}
-device_initcall(init_lapic_sysfs);
-
-#else  /* CONFIG_PM */
-
-static void apic_pm_activate(void) { }
-
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_X86_64
-/*
- * apic_is_clustered_box() -- Check if we can expect good TSC
- *
- * Thus far, the major user of this is IBM's Summit2 series:
- *
- * Clustered boxes may have unsynced TSC problems if they are
- * multi-chassis. Use available data to take a good guess.
- * If in doubt, go HPET.
- */
-__cpuinit int apic_is_clustered_box(void)
-{
-       int i, clusters, zeros;
-       unsigned id;
-       u16 *bios_cpu_apicid;
-       DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
-
-       /*
-        * There is no box of this kind with an AMD CPU yet. On some AMD
-        * boxes with quad-core CPUs and 8 sockets the apicids are
-        * [4, 0x23] or [8, 0x27], which could be mistaken for a vsmp
-        * box; this still needs checking...
-        */
-       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
-               return 0;
-
-       bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
-       bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
-
-       for (i = 0; i < nr_cpu_ids; i++) {
-               /* are we being called early in kernel startup? */
-               if (bios_cpu_apicid) {
-                       id = bios_cpu_apicid[i];
-               } else if (i < nr_cpu_ids) {
-                       if (cpu_present(i))
-                               id = per_cpu(x86_bios_cpu_apicid, i);
-                       else
-                               continue;
-               } else
-                       break;
-
-               if (id != BAD_APICID)
-                       __set_bit(APIC_CLUSTERID(id), clustermap);
-       }
-
-       /* Problem:  Partially populated chassis may not have CPUs in some of
-        * the APIC clusters they have been allocated.  Only present CPUs have
-        * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
-        * Since clusters are allocated sequentially, count zeros only if
-        * they are bounded by ones.
-        */
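-       /*
-        * For illustration: a clustermap of 1,1,0,1 counts as 4 clusters;
-        * the zero at position 2 is bounded by ones and is therefore
-        * included, while trailing zeros are never counted.
-        */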
-       clusters = 0;
-       zeros = 0;
-       for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
-               if (test_bit(i, clustermap)) {
-                       clusters += 1 + zeros;
-                       zeros = 0;
-               } else
-                       ++zeros;
-       }
-
-       /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
-        * not guaranteed to be synced between boards
-        */
-       if (is_vsmp_box() && clusters > 1)
-               return 1;
-
-       /*
-        * If clusters > 2, then should be multi-chassis.
-        * May have to revisit this when multi-core + hyperthreaded CPUs come
-        * out, but AFAIK this will work even for them.
-        */
-       return (clusters > 2);
-}
-#endif
-
-/*
- * APIC command line parameters
- */
-static int __init setup_disableapic(char *arg)
-{
-       disable_apic = 1;
-       setup_clear_cpu_cap(X86_FEATURE_APIC);
-       return 0;
-}
-early_param("disableapic", setup_disableapic);
-
-/* same as disableapic, for compatibility */
-static int __init setup_nolapic(char *arg)
-{
-       return setup_disableapic(arg);
-}
-early_param("nolapic", setup_nolapic);
-
-static int __init parse_lapic_timer_c2_ok(char *arg)
-{
-       local_apic_timer_c2_ok = 1;
-       return 0;
-}
-early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
-
-static int __init parse_disable_apic_timer(char *arg)
-{
-       disable_apic_timer = 1;
-       return 0;
-}
-early_param("noapictimer", parse_disable_apic_timer);
-
-static int __init parse_nolapic_timer(char *arg)
-{
-       disable_apic_timer = 1;
-       return 0;
-}
-early_param("nolapic_timer", parse_nolapic_timer);
-
-static int __init apic_set_verbosity(char *arg)
-{
-       if (!arg)  {
-#ifdef CONFIG_X86_64
-               skip_ioapic_setup = 0;
-               return 0;
-#endif
-               return -EINVAL;
-       }
-
-       if (strcmp("debug", arg) == 0)
-               apic_verbosity = APIC_DEBUG;
-       else if (strcmp("verbose", arg) == 0)
-               apic_verbosity = APIC_VERBOSE;
-       else {
-               pr_warning("APIC Verbosity level %s not recognised"
-                       " use apic=verbose or apic=debug\n", arg);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-early_param("apic", apic_set_verbosity);
-
-static int __init lapic_insert_resource(void)
-{
-       if (!apic_phys)
-               return -1;
-
-       /* Put local APIC into the resource map. */
-       lapic_resource.start = apic_phys;
-       lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
-       insert_resource(&iomem_resource, &lapic_resource);
-
-       return 0;
-}
-
-/*
- * We need to call insert_resource() after e820_reserve_resources(),
- * which uses request_resource()
- */
-late_initcall(lapic_insert_resource);
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
new file mode 100644 (file)
index 0000000..da7b7b9
--- /dev/null
@@ -0,0 +1,19 @@
+#
+# Makefile for local APIC drivers and for the IO-APIC code
+#
+
+obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o probe_$(BITS).o ipi.o nmi.o
+obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
+obj-$(CONFIG_SMP)              += ipi.o
+
+ifeq ($(CONFIG_X86_64),y)
+obj-y                          += apic_flat_64.o
+obj-$(CONFIG_X86_X2APIC)       += x2apic_cluster.o
+obj-$(CONFIG_X86_X2APIC)       += x2apic_phys.o
+obj-$(CONFIG_X86_UV)           += x2apic_uv_x.o
+endif
+
+obj-$(CONFIG_X86_BIGSMP)       += bigsmp_32.o
+obj-$(CONFIG_X86_NUMAQ)                += numaq_32.o
+obj-$(CONFIG_X86_ES7000)       += es7000_32.o
+obj-$(CONFIG_X86_SUMMIT)       += summit_32.o
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
new file mode 100644 (file)
index 0000000..30909a2
--- /dev/null
@@ -0,0 +1,2219 @@
+/*
+ *     Local APIC handling, local APIC timers
+ *
+ *     (c) 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
+ *
+ *     Fixes
+ *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
+ *                                     thanks to Eric Gilmore
+ *                                     and Rolf G. Tews
+ *                                     for testing these extensively.
+ *     Maciej W. Rozycki       :       Various updates and fixes.
+ *     Mikael Pettersson       :       Power Management for UP-APIC.
+ *     Pavel Machek and
+ *     Mikael Pettersson       :       PM converted to driver model.
+ */
+
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/acpi_pmtmr.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <linux/ftrace.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/delay.h>
+#include <linux/timex.h>
+#include <linux/dmar.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/dmi.h>
+#include <linux/nmi.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm/pgalloc.h>
+#include <asm/atomic.h>
+#include <asm/mpspec.h>
+#include <asm/i8253.h>
+#include <asm/i8259.h>
+#include <asm/proto.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/hpet.h>
+#include <asm/idle.h>
+#include <asm/mtrr.h>
+#include <asm/smp.h>
+#include <asm/mce.h>
+
+unsigned int num_processors;
+
+unsigned disabled_cpus __cpuinitdata;
+
+/* Processor that is doing the boot up */
+unsigned int boot_cpu_physical_apicid = -1U;
+
+/*
+ * The highest APIC ID seen during enumeration.
+ *
+ * This determines the messaging protocol we can use: if all APIC IDs
+ * are in the 0 ... 7 range, then we can use logical addressing which
+ * has some performance advantages (better broadcasting).
+ *
+ * If there's an APIC ID of 8 or above, we use physical addressing.
+ */
+unsigned int max_physical_apicid;
+
+/*
+ * Bitmask of physically existing CPUs:
+ */
+physid_mask_t phys_cpu_present_map;
+
+/*
+ * Map cpu index to physical APIC ID
+ */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
+#ifdef CONFIG_X86_32
+/*
+ * Knob to control our willingness to enable the local APIC.
+ *
+ * +1=force-enable
+ */
+static int force_enable_local_apic;
+/*
+ * APIC command line parameters
+ */
+static int __init parse_lapic(char *arg)
+{
+       force_enable_local_apic = 1;
+       return 0;
+}
+early_param("lapic", parse_lapic);
+/* Local APIC was disabled by the BIOS and enabled by the kernel */
+static int enabled_via_apicbase;
+
+#endif
+
+#ifdef CONFIG_X86_64
+static int apic_calibrate_pmtmr __initdata;
+static __init int setup_apicpmtimer(char *s)
+{
+       apic_calibrate_pmtmr = 1;
+       notsc_setup(NULL);
+       return 0;
+}
+__setup("apicpmtimer", setup_apicpmtimer);
+#endif
+
+#ifdef CONFIG_X86_X2APIC
+int x2apic;
+/* x2apic enabled before OS handover */
+static int x2apic_preenabled;
+static int disable_x2apic;
+static __init int setup_nox2apic(char *str)
+{
+       disable_x2apic = 1;
+       setup_clear_cpu_cap(X86_FEATURE_X2APIC);
+       return 0;
+}
+early_param("nox2apic", setup_nox2apic);
+#endif
+
+unsigned long mp_lapic_addr;
+int disable_apic;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int disable_apic_timer __cpuinitdata;
+/* Local APIC timer works in C2 */
+int local_apic_timer_c2_ok;
+EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
+
+int first_system_vector = 0xfe;
+
+/*
+ * Debug level, exported for io_apic.c
+ */
+unsigned int apic_verbosity;
+
+int pic_mode;
+
+/* Have we found an MP table */
+int smp_found_config;
+
+static struct resource lapic_resource = {
+       .name = "Local APIC",
+       .flags = IORESOURCE_MEM | IORESOURCE_BUSY,
+};
+
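+/* Bus clocks per timer tick (jiffy), as measured by calibrate_APIC_clock() */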
+static unsigned int calibration_result;
+
+static int lapic_next_event(unsigned long delta,
+                           struct clock_event_device *evt);
+static void lapic_timer_setup(enum clock_event_mode mode,
+                             struct clock_event_device *evt);
+static void lapic_timer_broadcast(const struct cpumask *mask);
+static void apic_pm_activate(void);
+
+/*
+ * The local apic timer can be used for any function which is CPU local.
+ */
+static struct clock_event_device lapic_clockevent = {
+       .name           = "lapic",
+       .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
+                       | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
+       .shift          = 32,
+       .set_mode       = lapic_timer_setup,
+       .set_next_event = lapic_next_event,
+       .broadcast      = lapic_timer_broadcast,
+       .rating         = 100,
+       .irq            = -1,
+};
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+
+static unsigned long apic_phys;
+
+/*
+ * Get the LAPIC version
+ */
+static inline int lapic_get_version(void)
+{
+       return GET_APIC_VERSION(apic_read(APIC_LVR));
+}
+
+/*
+ * Check whether the APIC is integrated or a separate chip
+ */
+static inline int lapic_is_integrated(void)
+{
+#ifdef CONFIG_X86_64
+       return 1;
+#else
+       return APIC_INTEGRATED(lapic_get_version());
+#endif
+}
+
+/*
+ * Check whether this is a modern or a first-generation APIC
+ */
+static int modern_apic(void)
+{
+       /* AMD systems use old APIC versions, so check the CPU */
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+           boot_cpu_data.x86 >= 0xf)
+               return 1;
+       return lapic_get_version() >= 0x14;
+}
+
+void native_apic_wait_icr_idle(void)
+{
+       while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
+               cpu_relax();
+}
+
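+/*
+ * Bounded variant of the above: polls the ICR busy bit for at most
+ * ~100ms (1000 iterations of udelay(100)) and then gives up, returning
+ * the last busy status instead of spinning forever.
+ */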
+u32 native_safe_apic_wait_icr_idle(void)
+{
+       u32 send_status;
+       int timeout;
+
+       timeout = 0;
+       do {
+               send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
+               if (!send_status)
+                       break;
+               udelay(100);
+       } while (timeout++ < 1000);
+
+       return send_status;
+}
+
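+/*
+ * The 64-bit ICR is split across two 32-bit registers: ICR2 carries the
+ * destination field in its upper bits, ICR the command proper. ICR2 must
+ * be written before ICR, since writing the low word triggers delivery.
+ */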
+void native_apic_icr_write(u32 low, u32 id)
+{
+       apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
+       apic_write(APIC_ICR, low);
+}
+
+u64 native_apic_icr_read(void)
+{
+       u32 icr1, icr2;
+
+       icr2 = apic_read(APIC_ICR2);
+       icr1 = apic_read(APIC_ICR);
+
+       return icr1 | ((u64)icr2 << 32);
+}
+
+/**
+ * enable_NMI_through_LVT0 - enable NMI through local vector table 0
+ */
+void __cpuinit enable_NMI_through_LVT0(void)
+{
+       unsigned int v;
+
+       /* unmask and set to NMI */
+       v = APIC_DM_NMI;
+
+       /* Level triggered for 82489DX (32bit mode) */
+       if (!lapic_is_integrated())
+               v |= APIC_LVT_LEVEL_TRIGGER;
+
+       apic_write(APIC_LVT0, v);
+}
+
+#ifdef CONFIG_X86_32
+/**
+ * get_physical_broadcast - Get number of physical broadcast IDs
+ */
+int get_physical_broadcast(void)
+{
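+       /* xAPICs carry 8-bit physical IDs (0xff), older APICs only 4-bit */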
+       return modern_apic() ? 0xff : 0xf;
+}
+#endif
+
+/**
+ * lapic_get_maxlvt - get the maximum number of local vector table entries
+ */
+int lapic_get_maxlvt(void)
+{
+       unsigned int v;
+
+       v = apic_read(APIC_LVR);
+       /*
+        * - we always have APIC integrated on 64bit mode
+        * - 82489DXs do not report # of LVT entries
+        */
+       return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
+}
+
+/*
+ * Local APIC timer
+ */
+
+/* Clock divisor */
+#define APIC_DIVISOR 16
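+/*
+ * The timer input is the bus clock divided by APIC_DIVISOR (the TDCR is
+ * programmed for divide-by-16 in __setup_APIC_LVTT() below), so an
+ * initial count of clocks/APIC_DIVISOR fires after 'clocks' bus cycles.
+ */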
+
+/*
+ * This function sets up the local APIC timer, with a timeout of
+ * 'clocks' APIC bus clocks. During calibration we actually call
+ * this function twice on the boot CPU, once with a bogus timeout
+ * value, the second time for real. The other (non-calibrating) CPUs
+ * call this function only once, with the real, calibrated value.
+ *
+ * We do reads before writes even if unnecessary, to get around the
+ * P5 APIC double write bug.
+ */
+static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
+{
+       unsigned int lvtt_value, tmp_value;
+
+       lvtt_value = LOCAL_TIMER_VECTOR;
+       if (!oneshot)
+               lvtt_value |= APIC_LVT_TIMER_PERIODIC;
+       if (!lapic_is_integrated())
+               lvtt_value |= SET_APIC_TIMER_BASE(APIC_TIMER_BASE_DIV);
+
+       if (!irqen)
+               lvtt_value |= APIC_LVT_MASKED;
+
+       apic_write(APIC_LVTT, lvtt_value);
+
+       /*
+        * Divide PICLK by 16
+        */
+       tmp_value = apic_read(APIC_TDCR);
+       apic_write(APIC_TDCR,
+               (tmp_value & ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE)) |
+               APIC_TDR_DIV_16);
+
+       if (!oneshot)
+               apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
+}
+
+/*
+ * Setup extended LVT, AMD specific (K8, family 10h)
+ *
+ * Vector mappings are hard coded. On K8 only offset 0 (APIC500) and
+ * MCE interrupts are supported. Thus MCE offset must be set to 0.
+ *
+ * If mask=1, the LVT entry does not generate interrupts while mask=0
+ * enables the vector. See also the BKDGs.
+ */
+
+#define APIC_EILVT_LVTOFF_MCE 0
+#define APIC_EILVT_LVTOFF_IBS 1
+
+static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
+{
+       unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
+       unsigned int  v   = (mask << 16) | (msg_type << 8) | vector;
+
+       apic_write(reg, v);
+}
+
+u8 setup_APIC_eilvt_mce(u8 vector, u8 msg_type, u8 mask)
+{
+       setup_APIC_eilvt(APIC_EILVT_LVTOFF_MCE, vector, msg_type, mask);
+       return APIC_EILVT_LVTOFF_MCE;
+}
+
+u8 setup_APIC_eilvt_ibs(u8 vector, u8 msg_type, u8 mask)
+{
+       setup_APIC_eilvt(APIC_EILVT_LVTOFF_IBS, vector, msg_type, mask);
+       return APIC_EILVT_LVTOFF_IBS;
+}
+EXPORT_SYMBOL_GPL(setup_APIC_eilvt_ibs);
+
+/*
+ * Program the next event, relative to now
+ */
+static int lapic_next_event(unsigned long delta,
+                           struct clock_event_device *evt)
+{
+       apic_write(APIC_TMICT, delta);
+       return 0;
+}
+
+/*
+ * Setup the lapic timer in periodic or oneshot mode
+ */
+static void lapic_timer_setup(enum clock_event_mode mode,
+                             struct clock_event_device *evt)
+{
+       unsigned long flags;
+       unsigned int v;
+
+       /* Lapic used as dummy for broadcast? */
+       if (evt->features & CLOCK_EVT_FEAT_DUMMY)
+               return;
+
+       local_irq_save(flags);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+       case CLOCK_EVT_MODE_ONESHOT:
+               __setup_APIC_LVTT(calibration_result,
+                                 mode != CLOCK_EVT_MODE_PERIODIC, 1);
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               v = apic_read(APIC_LVTT);
+               v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+               apic_write(APIC_LVTT, v);
+               apic_write(APIC_TMICT, 0xffffffff);
+               break;
+       case CLOCK_EVT_MODE_RESUME:
+               /* Nothing to do here */
+               break;
+       }
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Local APIC timer broadcast function
+ */
+static void lapic_timer_broadcast(const struct cpumask *mask)
+{
+#ifdef CONFIG_SMP
+       apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#endif
+}
+
+/*
+ * Set up the local APIC timer for this CPU. Copy the initialized values
+ * of the boot CPU and register the clock event in the framework.
+ */
+static void __cpuinit setup_APIC_timer(void)
+{
+       struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+
+       memcpy(levt, &lapic_clockevent, sizeof(*levt));
+       levt->cpumask = cpumask_of(smp_processor_id());
+
+       clockevents_register_device(levt);
+}
+
+/*
+ * In this function we calibrate the APIC bus clocks to the external timer.
+ *
+ * We want to do the calibration only once since we want to have local timer
+ * irqs in sync. CPUs connected to the same APIC bus have the very same bus
+ * frequency.
+ *
+ * This was previously done by reading the PIT/HPET and waiting for a wrap
+ * around to find out that a tick has elapsed. I have a box where the PIT
+ * readout is broken, so it never gets out of the wait loop again. This was
+ * also reported by others.
+ *
+ * Monitoring the jiffies value is inaccurate and the clockevents
+ * infrastructure allows us to do a simple substitution of the interrupt
+ * handler.
+ *
+ * The calibration routine also uses the pm_timer when possible, as the PIT
+ * happens to run way too slow (factor 2.3 on my VAIO CoreDuo, which goes
+ * back to normal later in the boot process).
+ */
+
+#define LAPIC_CAL_LOOPS                (HZ/10)
+
+static __initdata int lapic_cal_loops = -1;
+static __initdata long lapic_cal_t1, lapic_cal_t2;
+static __initdata unsigned long long lapic_cal_tsc1, lapic_cal_tsc2;
+static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
+static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
+
+/*
+ * Temporary interrupt handler.
+ */
+static void __init lapic_cal_handler(struct clock_event_device *dev)
+{
+       unsigned long long tsc = 0;
+       long tapic = apic_read(APIC_TMCCT);
+       unsigned long pm = acpi_pm_read_early();
+
+       if (cpu_has_tsc)
+               rdtscll(tsc);
+
+       switch (lapic_cal_loops++) {
+       case 0:
+               lapic_cal_t1 = tapic;
+               lapic_cal_tsc1 = tsc;
+               lapic_cal_pm1 = pm;
+               lapic_cal_j1 = jiffies;
+               break;
+
+       case LAPIC_CAL_LOOPS:
+               lapic_cal_t2 = tapic;
+               lapic_cal_tsc2 = tsc;
+               if (pm < lapic_cal_pm1)
+                       pm += ACPI_PM_OVRRUN;
+               lapic_cal_pm2 = pm;
+               lapic_cal_j2 = jiffies;
+               break;
+       }
+}
+
+static int __init
+calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
+{
+       const long pm_100ms = PMTMR_TICKS_PER_SEC / 10;
+       const long pm_thresh = pm_100ms / 100;
+       unsigned long mult;
+       u64 res;
+
+#ifndef CONFIG_X86_PM_TIMER
+       return -1;
+#endif
+
+       apic_printk(APIC_VERBOSE, "... PM-Timer delta = %ld\n", deltapm);
+
+       /* Check if the PM timer is available */
+       if (!deltapm)
+               return -1;
+
+       mult = clocksource_hz2mult(PMTMR_TICKS_PER_SEC, 22);
+
+       if (deltapm > (pm_100ms - pm_thresh) &&
+           deltapm < (pm_100ms + pm_thresh)) {
+               apic_printk(APIC_VERBOSE, "... PM-Timer result ok\n");
+               return 0;
+       }
+
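+       /*
+        * mult (from clocksource_hz2mult() above) is a 22-bit fixed-point
+        * factor converting PM-timer ticks at 3.579545 MHz to nanoseconds:
+        * ns = (ticks * mult) >> 22; dividing by 10^6 then gives ms.
+        */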
+       res = (((u64)deltapm) *  mult) >> 22;
+       do_div(res, 1000000);
+       pr_warning("APIC calibration not consistent "
+                  "with PM-Timer: %ldms instead of 100ms\n",(long)res);
+
+       /* Correct the lapic counter value */
+       res = (((u64)(*delta)) * pm_100ms);
+       do_div(res, deltapm);
+       pr_info("APIC delta adjusted to PM-Timer: "
+               "%lu (%ld)\n", (unsigned long)res, *delta);
+       *delta = (long)res;
+
+       /* Correct the tsc counter value */
+       if (cpu_has_tsc) {
+               res = (((u64)(*deltatsc)) * pm_100ms);
+               do_div(res, deltapm);
+               apic_printk(APIC_VERBOSE, "TSC delta adjusted to "
+                                         "PM-Timer: %lu (%ld) \n",
+                                       (unsigned long)res, *deltatsc);
+               *deltatsc = (long)res;
+       }
+
+       return 0;
+}
+
+static int __init calibrate_APIC_clock(void)
+{
+       struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+       void (*real_handler)(struct clock_event_device *dev);
+       unsigned long deltaj;
+       long delta, deltatsc;
+       int pm_referenced = 0;
+
+       local_irq_disable();
+
+       /* Replace the global interrupt handler */
+       real_handler = global_clock_event->event_handler;
+       global_clock_event->event_handler = lapic_cal_handler;
+
+       /*
+        * Setup the APIC counter to maximum. There is no way the lapic
+        * can underflow in the 100ms detection time frame
+        */
+       __setup_APIC_LVTT(0xffffffff, 0, 0);
+
+       /* Let the interrupts run */
+       local_irq_enable();
+
+       while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
+               cpu_relax();
+
+       local_irq_disable();
+
+       /* Restore the real event handler */
+       global_clock_event->event_handler = real_handler;
+
+       /* Build delta t1-t2 as apic timer counts down */
+       delta = lapic_cal_t1 - lapic_cal_t2;
+       apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
+
+       deltatsc = (long)(lapic_cal_tsc2 - lapic_cal_tsc1);
+
+       /* we trust the PM based calibration if possible */
+       pm_referenced = !calibrate_by_pmtimer(lapic_cal_pm2 - lapic_cal_pm1,
+                                       &delta, &deltatsc);
+
+       /* Calculate the scaled math multiplication factor */
+       lapic_clockevent.mult = div_sc(delta, TICK_NSEC * LAPIC_CAL_LOOPS,
+                                      lapic_clockevent.shift);
+       lapic_clockevent.max_delta_ns =
+               clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+       lapic_clockevent.min_delta_ns =
+               clockevent_delta2ns(0xF, &lapic_clockevent);
+
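+       /*
+        * Illustrative numbers (assuming HZ=250 and a 1 GHz bus clock):
+        * LAPIC_CAL_LOOPS is 25 ticks = 100ms, during which the divide-
+        * by-16 timer decrements by 6250000, so the result comes out to
+        * 6250000 * 16 / 25 = 4000000 bus clocks per tick.
+        */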
+       calibration_result = (delta * APIC_DIVISOR) / LAPIC_CAL_LOOPS;
+
+       apic_printk(APIC_VERBOSE, "..... delta %ld\n", delta);
+       apic_printk(APIC_VERBOSE, "..... mult: %ld\n", lapic_clockevent.mult);
+       apic_printk(APIC_VERBOSE, "..... calibration result: %u\n",
+                   calibration_result);
+
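+       /*
+        * deltatsc/LAPIC_CAL_LOOPS is TSC cycles per jiffy; dividing by
+        * the microseconds per jiffy (1000000/HZ) prints the CPU clock
+        * in MHz. The same trick yields the bus clock speed below.
+        */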
+       if (cpu_has_tsc) {
+               apic_printk(APIC_VERBOSE, "..... CPU clock speed is "
+                           "%ld.%04ld MHz.\n",
+                           (deltatsc / LAPIC_CAL_LOOPS) / (1000000 / HZ),
+                           (deltatsc / LAPIC_CAL_LOOPS) % (1000000 / HZ));
+       }
+
+       apic_printk(APIC_VERBOSE, "..... host bus clock speed is "
+                   "%u.%04u MHz.\n",
+                   calibration_result / (1000000 / HZ),
+                   calibration_result % (1000000 / HZ));
+
+       /*
+        * Do a sanity check on the APIC calibration result
+        */
+       if (calibration_result < (1000000 / HZ)) {
+               local_irq_enable();
+               pr_warning("APIC frequency too slow, disabling apic timer\n");
+               return -1;
+       }
+
+       levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
+
+       /*
+        * PM timer calibration failed or was not turned on,
+        * so let's try APIC-timer-based calibration
+        */
+       if (!pm_referenced) {
+               apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
+
+               /*
+                * Setup the apic timer manually
+                */
+               levt->event_handler = lapic_cal_handler;
+               lapic_timer_setup(CLOCK_EVT_MODE_PERIODIC, levt);
+               lapic_cal_loops = -1;
+
+               /* Let the interrupts run */
+               local_irq_enable();
+
+               while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
+                       cpu_relax();
+
+               /* Stop the lapic timer */
+               lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, levt);
+
+               /* Jiffies delta */
+               deltaj = lapic_cal_j2 - lapic_cal_j1;
+               apic_printk(APIC_VERBOSE, "... jiffies delta = %lu\n", deltaj);
+
+               /* Check if the jiffies result is consistent */
+               if (deltaj >= LAPIC_CAL_LOOPS-2 && deltaj <= LAPIC_CAL_LOOPS+2)
+                       apic_printk(APIC_VERBOSE, "... jiffies result ok\n");
+               else
+                       levt->features |= CLOCK_EVT_FEAT_DUMMY;
+       } else
+               local_irq_enable();
+
+       if (levt->features & CLOCK_EVT_FEAT_DUMMY) {
+               pr_warning("APIC timer disabled due to verification failure\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Setup the boot APIC
+ *
+ * Calibrate and verify the result.
+ */
+void __init setup_boot_APIC_clock(void)
+{
+       /*
+        * The local apic timer can be disabled via the kernel
+        * commandline or from the CPU detection code. Register the lapic
+        * timer as a dummy clock event source on SMP systems, so the
+        * broadcast mechanism is used. On UP systems simply ignore it.
+        */
+       if (disable_apic_timer) {
+               pr_info("Disabling APIC timer\n");
+               /* No broadcast on UP ! */
+               if (num_possible_cpus() > 1) {
+                       lapic_clockevent.mult = 1;
+                       setup_APIC_timer();
+               }
+               return;
+       }
+
+       apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
+                   "calibrating APIC timer ...\n");
+
+       if (calibrate_APIC_clock()) {
+               /* No broadcast on UP ! */
+               if (num_possible_cpus() > 1)
+                       setup_APIC_timer();
+               return;
+       }
+
+       /*
+        * If nmi_watchdog is set to IO_APIC, we need the
+        * PIT/HPET going.  Otherwise register lapic as a dummy
+        * device.
+        */
+       if (nmi_watchdog != NMI_IO_APIC)
+               lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+       else
+               pr_warning("APIC timer registered as dummy,"
+                       " due to nmi_watchdog=%d!\n", nmi_watchdog);
+
+       /* Setup the lapic or request the broadcast */
+       setup_APIC_timer();
+}
+
+void __cpuinit setup_secondary_APIC_clock(void)
+{
+       setup_APIC_timer();
+}
+
+/*
+ * The guts of the apic timer interrupt
+ */
+static void local_apic_timer_interrupt(void)
+{
+       int cpu = smp_processor_id();
+       struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
+
+       /*
+        * Normally we should not be here until the LAPIC has been
+        * initialized, but in some cases, like kdump, a pending LAPIC timer
+        * interrupt from the previous kernel's context may be delivered in
+        * the new kernel the moment interrupts are enabled.
+        *
+        * Interrupts are enabled early and the LAPIC is set up much later,
+        * hence it's possible that when we get here evt->event_handler is
+        * NULL. Check for event_handler being NULL and discard the interrupt
+        * as spurious.
+        */
+       if (!evt->event_handler) {
+               pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+               /* Switch it off */
+               lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+               return;
+       }
+
+       /*
+        * the NMI deadlock-detector uses this.
+        */
+       inc_irq_stat(apic_timer_irqs);
+
+       evt->event_handler(evt);
+}
+
+/*
+ * Local APIC timer interrupt. This is the most natural way for doing
+ * local interrupts, but local timer interrupts can be emulated by
+ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
+ *
+ * [ if a single-CPU system runs an SMP kernel then we call the local
+ *   interrupt as well. Thus we cannot inline the local irq ... ]
+ */
+void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       /*
+        * NOTE! We'd better ACK the irq immediately,
+        * because timer handling can be slow.
+        */
+       ack_APIC_irq();
+       /*
+        * update_process_times() expects us to have done irq_enter().
+        * Besides, if we don't, timer interrupts ignore the global
+        * interrupt lock, which is the WrongThing (tm) to do.
+        */
+       exit_idle();
+       irq_enter();
+       local_apic_timer_interrupt();
+       irq_exit();
+
+       set_irq_regs(old_regs);
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
+/*
+ * Local APIC start and shutdown
+ */
+
+/**
+ * clear_local_APIC - shutdown the local APIC
+ *
+ * This is called when a CPU is disabled and before rebooting, so the state of
+ * the local APIC has no dangling leftovers. Also used to clean out any BIOS
+ * leftovers during boot.
+ */
+void clear_local_APIC(void)
+{
+       int maxlvt;
+       u32 v;
+
+       /* APIC hasn't been mapped yet */
+       if (!apic_phys)
+               return;
+
+       maxlvt = lapic_get_maxlvt();
+       /*
+        * Masking an LVT entry can trigger a local APIC error
+        * if the vector is zero. Mask LVTERR first to prevent this.
+        */
+       if (maxlvt >= 3) {
+               v = ERROR_APIC_VECTOR; /* any non-zero vector will do */
+               apic_write(APIC_LVTERR, v | APIC_LVT_MASKED);
+       }
+       /*
+        * Careful: we have to set the mask bits first to deassert
+        * any level-triggered sources.
+        */
+       v = apic_read(APIC_LVTT);
+       apic_write(APIC_LVTT, v | APIC_LVT_MASKED);
+       v = apic_read(APIC_LVT0);
+       apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+       v = apic_read(APIC_LVT1);
+       apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
+       if (maxlvt >= 4) {
+               v = apic_read(APIC_LVTPC);
+               apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
+       }
+
+       /* let's not touch this if we didn't frob it */
+#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
+       if (maxlvt >= 5) {
+               v = apic_read(APIC_LVTTHMR);
+               apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
+       }
+#endif
+#ifdef CONFIG_X86_MCE_INTEL
+       if (maxlvt >= 6) {
+               v = apic_read(APIC_LVTCMCI);
+               if (!(v & APIC_LVT_MASKED))
+                       apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
+       }
+#endif
+
+       /*
+        * Clean APIC state for other OSs:
+        */
+       apic_write(APIC_LVTT, APIC_LVT_MASKED);
+       apic_write(APIC_LVT0, APIC_LVT_MASKED);
+       apic_write(APIC_LVT1, APIC_LVT_MASKED);
+       if (maxlvt >= 3)
+               apic_write(APIC_LVTERR, APIC_LVT_MASKED);
+       if (maxlvt >= 4)
+               apic_write(APIC_LVTPC, APIC_LVT_MASKED);
+
+       /* Integrated APIC (!82489DX) ? */
+       if (lapic_is_integrated()) {
+               if (maxlvt > 3)
+                       /* Clear ESR due to Pentium errata 3AP and 11AP */
+                       apic_write(APIC_ESR, 0);
+               apic_read(APIC_ESR);
+       }
+}
+
+/**
+ * disable_local_APIC - clear and disable the local APIC
+ */
+void disable_local_APIC(void)
+{
+       unsigned int value;
+
+       /* APIC hasn't been mapped yet */
+       if (!apic_phys)
+               return;
+
+       clear_local_APIC();
+
+       /*
+        * Disable APIC (implies clearing of registers
+        * for 82489DX!).
+        */
+       value = apic_read(APIC_SPIV);
+       value &= ~APIC_SPIV_APIC_ENABLED;
+       apic_write(APIC_SPIV, value);
+
+#ifdef CONFIG_X86_32
+       /*
+        * When LAPIC was disabled by the BIOS and enabled by the kernel,
+        * restore the disabled state.
+        */
+       if (enabled_via_apicbase) {
+               unsigned int l, h;
+
+               rdmsr(MSR_IA32_APICBASE, l, h);
+               l &= ~MSR_IA32_APICBASE_ENABLE;
+               wrmsr(MSR_IA32_APICBASE, l, h);
+       }
+#endif
+}
+
+/*
+ * If Linux enabled the LAPIC against the BIOS default, disable it again before
+ * re-entering the BIOS on shutdown.  Otherwise the BIOS may get confused and
+ * not power off.  Additionally, clear all LVT entries before disable_local_APIC()
+ * for the case where Linux didn't enable the LAPIC.
+ */
+void lapic_shutdown(void)
+{
+       unsigned long flags;
+
+       if (!cpu_has_apic)
+               return;
+
+       local_irq_save(flags);
+
+#ifdef CONFIG_X86_32
+       if (!enabled_via_apicbase)
+               clear_local_APIC();
+       else
+#endif
+               disable_local_APIC();
+
+       local_irq_restore(flags);
+}
+
+/*
+ * This is to verify that we're looking at a real local APIC.
+ * Check these against your board if the CPUs aren't getting
+ * started for no apparent reason.
+ */
+int __init verify_local_APIC(void)
+{
+       unsigned int reg0, reg1;
+
+       /*
+        * The version register is read-only in a real APIC.
+        */
+       reg0 = apic_read(APIC_LVR);
+       apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg0);
+       apic_write(APIC_LVR, reg0 ^ APIC_LVR_MASK);
+       reg1 = apic_read(APIC_LVR);
+       apic_printk(APIC_DEBUG, "Getting VERSION: %x\n", reg1);
+
+       /*
+        * The two version reads above should print the same
+        * numbers.  If the second one is different, then we are
+        * poking at a non-APIC.
+        */
+       if (reg1 != reg0)
+               return 0;
+
+       /*
+        * Check if the version looks reasonable.
+        */
+       reg1 = GET_APIC_VERSION(reg0);
+       if (reg1 == 0x00 || reg1 == 0xff)
+               return 0;
+       reg1 = lapic_get_maxlvt();
+       if (reg1 < 0x02 || reg1 == 0xff)
+               return 0;
+
+       /*
+        * The ID register is read/write in a real APIC.
+        */
+       reg0 = apic_read(APIC_ID);
+       apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg0);
+       apic_write(APIC_ID, reg0 ^ apic->apic_id_mask);
+       reg1 = apic_read(APIC_ID);
+       apic_printk(APIC_DEBUG, "Getting ID: %x\n", reg1);
+       apic_write(APIC_ID, reg0);
+       if (reg1 != (reg0 ^ apic->apic_id_mask))
+               return 0;
+
+       /*
+        * The next two are just to see if we have sane values.
+        * They're only really relevant if we're in Virtual Wire
+        * compatibility mode, but most boxes are these days.
+        */
+       reg0 = apic_read(APIC_LVT0);
+       apic_printk(APIC_DEBUG, "Getting LVT0: %x\n", reg0);
+       reg1 = apic_read(APIC_LVT1);
+       apic_printk(APIC_DEBUG, "Getting LVT1: %x\n", reg1);
+
+       return 1;
+}
+
+/**
+ * sync_Arb_IDs - synchronize APIC bus arbitration IDs
+ */
+void __init sync_Arb_IDs(void)
+{
+       /*
+        * Unsupported on P4 - see Intel Dev. Manual Vol. 3, Ch. 8.6.1. Not
+        * needed on AMD.
+        */
+       if (modern_apic() || boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+               return;
+
+       /*
+        * Wait for idle.
+        */
+       apic_wait_icr_idle();
+
+       apic_printk(APIC_DEBUG, "Synchronizing Arb IDs.\n");
+       apic_write(APIC_ICR, APIC_DEST_ALLINC |
+                       APIC_INT_LEVELTRIG | APIC_DM_INIT);
+}
+
+/*
+ * An initial setup of the virtual wire mode.
+ */
+void __init init_bsp_APIC(void)
+{
+       unsigned int value;
+
+       /*
+        * Don't do the setup now if we have an SMP BIOS as the
+        * through-I/O-APIC virtual wire mode might be active.
+        */
+       if (smp_found_config || !cpu_has_apic)
+               return;
+
+       /*
+        * Do not trust the local APIC being empty at bootup.
+        */
+       clear_local_APIC();
+
+       /*
+        * Enable APIC.
+        */
+       value = apic_read(APIC_SPIV);
+       value &= ~APIC_VECTOR_MASK;
+       value |= APIC_SPIV_APIC_ENABLED;
+
+#ifdef CONFIG_X86_32
+       /* This bit is reserved on P4/Xeon and should be cleared */
+       if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
+           (boot_cpu_data.x86 == 15))
+               value &= ~APIC_SPIV_FOCUS_DISABLED;
+       else
+#endif
+               value |= APIC_SPIV_FOCUS_DISABLED;
+       value |= SPURIOUS_APIC_VECTOR;
+       apic_write(APIC_SPIV, value);
+
+       /*
+        * Set up the virtual wire mode.
+        */
+       apic_write(APIC_LVT0, APIC_DM_EXTINT);
+       value = APIC_DM_NMI;
+       if (!lapic_is_integrated())             /* 82489DX */
+               value |= APIC_LVT_LEVEL_TRIGGER;
+       apic_write(APIC_LVT1, value);
+}
+
+static void __cpuinit lapic_setup_esr(void)
+{
+       unsigned int oldvalue, value, maxlvt;
+
+       if (!lapic_is_integrated()) {
+               pr_info("No ESR for 82489DX.\n");
+               return;
+       }
+
+       if (apic->disable_esr) {
+               /*
+                * Something untraceable is creating bad interrupts on
+                * secondary quads ... for the moment, just leave the
+                * ESR disabled - we can't do anything useful with the
+                * errors anyway - mbligh
+                */
+               pr_info("Leaving ESR disabled.\n");
+               return;
+       }
+
+       maxlvt = lapic_get_maxlvt();
+       if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
+               apic_write(APIC_ESR, 0);
+       oldvalue = apic_read(APIC_ESR);
+
+       /* enables sending errors */
+       value = ERROR_APIC_VECTOR;
+       apic_write(APIC_LVTERR, value);
+
+       /*
+        * The spec says to clear errors after enabling the vector.
+        */
+       if (maxlvt > 3)
+               apic_write(APIC_ESR, 0);
+       value = apic_read(APIC_ESR);
+       if (value != oldvalue)
+               apic_printk(APIC_VERBOSE, "ESR value before enabling "
+                       "vector: 0x%08x  after: 0x%08x\n",
+                       oldvalue, value);
+}
+
+
+/**
+ * setup_local_APIC - setup the local APIC
+ */
+void __cpuinit setup_local_APIC(void)
+{
+       unsigned int value;
+       int i, j;
+
+       if (disable_apic) {
+               arch_disable_smp_support();
+               return;
+       }
+
+#ifdef CONFIG_X86_32
+       /* Pound the ESR really hard over the head with a big hammer - mbligh */
+       if (lapic_is_integrated() && apic->disable_esr) {
+               apic_write(APIC_ESR, 0);
+               apic_write(APIC_ESR, 0);
+               apic_write(APIC_ESR, 0);
+               apic_write(APIC_ESR, 0);
+       }
+#endif
+
+       preempt_disable();
+
+       /*
+        * Double-check whether this APIC is really registered.
+        * This is meaningless in clustered apic mode, so we skip it.
+        */
+       if (!apic->apic_id_registered())
+               BUG();
+
+       /*
+        * Intel recommends setting DFR, LDR and TPR before enabling
+        * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+        * document number 292116).  So here it goes...
+        */
+       apic->init_apic_ldr();
+
+       /*
+        * Set Task Priority to 'accept all'. We never change this
+        * later on.
+        */
+       value = apic_read(APIC_TASKPRI);
+       value &= ~APIC_TPRI_MASK;
+       apic_write(APIC_TASKPRI, value);
+
+       /*
+        * After a crash, we no longer service the interrupts and a pending
+        * interrupt from the previous kernel might still have its ISR bit set.
+        *
+        * Most probably by now the CPU has serviced that pending interrupt but
+        * it might not have done the ack_APIC_irq() because it thought the
+        * interrupt came from the i8259 as ExtInt. The LAPIC did not get an
+        * EOI, so it does not clear the ISR bit and the CPU thinks it has
+        * already serviced the interrupt. Hence a vector might get locked. It
+        * was noticed for the timer irq (vector 0x31). Issue an extra EOI to
+        * clear the ISR.
+        */
+       for (i = APIC_ISR_NR - 1; i >= 0; i--) {
+               value = apic_read(APIC_ISR + i*0x10);
+               for (j = 31; j >= 0; j--) {
+                       if (value & (1<<j))
+                               ack_APIC_irq();
+               }
+       }
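+       /*
+        * (APIC_ISR_NR is 8: eight 32-bit ISR words, 0x10 apart, cover all
+        * 256 vectors. EOI always retires the highest-priority set bit,
+        * hence the highest-vector-first scan above.)
+        */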
+
+       /*
+        * Now that we are all set up, enable the APIC
+        */
+       value = apic_read(APIC_SPIV);
+       value &= ~APIC_VECTOR_MASK;
+       /*
+        * Enable APIC
+        */
+       value |= APIC_SPIV_APIC_ENABLED;
+
+#ifdef CONFIG_X86_32
+       /*
+        * Some unknown Intel IO/APIC (or APIC) erratum is biting us with
+        * certain networking cards. If high-frequency interrupts are
+        * happening on a particular IOAPIC pin, plus the IOAPIC routing
+        * entry is masked/unmasked at a high rate as well, then sooner or
+        * later the IOAPIC line gets 'stuck', and no more interrupts are
+        * received from the device. If the focus CPU is disabled then the
+        * hang goes away, oh well :-(
+        *
+        * [ This bug can be reproduced easily with level-triggered
+        *   PCI Ne2000 networking cards and PII/PIII processors, dual
+        *   BX chipset. ]
+        */
+       /*
+        * Actually disabling the focus CPU check just makes the hang less
+        * frequent as it makes the interrupt distributon model be more
+        * like LRU than MRU (the short-term load is more even across CPUs).
+        * See also the comment in end_level_ioapic_irq().  --macro
+        */
+
+       /*
+        * - enable focus processor (bit==0)
+        * - 64-bit mode always uses processor focus,
+        *   so there is no need to set it
+        */
+       value &= ~APIC_SPIV_FOCUS_DISABLED;
+#endif
+
+       /*
+        * Set spurious IRQ vector
+        */
+       value |= SPURIOUS_APIC_VECTOR;
+       apic_write(APIC_SPIV, value);
+
+       /*
+        * Set up LVT0, LVT1:
+        *
+        * set up through-local-APIC on the BP's LINT0. This is not
+        * strictly necessary in pure symmetric-IO mode, but sometimes
+        * we delegate interrupts to the 8259A.
+        */
+       /*
+        * TODO: set up through-local-APIC from through-I/O-APIC? --macro
+        */
+       value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
+       if (!smp_processor_id() && (pic_mode || !value)) {
+               value = APIC_DM_EXTINT;
+               apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
+                               smp_processor_id());
+       } else {
+               value = APIC_DM_EXTINT | APIC_LVT_MASKED;
+               apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
+                               smp_processor_id());
+       }
+       apic_write(APIC_LVT0, value);
+
+       /*
+        * only the BP should see the LINT1 NMI signal, obviously.
+        */
+       if (!smp_processor_id())
+               value = APIC_DM_NMI;
+       else
+               value = APIC_DM_NMI | APIC_LVT_MASKED;
+       if (!lapic_is_integrated())             /* 82489DX */
+               value |= APIC_LVT_LEVEL_TRIGGER;
+       apic_write(APIC_LVT1, value);
+
+       preempt_enable();
+
+#ifdef CONFIG_X86_MCE_INTEL
+       /* Recheck CMCI information after local APIC is up on CPU #0 */
+       if (smp_processor_id() == 0)
+               cmci_recheck();
+#endif
+}
+
+void __cpuinit end_local_APIC_setup(void)
+{
+       lapic_setup_esr();
+
+#ifdef CONFIG_X86_32
+       {
+               unsigned int value;
+               /* Disable the local apic timer */
+               value = apic_read(APIC_LVTT);
+               value |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+               apic_write(APIC_LVTT, value);
+       }
+#endif
+
+       setup_apic_nmi_watchdog(NULL);
+       apic_pm_activate();
+}
+
+#ifdef CONFIG_X86_X2APIC
+void check_x2apic(void)
+{
+       if (x2apic_enabled()) {
+               pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
+               x2apic_preenabled = x2apic = 1;
+       }
+}
+
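+/*
+ * Flip the x2apic enable bit (bit 10) in IA32_APICBASE if firmware has
+ * not done so already; the other bits of the MSR are left untouched.
+ */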
+void enable_x2apic(void)
+{
+       int msr, msr2;
+
+       if (!x2apic)
+               return;
+
+       rdmsr(MSR_IA32_APICBASE, msr, msr2);
+       if (!(msr & X2APIC_ENABLE)) {
+               pr_info("Enabling x2apic\n");
+               wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
+       }
+}
+
+void __init enable_IR_x2apic(void)
+{
+#ifdef CONFIG_INTR_REMAP
+       int ret;
+       unsigned long flags;
+
+       if (!cpu_has_x2apic)
+               return;
+
+       if (!x2apic_preenabled && disable_x2apic) {
+               pr_info("Skipped enabling x2apic and Interrupt-remapping "
+                       "because of nox2apic\n");
+               return;
+       }
+
+       if (x2apic_preenabled && disable_x2apic)
+               panic("Bios already enabled x2apic, can't enforce nox2apic");
+
+       if (!x2apic_preenabled && skip_ioapic_setup) {
+               pr_info("Skipped enabling x2apic and Interrupt-remapping "
+                       "because of skipping io-apic setup\n");
+               return;
+       }
+
+       ret = dmar_table_init();
+       if (ret) {
+               pr_info("dmar_table_init() failed with %d:\n", ret);
+
+               if (x2apic_preenabled)
+                       panic("x2apic enabled by bios. But IR enabling failed");
+               else
+                       pr_info("Not enabling x2apic,Intr-remapping\n");
+               return;
+       }
+
+       local_irq_save(flags);
+       mask_8259A();
+
+       ret = save_mask_IO_APIC_setup();
+       if (ret) {
+               pr_info("Saving IO-APIC state failed: %d\n", ret);
+               goto end;
+       }
+
+       ret = enable_intr_remapping(1);
+
+       if (ret && x2apic_preenabled) {
+               local_irq_restore(flags);
+               panic("x2apic enabled by bios. But IR enabling failed");
+       }
+
+       if (ret)
+               goto end_restore;
+
+       if (!x2apic) {
+               x2apic = 1;
+               enable_x2apic();
+       }
+
+end_restore:
+       if (ret)
+               /*
+                * IR enabling failed
+                */
+               restore_IO_APIC_setup();
+       else
+               reinit_intr_remapped_IO_APIC(x2apic_preenabled);
+
+end:
+       unmask_8259A();
+       local_irq_restore(flags);
+
+       if (!ret) {
+               if (!x2apic_preenabled)
+                       pr_info("Enabled x2apic and interrupt-remapping\n");
+               else
+                       pr_info("Enabled Interrupt-remapping\n");
+       } else
+               pr_err("Failed to enable Interrupt-remapping and x2apic\n");
+#else
+       if (!cpu_has_x2apic)
+               return;
+
+       if (x2apic_preenabled)
+               panic("x2apic enabled prior OS handover,"
+                     " enable CONFIG_INTR_REMAP");
+
+       pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
+               " and x2apic\n");
+#endif
+
+       return;
+}
+#endif /* CONFIG_X86_X2APIC */
+
+#ifdef CONFIG_X86_64
+/*
+ * Detect and enable local APICs on non-SMP boards.
+ * Original code written by Keir Fraser.
+ * On AMD64 we trust the BIOS - if it says no APIC it is likely
+ * not correctly set up (usually the APIC timer won't work, etc.).
+ */
+static int __init detect_init_APIC(void)
+{
+       if (!cpu_has_apic) {
+               pr_info("No local APIC present\n");
+               return -1;
+       }
+
+       mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+       boot_cpu_physical_apicid = 0;
+       return 0;
+}
+#else
+/*
+ * Detect and initialize APIC
+ */
+static int __init detect_init_APIC(void)
+{
+       u32 h, l, features;
+
+       /* Disabled by kernel option? */
+       if (disable_apic)
+               return -1;
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model > 1) ||
+                   (boot_cpu_data.x86 >= 15))
+                       break;
+               goto no_apic;
+       case X86_VENDOR_INTEL:
+               if (boot_cpu_data.x86 == 6 || boot_cpu_data.x86 == 15 ||
+                   (boot_cpu_data.x86 == 5 && cpu_has_apic))
+                       break;
+               goto no_apic;
+       default:
+               goto no_apic;
+       }
+
+       if (!cpu_has_apic) {
+               /*
+                * Override the BIOS and try to enable the local APIC only if
+                * "lapic" was specified.
+                */
+               if (!force_enable_local_apic) {
+                       pr_info("Local APIC disabled by BIOS -- "
+                               "you can enable it with \"lapic\"\n");
+                       return -1;
+               }
+               /*
+                * Some BIOSes disable the local APIC in the APIC_BASE
+                * MSR. This can only be done in software for Intel P6 or later
+                * and AMD K7 (Model > 1) or later.
+                */
+               rdmsr(MSR_IA32_APICBASE, l, h);
+               if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+                       pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+                       l &= ~MSR_IA32_APICBASE_BASE;
+                       l |= MSR_IA32_APICBASE_ENABLE | APIC_DEFAULT_PHYS_BASE;
+                       wrmsr(MSR_IA32_APICBASE, l, h);
+                       enabled_via_apicbase = 1;
+               }
+       }
+       /*
+        * The APIC feature bit should now be enabled
+        * in `cpuid'
+        */
+       features = cpuid_edx(1);
+       if (!(features & (1 << X86_FEATURE_APIC))) {
+               pr_warning("Could not enable APIC!\n");
+               return -1;
+       }
+       set_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+       mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
+
+       /* The BIOS may have set up the APIC at some other address */
+       rdmsr(MSR_IA32_APICBASE, l, h);
+       if (l & MSR_IA32_APICBASE_ENABLE)
+               mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+
+       pr_info("Found and enabled local APIC!\n");
+
+       apic_pm_activate();
+
+       return 0;
+
+no_apic:
+       pr_info("No local APIC present or hardware disabled\n");
+       return -1;
+}
+#endif
+
+#ifdef CONFIG_X86_64
+void __init early_init_lapic_mapping(void)
+{
+       unsigned long phys_addr;
+
+       /*
+        * If no local APIC can be found then bail out:
+        * it means there is neither an MP table nor a MADT
+        */
+       if (!smp_found_config)
+               return;
+
+       phys_addr = mp_lapic_addr;
+
+       set_fixmap_nocache(FIX_APIC_BASE, phys_addr);
+       apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+                   APIC_BASE, phys_addr);
+
+       /*
+        * Fetch the APIC ID of the BSP in case we have a
+        * default configuration (or the MP table is broken).
+        */
+       boot_cpu_physical_apicid = read_apic_id();
+}
+#endif
+
+/**
+ * init_apic_mappings - initialize APIC mappings
+ */
+void __init init_apic_mappings(void)
+{
+#ifdef CONFIG_X86_X2APIC
+       if (x2apic) {
+               boot_cpu_physical_apicid = read_apic_id();
+               return;
+       }
+#endif
+
+       /*
+        * If no local APIC can be found then set up a fake all
+        * zeroes page to simulate the local APIC and another
+        * one for the IO-APIC.
+        */
+       if (!smp_found_config && detect_init_APIC()) {
+               apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+               apic_phys = __pa(apic_phys);
+       } else
+               apic_phys = mp_lapic_addr;
+
+       set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
+       apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
+                               APIC_BASE, apic_phys);
+
+       /*
+        * Fetch the APIC ID of the BSP in case we have a
+        * default configuration (or the MP table is broken).
+        */
+       if (boot_cpu_physical_apicid == -1U)
+               boot_cpu_physical_apicid = read_apic_id();
+}
+
+/*
+ * This initializes the IO-APIC and APIC hardware if this is
+ * a UP kernel.
+ */
+int apic_version[MAX_APICS];
+
+int __init APIC_init_uniprocessor(void)
+{
+       if (disable_apic) {
+               pr_info("Apic disabled\n");
+               return -1;
+       }
+#ifdef CONFIG_X86_64
+       if (!cpu_has_apic) {
+               disable_apic = 1;
+               pr_info("Apic disabled by BIOS\n");
+               return -1;
+       }
+#else
+       if (!smp_found_config && !cpu_has_apic)
+               return -1;
+
+       /*
+        * Complain if the BIOS pretends there is one.
+        */
+       if (!cpu_has_apic &&
+           APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
+               pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
+                       boot_cpu_physical_apicid);
+               clear_cpu_cap(&boot_cpu_data, X86_FEATURE_APIC);
+               return -1;
+       }
+#endif
+
+       enable_IR_x2apic();
+#ifdef CONFIG_X86_64
+       default_setup_apic_routing();
+#endif
+
+       verify_local_APIC();
+       connect_bsp_APIC();
+
+#ifdef CONFIG_X86_64
+       apic_write(APIC_ID, SET_APIC_ID(boot_cpu_physical_apicid));
+#else
+       /*
+        * Hack: In case of kdump, after a crash, the kernel might be booting
+        * on a cpu with a non-zero lapic id. But boot_cpu_physical_apicid
+        * might be zero if read from the MP tables. Get it from the LAPIC.
+        */
+# ifdef CONFIG_CRASH_DUMP
+       boot_cpu_physical_apicid = read_apic_id();
+# endif
+#endif
+       physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
+       setup_local_APIC();
+
+#ifdef CONFIG_X86_IO_APIC
+       /*
+        * Now enable IO-APICs; this actually calls clear_IO_APIC().
+        * We need clear_IO_APIC() before enabling the error vector.
+        */
+       if (!skip_ioapic_setup && nr_ioapics)
+               enable_IO_APIC();
+#endif
+
+       end_local_APIC_setup();
+
+#ifdef CONFIG_X86_IO_APIC
+       if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
+               setup_IO_APIC();
+       else {
+               nr_ioapics = 0;
+               localise_nmi_watchdog();
+       }
+#else
+       localise_nmi_watchdog();
+#endif
+
+       setup_boot_clock();
+#ifdef CONFIG_X86_64
+       check_nmi_watchdog();
+#endif
+
+       return 0;
+}
+
+/*
+ * Local APIC interrupts
+ */
+
+/*
+ * This interrupt should _never_ happen with our APIC/SMP architecture
+ */
+void smp_spurious_interrupt(struct pt_regs *regs)
+{
+       u32 v;
+
+       exit_idle();
+       irq_enter();
+       /*
+        * Check if this really is a spurious interrupt and ACK it
+        * if it is a vectored one.  Just in case...
+        * Spurious interrupts should not be ACKed.
+        */
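+       /*
+        * The 256 vectors map onto eight 32-bit ISR words spaced 0x10
+        * apart: (vector & ~0x1f) >> 1 selects the word's offset and
+        * (vector & 0x1f) the bit within it.
+        */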
+       v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
+       if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+               ack_APIC_irq();
+
+       inc_irq_stat(irq_spurious_count);
+
+       /* see sw-dev-man vol 3, chapter 7.4.13.5 */
+       pr_info("spurious APIC interrupt on CPU#%d, "
+               "should never happen.\n", smp_processor_id());
+       irq_exit();
+}
+
+/*
+ * This interrupt should never happen with our APIC/SMP architecture
+ */
+void smp_error_interrupt(struct pt_regs *regs)
+{
+       u32 v, v1;
+
+       exit_idle();
+       irq_enter();
+       /* First tickle the hardware, only then report what went on. -- REW */
+       v = apic_read(APIC_ESR);
+       apic_write(APIC_ESR, 0);
+       v1 = apic_read(APIC_ESR);
+       ack_APIC_irq();
+       atomic_inc(&irq_err_count);
+
+       /*
+        * Here is what the APIC error bits mean:
+        * 0: Send CS error
+        * 1: Receive CS error
+        * 2: Send accept error
+        * 3: Receive accept error
+        * 4: Reserved
+        * 5: Send illegal vector
+        * 6: Received illegal vector
+        * 7: Illegal register address
+        */
+       pr_debug("APIC error on CPU%d: %02x(%02x)\n",
+               smp_processor_id(), v, v1);
+       irq_exit();
+}
+
+/**
+ * connect_bsp_APIC - attach the APIC to the interrupt system
+ */
+void __init connect_bsp_APIC(void)
+{
+#ifdef CONFIG_X86_32
+       if (pic_mode) {
+               /*
+                * Do not trust the local APIC being empty at bootup.
+                */
+               clear_local_APIC();
+               /*
+                * PIC mode, enable APIC mode in the IMCR, i.e.  connect BSP's
+                * local APIC to INT and NMI lines.
+                */
+               apic_printk(APIC_VERBOSE, "leaving PIC mode, "
+                               "enabling APIC mode.\n");
+               outb(0x70, 0x22);
+               outb(0x01, 0x23);
+       }
+#endif
+       if (apic->enable_apic_mode)
+               apic->enable_apic_mode();
+}
+
+/**
+ * disconnect_bsp_APIC - detach the APIC from the interrupt system
+ * @virt_wire_setup:   indicates whether virtual wire mode is selected
+ *
+ * Virtual wire mode is necessary to deliver legacy interrupts even when the
+ * APIC is disabled.
+ */
+void disconnect_bsp_APIC(int virt_wire_setup)
+{
+       unsigned int value;
+
+#ifdef CONFIG_X86_32
+       if (pic_mode) {
+               /*
+                * Put the board back into PIC mode (has an effect only on
+                * certain older boards).  Note that APIC interrupts, including
+                * IPIs, won't work beyond this point!  The only exception are
+                * INIT IPIs.
+                */
+               apic_printk(APIC_VERBOSE, "disabling APIC mode, "
+                               "entering PIC mode.\n");
+               outb(0x70, 0x22);
+               outb(0x00, 0x23);
+               return;
+       }
+#endif
+
+       /* Go back to Virtual Wire compatibility mode */
+
+       /* For the spurious interrupt use vector F, and enable it */
+       value = apic_read(APIC_SPIV);
+       value &= ~APIC_VECTOR_MASK;
+       value |= APIC_SPIV_APIC_ENABLED;
+       value |= 0xf;
+       apic_write(APIC_SPIV, value);
+
+       if (!virt_wire_setup) {
+               /*
+                * For LVT0 make it edge triggered, active high,
+                * external and enabled
+                */
+               value = apic_read(APIC_LVT0);
+               value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+                       APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+                       APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+               value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+               value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_EXTINT);
+               apic_write(APIC_LVT0, value);
+       } else {
+               /* Disable LVT0 */
+               apic_write(APIC_LVT0, APIC_LVT_MASKED);
+       }
+
+       /*
+        * For LVT1 make it edge triggered, active high,
+        * nmi and enabled
+        */
+       value = apic_read(APIC_LVT1);
+       value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
+                       APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
+                       APIC_LVT_LEVEL_TRIGGER | APIC_LVT_MASKED);
+       value |= APIC_LVT_REMOTE_IRR | APIC_SEND_PENDING;
+       value = SET_APIC_DELIVERY_MODE(value, APIC_MODE_NMI);
+       apic_write(APIC_LVT1, value);
+}
+
+void __cpuinit generic_processor_info(int apicid, int version)
+{
+       int cpu;
+
+       /*
+        * Validate version
+        */
+       if (version == 0x0) {
+               pr_warning("BIOS bug, APIC version is 0 for CPU#%d! "
+                          "fixing up to 0x10. (tell your hw vendor)\n",
+                               apicid);
+               version = 0x10;
+       }
+       apic_version[apicid] = version;
+
+       if (num_processors >= nr_cpu_ids) {
+               int max = nr_cpu_ids;
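+               /* Ordinal of this processor among all enumerated ones,
+                  dropped CPUs included: */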
+               int thiscpu = max + disabled_cpus;
+
+               pr_warning(
+                       "ACPI: NR_CPUS/possible_cpus limit of %i reached."
+                       "  Processor %d/0x%x ignored.\n", max, thiscpu, apicid);
+
+               disabled_cpus++;
+               return;
+       }
+
+       num_processors++;
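+       /* Hand out the first CPU number that is not yet marked present: */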
+       cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+       if (version != apic_version[boot_cpu_physical_apicid])
+               WARN_ONCE(1,
+                       "ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
+                       apic_version[boot_cpu_physical_apicid], cpu, version);
+
+       physid_set(apicid, phys_cpu_present_map);
+       if (apicid == boot_cpu_physical_apicid) {
+               /*
+                * x86_bios_cpu_apicid is required to have processors listed
+                * in same order as logical cpu numbers. Hence the first
+                * entry is BSP, and so on.
+                */
+               cpu = 0;
+       }
+       if (apicid > max_physical_apicid)
+               max_physical_apicid = apicid;
+
+#ifdef CONFIG_X86_32
+       /*
+        * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+        * but we need to work out other dependencies like SMP_SUSPEND etc.
+        * before this can be done without some confusion.
+        * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
+        *       - Ashok Raj <ashok.raj@intel.com>
+        */
+       if (max_physical_apicid >= 8) {
+               switch (boot_cpu_data.x86_vendor) {
+               case X86_VENDOR_INTEL:
+                       if (!APIC_XAPIC(version)) {
+                               def_to_bigsmp = 0;
+                               break;
+                       }
+                       /* If P4 and above, fall through */
+               case X86_VENDOR_AMD:
+                       def_to_bigsmp = 1;
+               }
+       }
+#endif
+
+#if defined(CONFIG_SMP) || defined(CONFIG_X86_64)
+       early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
+       early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
+#endif
+
+       set_cpu_possible(cpu, true);
+       set_cpu_present(cpu, true);
+}
+
+int hard_smp_processor_id(void)
+{
+       return read_apic_id();
+}
+
+void default_init_apic_ldr(void)
+{
+       unsigned long val;
+
+       apic_write(APIC_DFR, APIC_DFR_VALUE);
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
+       apic_write(APIC_LDR, val);
+}
+
+#ifdef CONFIG_X86_32
+int default_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+       return apicid_2_node[hard_smp_processor_id()];
+#else
+       return 0;
+#endif
+}
+#endif
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+static struct {
+       /*
+        * 'active' is true if the local APIC was enabled by us and
+        * not the BIOS; this signifies that we are also responsible
+        * for disabling it before entering apm/acpi suspend
+        */
+       int active;
+       /* r/w apic fields */
+       unsigned int apic_id;
+       unsigned int apic_taskpri;
+       unsigned int apic_ldr;
+       unsigned int apic_dfr;
+       unsigned int apic_spiv;
+       unsigned int apic_lvtt;
+       unsigned int apic_lvtpc;
+       unsigned int apic_lvt0;
+       unsigned int apic_lvt1;
+       unsigned int apic_lvterr;
+       unsigned int apic_tmict;
+       unsigned int apic_tdcr;
+       unsigned int apic_thmr;
+} apic_pm_state;
+
+static int lapic_suspend(struct sys_device *dev, pm_message_t state)
+{
+       unsigned long flags;
+       int maxlvt;
+
+       if (!apic_pm_state.active)
+               return 0;
+
+       maxlvt = lapic_get_maxlvt();
+
+       apic_pm_state.apic_id = apic_read(APIC_ID);
+       apic_pm_state.apic_taskpri = apic_read(APIC_TASKPRI);
+       apic_pm_state.apic_ldr = apic_read(APIC_LDR);
+       apic_pm_state.apic_dfr = apic_read(APIC_DFR);
+       apic_pm_state.apic_spiv = apic_read(APIC_SPIV);
+       apic_pm_state.apic_lvtt = apic_read(APIC_LVTT);
+       if (maxlvt >= 4)
+               apic_pm_state.apic_lvtpc = apic_read(APIC_LVTPC);
+       apic_pm_state.apic_lvt0 = apic_read(APIC_LVT0);
+       apic_pm_state.apic_lvt1 = apic_read(APIC_LVT1);
+       apic_pm_state.apic_lvterr = apic_read(APIC_LVTERR);
+       apic_pm_state.apic_tmict = apic_read(APIC_TMICT);
+       apic_pm_state.apic_tdcr = apic_read(APIC_TDCR);
+#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
+       if (maxlvt >= 5)
+               apic_pm_state.apic_thmr = apic_read(APIC_LVTTHMR);
+#endif
+
+       local_irq_save(flags);
+       disable_local_APIC();
+       local_irq_restore(flags);
+       return 0;
+}
+
+static int lapic_resume(struct sys_device *dev)
+{
+       unsigned int l, h;
+       unsigned long flags;
+       int maxlvt;
+
+       if (!apic_pm_state.active)
+               return 0;
+
+       maxlvt = lapic_get_maxlvt();
+
+       local_irq_save(flags);
+
+#ifdef CONFIG_X86_X2APIC
+       if (x2apic)
+               enable_x2apic();
+       else
+#endif
+       {
+               /*
+                * Make sure the APICBASE points to the right address
+                *
+                * FIXME! This will be wrong if we ever support suspend on
+                * SMP! We'll need to do this as part of the CPU restore!
+                */
+               rdmsr(MSR_IA32_APICBASE, l, h);
+               l &= ~MSR_IA32_APICBASE_BASE;
+               l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+               wrmsr(MSR_IA32_APICBASE, l, h);
+       }
+
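+       /*
+        * Mask the error LVT first so that restoring the other registers
+        * below cannot trigger error interrupts halfway through:
+        */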
+       apic_write(APIC_LVTERR, ERROR_APIC_VECTOR | APIC_LVT_MASKED);
+       apic_write(APIC_ID, apic_pm_state.apic_id);
+       apic_write(APIC_DFR, apic_pm_state.apic_dfr);
+       apic_write(APIC_LDR, apic_pm_state.apic_ldr);
+       apic_write(APIC_TASKPRI, apic_pm_state.apic_taskpri);
+       apic_write(APIC_SPIV, apic_pm_state.apic_spiv);
+       apic_write(APIC_LVT0, apic_pm_state.apic_lvt0);
+       apic_write(APIC_LVT1, apic_pm_state.apic_lvt1);
+#if defined(CONFIG_X86_MCE_P4THERMAL) || defined(CONFIG_X86_MCE_INTEL)
+       if (maxlvt >= 5)
+               apic_write(APIC_LVTTHMR, apic_pm_state.apic_thmr);
+#endif
+       if (maxlvt >= 4)
+               apic_write(APIC_LVTPC, apic_pm_state.apic_lvtpc);
+       apic_write(APIC_LVTT, apic_pm_state.apic_lvtt);
+       apic_write(APIC_TDCR, apic_pm_state.apic_tdcr);
+       apic_write(APIC_TMICT, apic_pm_state.apic_tmict);
+       apic_write(APIC_ESR, 0);
+       apic_read(APIC_ESR);
+       apic_write(APIC_LVTERR, apic_pm_state.apic_lvterr);
+       apic_write(APIC_ESR, 0);
+       apic_read(APIC_ESR);
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+/*
+ * This device has no shutdown method - fully functioning local APICs
+ * are needed on every CPU up until machine_halt/restart/poweroff.
+ */
+
+static struct sysdev_class lapic_sysclass = {
+       .name           = "lapic",
+       .resume         = lapic_resume,
+       .suspend        = lapic_suspend,
+};
+
+static struct sys_device device_lapic = {
+       .id     = 0,
+       .cls    = &lapic_sysclass,
+};
+
+static void __cpuinit apic_pm_activate(void)
+{
+       apic_pm_state.active = 1;
+}
+
+static int __init init_lapic_sysfs(void)
+{
+       int error;
+
+       if (!cpu_has_apic)
+               return 0;
+       /* XXX: remove suspend/resume procs if !apic_pm_state.active? */
+
+       error = sysdev_class_register(&lapic_sysclass);
+       if (!error)
+               error = sysdev_register(&device_lapic);
+       return error;
+}
+device_initcall(init_lapic_sysfs);
+
+#else  /* CONFIG_PM */
+
+static void apic_pm_activate(void) { }
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_X86_64
+/*
+ * apic_is_clustered_box() -- Check if we can expect good TSC
+ *
+ * Thus far, the major user of this is IBM's Summit2 series:
+ *
+ * Clustered boxes may have unsynced TSC problems if they are
+ * multi-chassis. Use available data to take a good guess.
+ * If in doubt, go HPET.
+ */
+__cpuinit int apic_is_clustered_box(void)
+{
+       int i, clusters, zeros;
+       unsigned id;
+       u16 *bios_cpu_apicid;
+       DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
+
+       /*
+        * There are no boxes of this kind with AMD CPUs yet. Some AMD
+        * boxes with quad-core CPUs and 8 sockets have APIC IDs of
+        * [4, 0x23] or [8, 0x27] and could be mistaken for vSMP boxes,
+        * so those still need checking...
+        */
+       if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
+               return 0;
+
+       bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
+       bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
+
+       for (i = 0; i < nr_cpu_ids; i++) {
+               /* are we being called early in kernel startup? */
+               if (bios_cpu_apicid) {
+                       id = bios_cpu_apicid[i];
+               } else {
+                       /* i < nr_cpu_ids is guaranteed by the loop bound */
+                       if (cpu_present(i))
+                               id = per_cpu(x86_bios_cpu_apicid, i);
+                       else
+                               continue;
+               }
+
+               if (id != BAD_APICID)
+                       __set_bit(APIC_CLUSTERID(id), clustermap);
+       }
+
+       /* Problem:  Partially populated chassis may not have CPUs in some of
+        * the APIC clusters they have been allocated.  Only present CPUs have
+        * x86_bios_cpu_apicid entries, thus causing zeroes in the bitmap.
+        * Since clusters are allocated sequentially, count zeros only if
+        * they are bounded by ones.
+        */
+       clusters = 0;
+       zeros = 0;
+       for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
+               if (test_bit(i, clustermap)) {
+                       clusters += 1 + zeros;
+                       zeros = 0;
+               } else
+                       ++zeros;
+       }
+
+       /* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
+        * not guaranteed to be synced between boards
+        */
+       if (is_vsmp_box() && clusters > 1)
+               return 1;
+
+       /*
+        * If clusters > 2, then this should be a multi-chassis box.
+        * May have to revisit this when multi-core + hyperthreaded CPUs come
+        * out, but AFAIK this will work even for them.
+        */
+       return (clusters > 2);
+}
+#endif
+
+/*
+ * APIC command line parameters
+ */
+static int __init setup_disableapic(char *arg)
+{
+       disable_apic = 1;
+       setup_clear_cpu_cap(X86_FEATURE_APIC);
+       return 0;
+}
+early_param("disableapic", setup_disableapic);
+
+/* same as disableapic, for compatibility */
+static int __init setup_nolapic(char *arg)
+{
+       return setup_disableapic(arg);
+}
+early_param("nolapic", setup_nolapic);
+
+static int __init parse_lapic_timer_c2_ok(char *arg)
+{
+       local_apic_timer_c2_ok = 1;
+       return 0;
+}
+early_param("lapic_timer_c2_ok", parse_lapic_timer_c2_ok);
+
+static int __init parse_disable_apic_timer(char *arg)
+{
+       disable_apic_timer = 1;
+       return 0;
+}
+early_param("noapictimer", parse_disable_apic_timer);
+
+static int __init parse_nolapic_timer(char *arg)
+{
+       disable_apic_timer = 1;
+       return 0;
+}
+early_param("nolapic_timer", parse_nolapic_timer);
+
+static int __init apic_set_verbosity(char *arg)
+{
+       if (!arg)  {
+#ifdef CONFIG_X86_64
+               skip_ioapic_setup = 0;
+               return 0;
+#endif
+               return -EINVAL;
+       }
+
+       if (strcmp("debug", arg) == 0)
+               apic_verbosity = APIC_DEBUG;
+       else if (strcmp("verbose", arg) == 0)
+               apic_verbosity = APIC_VERBOSE;
+       else {
+               pr_warning("APIC Verbosity level %s not recognised,"
+                       " use apic=verbose or apic=debug\n", arg);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+early_param("apic", apic_set_verbosity);
+
+static int __init lapic_insert_resource(void)
+{
+       if (!apic_phys)
+               return -1;
+
+       /* Put local APIC into the resource map. */
+       lapic_resource.start = apic_phys;
+       lapic_resource.end = lapic_resource.start + PAGE_SIZE - 1;
+       insert_resource(&iomem_resource, &lapic_resource);
+
+       return 0;
+}
+
+/*
+ * This must run after e820_reserve_resources(), which
+ * claims its ranges via request_resource().
+ */
+late_initcall(lapic_insert_resource);
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
new file mode 100644 (file)
index 0000000..f933822
--- /dev/null
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Flat APIC subarch code.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+#include <linux/errno.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/hardirq.h>
+#include <asm/smp.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+
+#ifdef CONFIG_ACPI
+#include <acpi/acpi_bus.h>
+#endif
+
+static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return 1;
+}
+
+static const struct cpumask *flat_target_cpus(void)
+{
+       return cpu_online_mask;
+}
+
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       cpumask_clear(retmask);
+       cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static void flat_init_apic_ldr(void)
+{
+       unsigned long val;
+       unsigned long num, id;
+
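+       /* Flat logical mode: each CPU owns one bit of the 8-bit LDR */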
+       num = smp_processor_id();
+       id = 1UL << num;
+       apic_write(APIC_DFR, APIC_DFR_FLAT);
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       val |= SET_APIC_LOGICAL_ID(id);
+       apic_write(APIC_LDR, val);
+}
+
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+       local_irq_restore(flags);
+}
+
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
+{
+       unsigned long mask = cpumask_bits(cpumask)[0];
+
+       _flat_send_IPI_mask(mask, vector);
+}
+
+static void
+flat_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector)
+{
+       unsigned long mask = cpumask_bits(cpumask)[0];
+       int cpu = smp_processor_id();
+
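+       /* Strip the sending CPU out of the destination mask: */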
+       if (cpu < BITS_PER_LONG)
+               clear_bit(cpu, &mask);
+
+       _flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_allbutself(int vector)
+{
+       int cpu = smp_processor_id();
+#ifdef CONFIG_HOTPLUG_CPU
+       int hotplug = 1;
+#else
+       int hotplug = 0;
+#endif
+       if (hotplug || vector == NMI_VECTOR) {
+               if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+                       unsigned long mask = cpumask_bits(cpu_online_mask)[0];
+
+                       if (cpu < BITS_PER_LONG)
+                               clear_bit(cpu, &mask);
+
+                       _flat_send_IPI_mask(mask, vector);
+               }
+       } else if (num_online_cpus() > 1) {
+               __default_send_IPI_shortcut(APIC_DEST_ALLBUT,
+                                           vector, apic->dest_logical);
+       }
+}
+
+static void flat_send_IPI_all(int vector)
+{
+       if (vector == NMI_VECTOR) {
+               flat_send_IPI_mask(cpu_online_mask, vector);
+       } else {
+               __default_send_IPI_shortcut(APIC_DEST_ALLINC,
+                                           vector, apic->dest_logical);
+       }
+}
+
+static unsigned int flat_get_apic_id(unsigned long x)
+{
+       unsigned int id;
+
+       id = (((x)>>24) & 0xFFu);
+
+       return id;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+       unsigned long x;
+
+       x = ((id & 0xFFu)<<24);
+       return x;
+}
+
+static unsigned int read_xapic_id(void)
+{
+       unsigned int id;
+
+       id = flat_get_apic_id(apic_read(APIC_ID));
+       return id;
+}
+
+static int flat_apic_id_registered(void)
+{
+       return physid_isset(read_xapic_id(), phys_cpu_present_map);
+}
+
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+}
+
+static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                               const struct cpumask *andmask)
+{
+       unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+       unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
+
+       return mask1 & mask2;
+}
+
+static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
+{
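+       /* Package ID is the APIC ID with the core/thread bits shifted away: */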
+       return hard_smp_processor_id() >> index_msb;
+}
+
+struct apic apic_flat =  {
+       .name                           = "flat",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = flat_acpi_madt_oem_check,
+       .apic_id_registered             = flat_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = flat_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = flat_vector_allocation_domain,
+       .init_apic_ldr                  = flat_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = flat_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = flat_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFu << 24,
+
+       .cpu_mask_to_apicid             = flat_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = flat_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = flat_send_IPI_mask,
+       .send_IPI_mask_allbutself       = flat_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = flat_send_IPI_allbutself,
+       .send_IPI_all                   = flat_send_IPI_all,
+       .send_IPI_self                  = apic_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
+
+/*
+ * Physflat mode is used when there are more than 8 CPUs on an AMD system.
+ * We cannot use logical delivery in this case because the mask
+ * overflows, so use physical mode.
+ */
+static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+#ifdef CONFIG_ACPI
+       /*
+        * Quirk: some x86_64 machines can only use physical APIC mode
+        * regardless of how many processors are present (x86_64 ES7000
+        * is an example).
+        */
+       if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
+               (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
+               printk(KERN_DEBUG "system APIC can only use physical flat mode\n");
+               return 1;
+       }
+#endif
+
+       return 0;
+}
+
+static const struct cpumask *physflat_target_cpus(void)
+{
+       return cpu_online_mask;
+}
+
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       cpumask_clear(retmask);
+       cpumask_set_cpu(cpu, retmask);
+}
+
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
+{
+       default_send_IPI_mask_sequence_phys(cpumask, vector);
+}
+
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+                                             int vector)
+{
+       default_send_IPI_mask_allbutself_phys(cpumask, vector);
+}
+
+static void physflat_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static void physflat_send_IPI_all(int vector)
+{
+       physflat_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       cpu = cpumask_first(cpumask);
+       if ((unsigned)cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+       else
+               return BAD_APICID;
+}
+
+static unsigned int
+physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                               const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+       if (cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+
+       return BAD_APICID;
+}
+
+struct apic apic_physflat =  {
+
+       .name                           = "physical flat",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = physflat_acpi_madt_oem_check,
+       .apic_id_registered             = flat_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 0, /* physical */
+
+       .target_cpus                    = physflat_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = 0,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = physflat_vector_allocation_domain,
+       /* not needed, but shouldn't hurt: */
+       .init_apic_ldr                  = flat_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = flat_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = flat_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFu << 24,
+
+       .cpu_mask_to_apicid             = physflat_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = physflat_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = physflat_send_IPI_mask,
+       .send_IPI_mask_allbutself       = physflat_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = physflat_send_IPI_allbutself,
+       .send_IPI_all                   = physflat_send_IPI_all,
+       .send_IPI_self                  = apic_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
new file mode 100644 (file)
index 0000000..d806eca
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
+ *
+ * Drives the local APIC in "clustered mode".
+ */
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/dmi.h>
+#include <linux/smp.h>
+
+#include <asm/apicdef.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+
+static unsigned bigsmp_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0xFF;
+}
+
+static int bigsmp_apic_id_registered(void)
+{
+       return 1;
+}
+
+static const cpumask_t *bigsmp_target_cpus(void)
+{
+#ifdef CONFIG_SMP
+       return &cpu_online_map;
+#else
+       return &cpumask_of_cpu(0);
+#endif
+}
+
+static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return 0;
+}
+
+static unsigned long bigsmp_check_apicid_present(int bit)
+{
+       return 1;
+}
+
+static inline unsigned long calculate_ldr(int cpu)
+{
+       unsigned long val, id;
+
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       id = per_cpu(x86_bios_cpu_apicid, cpu);
+       val |= SET_APIC_LOGICAL_ID(id);
+
+       return val;
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static void bigsmp_init_apic_ldr(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_FLAT);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
+static void bigsmp_setup_apic_routing(void)
+{
+       printk(KERN_INFO
+               "Enabling APIC mode:  Physflat.  Using %d I/O APICs\n",
+               nr_ioapics);
+}
+
+static int bigsmp_apicid_to_node(int logical_apicid)
+{
+       return apicid_2_node[hard_smp_processor_id()];
+}
+
+static int bigsmp_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < nr_cpu_ids)
+               return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
+
+       return BAD_APICID;
+}
+
+static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
+{
+       return physid_mask_of_physid(phys_apicid);
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int bigsmp_cpu_to_logical_apicid(int cpu)
+{
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return cpu_physical_id(cpu);
+}
+
+static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       /* For clustered we don't have a good way to do this yet - hack */
+       return physids_promote(0xFFL);
+}
+
+static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return 1;
+}
+
+/* As we are using a single CPU as destination, pick only one CPU here: */
+static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
+}
+
+static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                             const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+       if (cpu < nr_cpu_ids)
+               return bigsmp_cpu_to_logical_apicid(cpu);
+
+       return BAD_APICID;
+}
+
+static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static void bigsmp_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static void bigsmp_send_IPI_all(int vector)
+{
+       bigsmp_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int dmi_bigsmp; /* can be set by dmi scanners */
+
+static int hp_ht_bigsmp(const struct dmi_system_id *d)
+{
+       printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
+       dmi_bigsmp = 1;
+
+       return 0;
+}
+
+
+static const struct dmi_system_id bigsmp_dmi_table[] = {
+       { hp_ht_bigsmp, "HP ProLiant DL760 G2",
+               {       DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
+               }
+       },
+
+       { hp_ht_bigsmp, "HP ProLiant DL740",
+               {       DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
+               }
+       },
+       { } /* NULL entry stops DMI scanning */
+};
+
+static void bigsmp_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       cpus_clear(*retmask);
+       cpu_set(cpu, *retmask);
+}
+
+static int probe_bigsmp(void)
+{
+       if (def_to_bigsmp)
+               dmi_bigsmp = 1;
+       else
+               dmi_check_system(bigsmp_dmi_table);
+
+       return dmi_bigsmp;
+}
+
+struct apic apic_bigsmp = {
+
+       .name                           = "bigsmp",
+       .probe                          = probe_bigsmp,
+       .acpi_madt_oem_check            = NULL,
+       .apic_id_registered             = bigsmp_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       /* phys delivery to target CPU: */
+       .irq_dest_mode                  = 0,
+
+       .target_cpus                    = bigsmp_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = 0,
+       .check_apicid_used              = bigsmp_check_apicid_used,
+       .check_apicid_present           = bigsmp_check_apicid_present,
+
+       .vector_allocation_domain       = bigsmp_vector_allocation_domain,
+       .init_apic_ldr                  = bigsmp_init_apic_ldr,
+
+       .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
+       .setup_apic_routing             = bigsmp_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = bigsmp_apicid_to_node,
+       .cpu_to_logical_apicid          = bigsmp_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = bigsmp_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = bigsmp_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = bigsmp_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = bigsmp_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = bigsmp_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = bigsmp_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = bigsmp_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = bigsmp_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = bigsmp_send_IPI_allbutself,
+       .send_IPI_all                   = bigsmp_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+       .wait_for_init_deassert         = default_wait_for_init_deassert,
+
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
new file mode 100644 (file)
index 0000000..19588f2
--- /dev/null
@@ -0,0 +1,780 @@
+/*
+ * Written by: Garry Forsgren, Unisys Corporation
+ *             Natalie Protasevich, Unisys Corporation
+ *
+ * This file contains the code to configure and interface
+ * with Unisys ES7000 series hardware system manager.
+ *
+ * Copyright (c) 2003 Unisys Corporation.
+ * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
+ *
+ *   All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Contact information: Unisys Corporation, Township Line & Union Meeting
+ * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
+ *
+ * http://www.unisys.com
+ */
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/cpumask.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/nmi.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+
+#include <asm/apicdef.h>
+#include <asm/atomic.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+
+/*
+ * ES7000 chipsets
+ */
+
+#define NON_UNISYS                     0
+#define ES7000_CLASSIC                 1
+#define ES7000_ZORRO                   2
+
+#define        MIP_REG                         1
+#define        MIP_PSAI_REG                    4
+
+#define        MIP_BUSY                        1
+#define        MIP_SPIN                        0xf0000
+#define        MIP_VALID                       0x0100000000000000ULL
+#define        MIP_SW_APIC                     0x1020b
+
+#define        MIP_PORT(val)                   ((val >> 32) & 0xffff)
+
+#define        MIP_RD_LO(val)                  (val & 0xffffffff)
+
+struct mip_reg {
+       unsigned long long              off_0x00;
+       unsigned long long              off_0x08;
+       unsigned long long              off_0x10;
+       unsigned long long              off_0x18;
+       unsigned long long              off_0x20;
+       unsigned long long              off_0x28;
+       unsigned long long              off_0x30;
+       unsigned long long              off_0x38;
+};
+
+struct mip_reg_info {
+       unsigned long long              mip_info;
+       unsigned long long              delivery_info;
+       unsigned long long              host_reg;
+       unsigned long long              mip_reg;
+};
+
+struct psai {
+       unsigned long long              entry_type;
+       unsigned long long              addr;
+       unsigned long long              bep_addr;
+};
+
+#ifdef CONFIG_ACPI
+
+struct es7000_oem_table {
+       struct acpi_table_header        Header;
+       u32                             OEMTableAddr;
+       u32                             OEMTableSize;
+};
+
+static unsigned long                   oem_addrX;
+static unsigned long                   oem_size;
+
+#endif
+
+/*
+ * ES7000 Globals
+ */
+
+static volatile unsigned long          *psai;
+static struct mip_reg                  *mip_reg;
+static struct mip_reg                  *host_reg;
+static int                             mip_port;
+static unsigned long                   mip_addr;
+static unsigned long                   host_addr;
+
+int                                    es7000_plat;
+
+/*
+ * GSI override for ES7000 platforms.
+ */
+
+static unsigned int                    base;
+
+static int
+es7000_rename_gsi(int ioapic, int gsi)
+{
+       if (es7000_plat == ES7000_ZORRO)
+               return gsi;
+
+       if (!base) {
+               int i;
+               for (i = 0; i < nr_ioapics; i++)
+                       base += nr_ioapic_registers[i];
+       }
+
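+       /* Remap the 16 ISA GSIs of I/O APIC 0 above all I/O APIC pins: */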
+       if (!ioapic && (gsi < 16))
+               gsi += base;
+
+       return gsi;
+}
+
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+{
+       unsigned long vect = 0, psaival = 0;
+
+       if (psai == NULL)
+               return -1;
+
+       vect = ((unsigned long)__pa(eip)/0x1000) << 16;
+       psaival = (0x1000000 | vect | cpu);
+
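+       /* Bit 24 marks a request pending; wait until any earlier one drains: */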
+       while (*psai & 0x1000000)
+               ;
+
+       *psai = psaival;
+
+       return 0;
+}
+
+static int es7000_apic_is_cluster(void)
+{
+       /* MPENTIUMIII */
+       if (boot_cpu_data.x86 == 6 &&
+           (boot_cpu_data.x86_model >= 7 && boot_cpu_data.x86_model <= 11))
+               return 1;
+
+       return 0;
+}
+
+static void setup_unisys(void)
+{
+       /*
+        * Determine the generation of the ES7000 currently running.
+        *
+        * es7000_plat = 1 if the machine is a 5xx ES7000 box
+        * es7000_plat = 2 if the machine is an x86_64 ES7000 box
+        *
+        */
+       if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
+               es7000_plat = ES7000_ZORRO;
+       else
+               es7000_plat = ES7000_CLASSIC;
+       ioapic_renumber_irq = es7000_rename_gsi;
+}
+
+/*
+ * Parse the OEM Table:
+ */
+static int parse_unisys_oem(char *oemptr)
+{
+       int                     i;
+       int                     success = 0;
+       unsigned char           type, size;
+       unsigned long           val;
+       char                    *tp = NULL;
+       struct psai             *psaip = NULL;
+       struct mip_reg_info     *mi;
+       struct mip_reg          *host, *mip;
+
+       tp = oemptr;
+
+       tp += 8;
+
+       for (i = 0; i <= 6; i++) {
+               type = *tp++;
+               size = *tp++;
+               tp -= 2;
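+               /* rewind so the record structs below overlay the
+                  type/size header bytes too */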
+               switch (type) {
+               case MIP_REG:
+                       mi = (struct mip_reg_info *)tp;
+                       val = MIP_RD_LO(mi->host_reg);
+                       host_addr = val;
+                       host = (struct mip_reg *)val;
+                       host_reg = __va(host);
+                       val = MIP_RD_LO(mi->mip_reg);
+                       mip_port = MIP_PORT(mi->mip_info);
+                       mip_addr = val;
+                       mip = (struct mip_reg *)val;
+                       mip_reg = __va(mip);
+                       pr_debug("es7000_mipcfg: host_reg = 0x%lx\n",
+                                (unsigned long)host_reg);
+                       pr_debug("es7000_mipcfg: mip_reg = 0x%lx\n",
+                                (unsigned long)mip_reg);
+                       success++;
+                       break;
+               case MIP_PSAI_REG:
+                       psaip = (struct psai *)tp;
+                       if (tp != NULL) {
+                               if (psaip->addr)
+                                       psai = __va(psaip->addr);
+                               else
+                                       psai = NULL;
+                               success++;
+                       }
+                       break;
+               default:
+                       break;
+               }
+               tp += size;
+       }
+
+       if (success < 2)
+               es7000_plat = NON_UNISYS;
+       else
+               setup_unisys();
+
+       return es7000_plat;
+}
+
+#ifdef CONFIG_ACPI
+static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
+{
+       struct acpi_table_header *header = NULL;
+       struct es7000_oem_table *table;
+       acpi_size tbl_size;
+       acpi_status ret;
+       int i = 0;
+
+       for (;;) {
+               ret = acpi_get_table_with_size("OEM1", i++, &header, &tbl_size);
+               if (!ACPI_SUCCESS(ret))
+                       return -1;
+
+               if (!memcmp((char *) &header->oem_id, "UNISYS", 6))
+                       break;
+
+               early_acpi_os_unmap_memory(header, tbl_size);
+       }
+
+       table = (void *)header;
+
+       oem_addrX       = table->OEMTableAddr;
+       oem_size        = table->OEMTableSize;
+
+       early_acpi_os_unmap_memory(header, tbl_size);
+
+       *oem_addr       = (unsigned long)__acpi_map_table(oem_addrX, oem_size);
+
+       return 0;
+}
+
+static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
+{
+       if (!oem_addr)
+               return;
+
+       __acpi_unmap_table((char *)oem_addr, oem_size);
+}
+
+static int es7000_check_dsdt(void)
+{
+       struct acpi_table_header header;
+
+       if (ACPI_SUCCESS(acpi_get_table_header(ACPI_SIG_DSDT, 0, &header)) &&
+           !strncmp(header.oem_id, "UNISYS", 6))
+               return 1;
+       return 0;
+}
+
+static int es7000_acpi_ret;
+
+/* Hook from generic ACPI tables.c */
+static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       unsigned long oem_addr = 0;
+       int check_dsdt;
+       int ret = 0;
+
+       /* Check the DSDT first, to avoid clearing the fixmap for oem_addr: */
+       check_dsdt = es7000_check_dsdt();
+
+       if (!find_unisys_acpi_oem_table(&oem_addr)) {
+               if (check_dsdt) {
+                       ret = parse_unisys_oem((char *)oem_addr);
+               } else {
+                       setup_unisys();
+                       ret = 1;
+               }
+               /*
+                * we need to unmap it
+                */
+               unmap_unisys_acpi_oem_table(oem_addr);
+       }
+
+       es7000_acpi_ret = ret;
+
+       return ret && !es7000_apic_is_cluster();
+}
+
+static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
+{
+       int ret = es7000_acpi_ret;
+
+       return ret && es7000_apic_is_cluster();
+}
+
+#else /* !CONFIG_ACPI: */
+static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return 0;
+}
+
+static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
+{
+       return 0;
+}
+#endif /* !CONFIG_ACPI */
+
+static void es7000_spin(int n)
+{
+       int i = 0;
+
+       while (i++ < n)
+               rep_nop();
+}
+
+static int es7000_mip_write(struct mip_reg *mip_reg)
+{
+       int status = 0;
+       int spin;
+
+       spin = MIP_SPIN;
+       while ((host_reg->off_0x38 & MIP_VALID) != 0) {
+               if (--spin <= 0) {
+                       WARN(1, "Timeout waiting for Host Valid Flag\n");
+                       return -1;
+               }
+               es7000_spin(MIP_SPIN);
+       }
+
+       memcpy(host_reg, mip_reg, sizeof(struct mip_reg));
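+       /* Kick the MIP port to signal that a new command is staged: */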
+       outb(1, mip_port);
+
+       spin = MIP_SPIN;
+
+       while ((mip_reg->off_0x38 & MIP_VALID) == 0) {
+               if (--spin <= 0) {
+                       WARN(1, "Timeout waiting for MIP Valid Flag\n");
+                       return -1;
+               }
+               es7000_spin(MIP_SPIN);
+       }
+
+       status = (mip_reg->off_0x00 & 0xffff0000000000ULL) >> 48;
+       mip_reg->off_0x38 &= ~MIP_VALID;
+
+       return status;
+}
+
+static void es7000_enable_apic_mode(void)
+{
+       struct mip_reg es7000_mip_reg;
+       int mip_status;
+
+       if (!es7000_plat)
+               return;
+
+       printk(KERN_INFO "ES7000: Enabling APIC mode.\n");
+       memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
+       es7000_mip_reg.off_0x00 = MIP_SW_APIC;
+       es7000_mip_reg.off_0x38 = MIP_VALID;
+
+       while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
+               WARN(1, "Command failed, status = %x\n", mip_status);
+}
+
+static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+
+static void es7000_wait_for_init_deassert(atomic_t *deassert)
+{
+       while (!atomic_read(deassert))
+               cpu_relax();
+}
+
+static unsigned int es7000_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0xFF;
+}
+
+static void es7000_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       default_send_IPI_mask_sequence_phys(mask, vector);
+}
+
+static void es7000_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
+}
+
+static void es7000_send_IPI_all(int vector)
+{
+       es7000_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int es7000_apic_id_registered(void)
+{
+       return 1;
+}
+
+static const cpumask_t *target_cpus_cluster(void)
+{
+       return &CPU_MASK_ALL;
+}
+
+static const cpumask_t *es7000_target_cpus(void)
+{
+       return &cpumask_of_cpu(smp_processor_id());
+}
+
+static unsigned long
+es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return 0;
+}
+static unsigned long es7000_check_apicid_present(int bit)
+{
+       return physid_isset(bit, phys_cpu_present_map);
+}
+
+static unsigned long calculate_ldr(int cpu)
+{
+       unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
+
+       return SET_APIC_LOGICAL_ID(id);
+}
+
+/*
+ * Set up the logical destination ID.
+ *
+ * Intel recommends to set DFR, LDR and TPR before enabling
+ * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
+ * document number 292116).  So here it goes...
+ */
+static void es7000_init_apic_ldr_cluster(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_CLUSTER);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
+static void es7000_init_apic_ldr(void)
+{
+       unsigned long val;
+       int cpu = smp_processor_id();
+
+       apic_write(APIC_DFR, APIC_DFR_FLAT);
+       val = calculate_ldr(cpu);
+       apic_write(APIC_LDR, val);
+}
+
+static void es7000_setup_apic_routing(void)
+{
+       int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
+
+       printk(KERN_INFO
+         "Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
+               (apic_version[apic] == 0x14) ?
+                       "Physical Cluster" : "Logical Cluster",
+               nr_ioapics, cpus_addr(*es7000_target_cpus())[0]);
+}
+
+static int es7000_apicid_to_node(int logical_apicid)
+{
+       return 0;
+}
+
+
+static int es7000_cpu_present_to_apicid(int mps_cpu)
+{
+       if (!mps_cpu)
+               return boot_cpu_physical_apicid;
+       else if (mps_cpu < nr_cpu_ids)
+               return per_cpu(x86_bios_cpu_apicid, mps_cpu);
+       else
+               return BAD_APICID;
+}
+
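+/* Handed out in call order: one physid per invocation during CPU enumeration */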
+static int cpu_id;
+
+static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
+{
+       physid_mask_t mask;
+
+       mask = physid_mask_of_physid(cpu_id);
+       ++cpu_id;
+
+       return mask;
+}
+
+/* Mapping from cpu number to logical apicid */
+static int es7000_cpu_to_logical_apicid(int cpu)
+{
+#ifdef CONFIG_SMP
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return cpu_2_logical_apicid[cpu];
+#else
+       return logical_smp_processor_id();
+#endif
+}
+
+static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       /* For clustered we don't have a good way to do this yet - hack */
+       return physids_promote(0xff);
+}
+
+static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
+{
+       boot_cpu_physical_apicid = read_apic_id();
+       return 1;
+}
+
+static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       unsigned int round = 0;
+       int cpu, uninitialized_var(apicid);
+
+       /*
+        * The cpus in the mask must all be in the same APIC cluster:
+        */
+       for_each_cpu(cpu, cpumask) {
+               int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+
+               if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
+                       WARN(1, "Not a valid mask!");
+
+                       return BAD_APICID;
+               }
+               apicid = new_apicid;
+               round++;
+       }
+       return apicid;
+}
+
+static unsigned int
+es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+                             const struct cpumask *andmask)
+{
+       int apicid = es7000_cpu_to_logical_apicid(0);
+       cpumask_var_t cpumask;
+
+       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+               return apicid;
+
+       cpumask_and(cpumask, inmask, andmask);
+       cpumask_and(cpumask, cpumask, cpu_online_mask);
+       apicid = es7000_cpu_mask_to_apicid(cpumask);
+
+       free_cpumask_var(cpumask);
+
+       return apicid;
+}
+
+static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+static int probe_es7000(void)
+{
+       /* probed later in mptable/ACPI hooks */
+       return 0;
+}
+
+static int es7000_mps_ret;
+static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem,
+               char *productid)
+{
+       int ret = 0;
+
+       if (mpc->oemptr) {
+               struct mpc_oemtable *oem_table =
+                       (struct mpc_oemtable *)mpc->oemptr;
+
+               if (!strncmp(oem, "UNISYS", 6))
+                       ret = parse_unisys_oem((char *)oem_table);
+       }
+
+       es7000_mps_ret = ret;
+
+       return ret && !es7000_apic_is_cluster();
+}
+
+static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
+               char *productid)
+{
+       int ret = es7000_mps_ret;
+
+       return ret && es7000_apic_is_cluster();
+}
+
+struct apic apic_es7000_cluster = {
+
+       .name                           = "es7000",
+       .probe                          = probe_es7000,
+       .acpi_madt_oem_check            = es7000_acpi_madt_oem_check_cluster,
+       .apic_id_registered             = es7000_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* logical delivery broadcast to all procs: */
+       .irq_dest_mode                  = 1,
+
+       .target_cpus                    = target_cpus_cluster,
+       .disable_esr                    = 1,
+       .dest_logical                   = 0,
+       .check_apicid_used              = es7000_check_apicid_used,
+       .check_apicid_present           = es7000_check_apicid_present,
+
+       .vector_allocation_domain       = es7000_vector_allocation_domain,
+       .init_apic_ldr                  = es7000_init_apic_ldr_cluster,
+
+       .ioapic_phys_id_map             = es7000_ioapic_phys_id_map,
+       .setup_apic_routing             = es7000_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = es7000_apicid_to_node,
+       .cpu_to_logical_apicid          = es7000_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = es7000_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = es7000_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = es7000_check_phys_apicid_present,
+       .enable_apic_mode               = es7000_enable_apic_mode,
+       .phys_pkg_id                    = es7000_phys_pkg_id,
+       .mps_oem_check                  = es7000_mps_oem_check_cluster,
+
+       .get_apic_id                    = es7000_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = es7000_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = es7000_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = es7000_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = es7000_send_IPI_allbutself,
+       .send_IPI_all                   = es7000_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_secondary_cpu           = wakeup_secondary_cpu_via_mip,
+
+       .trampoline_phys_low            = 0x467,
+       .trampoline_phys_high           = 0x469,
+
+       .wait_for_init_deassert         = NULL,
+
+       /* Nothing to do for most platforms, since cleared by the INIT cycle: */
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
+
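+/*
+ * The non-cluster variant below differs mainly in using fixed, physical
+ * IRQ delivery (dest_Fixed / irq_dest_mode 0) and in not overriding
+ * wakeup_secondary_cpu, so the generic INIT-based wakeup is used instead
+ * of the MIP-based wakeup of the cluster variant:
+ */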
+struct apic apic_es7000 = {
+
+       .name                           = "es7000",
+       .probe                          = probe_es7000,
+       .acpi_madt_oem_check            = es7000_acpi_madt_oem_check,
+       .apic_id_registered             = es7000_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       /* phys delivery to target CPUs: */
+       .irq_dest_mode                  = 0,
+
+       .target_cpus                    = es7000_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = 0,
+       .check_apicid_used              = es7000_check_apicid_used,
+       .check_apicid_present           = es7000_check_apicid_present,
+
+       .vector_allocation_domain       = es7000_vector_allocation_domain,
+       .init_apic_ldr                  = es7000_init_apic_ldr,
+
+       .ioapic_phys_id_map             = es7000_ioapic_phys_id_map,
+       .setup_apic_routing             = es7000_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = es7000_apicid_to_node,
+       .cpu_to_logical_apicid          = es7000_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = es7000_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = es7000_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = es7000_check_phys_apicid_present,
+       .enable_apic_mode               = es7000_enable_apic_mode,
+       .phys_pkg_id                    = es7000_phys_pkg_id,
+       .mps_oem_check                  = es7000_mps_oem_check,
+
+       .get_apic_id                    = es7000_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = es7000_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = es7000_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = es7000_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = es7000_send_IPI_allbutself,
+       .send_IPI_all                   = es7000_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .trampoline_phys_low            = 0x467,
+       .trampoline_phys_high           = 0x469,
+
+       .wait_for_init_deassert         = es7000_wait_for_init_deassert,
+
+       /* Nothing to do for most platforms, since cleared by the INIT cycle: */
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
new file mode 100644 (file)
index 0000000..00e6071
--- /dev/null
@@ -0,0 +1,4160 @@
+/*
+ *     Intel IO-APIC support for multi-Pentium hosts.
+ *
+ *     Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ *
+ *     Many thanks to Stig Venaas for trying out countless experimental
+ *     patches and reporting/debugging problems patiently!
+ *
+ *     (c) 1999, Multiple IO-APIC support, developed by
+ *     Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
+ *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
+ *     further tested and cleaned up by Zach Brown <zab@redhat.com>
+ *     and Ingo Molnar <mingo@redhat.com>
+ *
+ *     Fixes
+ *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
+ *                                     thanks to Eric Gilmore
+ *                                     and Rolf G. Tews
+ *                                     for testing these extensively
+ *     Paul Diefenbaugh        :       Added full ACPI support
+ */
+
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/mc146818rtc.h>
+#include <linux/compiler.h>
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/msi.h>
+#include <linux/htirq.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/jiffies.h>     /* time_after() */
+#ifdef CONFIG_ACPI
+#include <acpi/acpi_bus.h>
+#endif
+#include <linux/bootmem.h>
+#include <linux/dmar.h>
+#include <linux/hpet.h>
+
+#include <asm/idle.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/desc.h>
+#include <asm/proto.h>
+#include <asm/acpi.h>
+#include <asm/dma.h>
+#include <asm/timer.h>
+#include <asm/i8259.h>
+#include <asm/nmi.h>
+#include <asm/msidef.h>
+#include <asm/hypertransport.h>
+#include <asm/setup.h>
+#include <asm/irq_remapping.h>
+#include <asm/hpet.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_irq.h>
+
+#include <asm/apic.h>
+
+#define __apicdebuginit(type) static type __init
+
+/*
+ *      Is the SiS APIC rmw (read-modify-write) bug present?
+ *      -1 = don't know, 0 = no, 1 = yes
+ */
+int sis_apic_bug = -1;
+
+static DEFINE_SPINLOCK(ioapic_lock);
+static DEFINE_SPINLOCK(vector_lock);
+
+/*
+ * # of IRQ routing registers
+ */
+int nr_ioapic_registers[MAX_IO_APICS];
+
+/* I/O APIC entries */
+struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
+int nr_ioapics;
+
+/* MP IRQ source entries */
+struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
+
+/* # of MP IRQ source entries */
+int mp_irq_entries;
+
+#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
+int mp_bus_id_to_type[MAX_MP_BUSSES];
+#endif
+
+DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
+
+int skip_ioapic_setup;
+
+void arch_disable_smp_support(void)
+{
+#ifdef CONFIG_PCI
+       noioapicquirk = 1;
+       noioapicreroute = -1;
+#endif
+       skip_ioapic_setup = 1;
+}
+
+static int __init parse_noapic(char *str)
+{
+       /* disable IO-APIC */
+       arch_disable_smp_support();
+       return 0;
+}
+early_param("noapic", parse_noapic);
+
+struct irq_pin_list;
+
+/*
+ * This is performance-critical; we want to do it in O(1).
+ *
+ * The ordering of this list favors 1:1 mappings
+ * between pins and IRQs.
+ */
+
+struct irq_pin_list {
+       int apic, pin;
+       struct irq_pin_list *next;
+};
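+
+/*
+ * In the common 1:1 case this list has a single (apic, pin) node;
+ * shared ISA-space IRQs chain additional nodes via ->next.
+ */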
+
+static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
+{
+       struct irq_pin_list *pin;
+       int node;
+
+       node = cpu_to_node(cpu);
+
+       pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
+
+       return pin;
+}
+
+struct irq_cfg {
+       struct irq_pin_list *irq_2_pin;
+       cpumask_var_t domain;
+       cpumask_var_t old_domain;
+       unsigned move_cleanup_count;
+       u8 vector;
+       u8 move_in_progress : 1;
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+       u8 move_desc_pending : 1;
+#endif
+};
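+
+/*
+ * ->domain is the set of cpus the vector is currently allocated on;
+ * ->old_domain keeps the previous set while a migration
+ * (move_in_progress) is still being cleaned up.
+ */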
+
+/* irq_cfg is indexed by IRQ number; it is sized by the sum of all RTEs in all I/O APICs. */
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg irq_cfgx[] = {
+#else
+static struct irq_cfg irq_cfgx[NR_IRQS] = {
+#endif
+       [0]  = { .vector = IRQ0_VECTOR,  },
+       [1]  = { .vector = IRQ1_VECTOR,  },
+       [2]  = { .vector = IRQ2_VECTOR,  },
+       [3]  = { .vector = IRQ3_VECTOR,  },
+       [4]  = { .vector = IRQ4_VECTOR,  },
+       [5]  = { .vector = IRQ5_VECTOR,  },
+       [6]  = { .vector = IRQ6_VECTOR,  },
+       [7]  = { .vector = IRQ7_VECTOR,  },
+       [8]  = { .vector = IRQ8_VECTOR,  },
+       [9]  = { .vector = IRQ9_VECTOR,  },
+       [10] = { .vector = IRQ10_VECTOR, },
+       [11] = { .vector = IRQ11_VECTOR, },
+       [12] = { .vector = IRQ12_VECTOR, },
+       [13] = { .vector = IRQ13_VECTOR, },
+       [14] = { .vector = IRQ14_VECTOR, },
+       [15] = { .vector = IRQ15_VECTOR, },
+};
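+
+/*
+ * The 16 legacy ISA IRQs above get fixed vectors up front; everything
+ * else is assigned dynamically via assign_irq_vector().
+ */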
+
+int __init arch_early_irq_init(void)
+{
+       struct irq_cfg *cfg;
+       struct irq_desc *desc;
+       int count;
+       int i;
+
+       cfg = irq_cfgx;
+       count = ARRAY_SIZE(irq_cfgx);
+
+       for (i = 0; i < count; i++) {
+               desc = irq_to_desc(i);
+               desc->chip_data = &cfg[i];
+               alloc_bootmem_cpumask_var(&cfg[i].domain);
+               alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+               if (i < NR_IRQS_LEGACY)
+                       cpumask_setall(cfg[i].domain);
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_SPARSE_IRQ
+static struct irq_cfg *irq_cfg(unsigned int irq)
+{
+       struct irq_cfg *cfg = NULL;
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+       if (desc)
+               cfg = desc->chip_data;
+
+       return cfg;
+}
+
+static struct irq_cfg *get_one_free_irq_cfg(int cpu)
+{
+       struct irq_cfg *cfg;
+       int node;
+
+       node = cpu_to_node(cpu);
+
+       cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
+       if (cfg) {
+               if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
+                       kfree(cfg);
+                       cfg = NULL;
+               } else if (!alloc_cpumask_var_node(&cfg->old_domain,
+                                                         GFP_ATOMIC, node)) {
+                       free_cpumask_var(cfg->domain);
+                       kfree(cfg);
+                       cfg = NULL;
+               } else {
+                       cpumask_clear(cfg->domain);
+                       cpumask_clear(cfg->old_domain);
+               }
+       }
+
+       return cfg;
+}
+
+int arch_init_chip_data(struct irq_desc *desc, int cpu)
+{
+       struct irq_cfg *cfg;
+
+       cfg = desc->chip_data;
+       if (!cfg) {
+               desc->chip_data = get_one_free_irq_cfg(cpu);
+               if (!desc->chip_data) {
+                       printk(KERN_ERR "can not alloc irq_cfg\n");
+                       BUG_ON(1);
+               }
+       }
+
+       return 0;
+}
+
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+
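+/*
+ * Deep-copy the irq_2_pin list when an irq_desc migrates to another
+ * node; if any allocation fails, free the partial copy and keep
+ * using the old list:
+ */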
+static void
+init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
+{
+       struct irq_pin_list *old_entry, *head, *tail, *entry;
+
+       cfg->irq_2_pin = NULL;
+       old_entry = old_cfg->irq_2_pin;
+       if (!old_entry)
+               return;
+
+       entry = get_one_free_irq_2_pin(cpu);
+       if (!entry)
+               return;
+
+       entry->apic     = old_entry->apic;
+       entry->pin      = old_entry->pin;
+       head            = entry;
+       tail            = entry;
+       old_entry       = old_entry->next;
+       while (old_entry) {
+               entry = get_one_free_irq_2_pin(cpu);
+               if (!entry) {
+                       entry = head;
+                       while (entry) {
+                               head = entry->next;
+                               kfree(entry);
+                               entry = head;
+                       }
+                       /* still use the old one */
+                       return;
+               }
+               entry->apic     = old_entry->apic;
+               entry->pin      = old_entry->pin;
+               tail->next      = entry;
+               tail            = entry;
+               old_entry       = old_entry->next;
+       }
+
+       tail->next = NULL;
+       cfg->irq_2_pin = head;
+}
+
+static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
+{
+       struct irq_pin_list *entry, *next;
+
+       if (old_cfg->irq_2_pin == cfg->irq_2_pin)
+               return;
+
+       entry = old_cfg->irq_2_pin;
+
+       while (entry) {
+               next = entry->next;
+               kfree(entry);
+               entry = next;
+       }
+       old_cfg->irq_2_pin = NULL;
+}
+
+void arch_init_copy_chip_data(struct irq_desc *old_desc,
+                                struct irq_desc *desc, int cpu)
+{
+       struct irq_cfg *cfg;
+       struct irq_cfg *old_cfg;
+
+       cfg = get_one_free_irq_cfg(cpu);
+
+       if (!cfg)
+               return;
+
+       desc->chip_data = cfg;
+
+       old_cfg = old_desc->chip_data;
+
+       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
+
+       init_copy_irq_2_pin(old_cfg, cfg, cpu);
+}
+
+static void free_irq_cfg(struct irq_cfg *old_cfg)
+{
+       kfree(old_cfg);
+}
+
+void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
+{
+       struct irq_cfg *old_cfg, *cfg;
+
+       old_cfg = old_desc->chip_data;
+       cfg = desc->chip_data;
+
+       if (old_cfg == cfg)
+               return;
+
+       if (old_cfg) {
+               free_irq_2_pin(old_cfg, cfg);
+               free_irq_cfg(old_cfg);
+               old_desc->chip_data = NULL;
+       }
+}
+
+static void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+       struct irq_cfg *cfg = desc->chip_data;
+
+       if (!cfg->move_in_progress) {
+               /* this means that the domain has not changed */
+               if (!cpumask_intersects(desc->affinity, mask))
+                       cfg->move_desc_pending = 1;
+       }
+}
+#endif
+
+#else
+static struct irq_cfg *irq_cfg(unsigned int irq)
+{
+       return irq < nr_irqs ? irq_cfgx + irq : NULL;
+}
+
+#endif
+
+#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
+static inline void
+set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+}
+#endif
+
+struct io_apic {
+       unsigned int index;
+       unsigned int unused[3];
+       unsigned int data;
+};
+
+static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
+{
+       return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
+               + (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
+}
+
+static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
+{
+       struct io_apic __iomem *io_apic = io_apic_base(apic);
+       writel(reg, &io_apic->index);
+       return readl(&io_apic->data);
+}
+
+static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+{
+       struct io_apic __iomem *io_apic = io_apic_base(apic);
+       writel(reg, &io_apic->index);
+       writel(value, &io_apic->data);
+}
+
+/*
+ * Re-write a value: to be used for read-modify-write
+ * cycles where the read already set up the index register.
+ *
+ * Older SiS APICs require us to rewrite the index register.
+ */
+static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
+{
+       struct io_apic __iomem *io_apic = io_apic_base(apic);
+
+       if (sis_apic_bug)
+               writel(reg, &io_apic->index);
+       writel(value, &io_apic->data);
+}
+
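+/*
+ * Returns true if any pin of this irq still has its Remote IRR bit
+ * set, i.e. a level-triggered interrupt is awaiting EOI:
+ */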
+static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
+{
+       struct irq_pin_list *entry;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       entry = cfg->irq_2_pin;
+       for (;;) {
+               unsigned int reg;
+               int pin;
+
+               if (!entry)
+                       break;
+               pin = entry->pin;
+               reg = io_apic_read(entry->apic, 0x10 + pin*2);
+               /* Is the remote IRR bit set? */
+               if (reg & IO_APIC_REDIR_REMOTE_IRR) {
+                       spin_unlock_irqrestore(&ioapic_lock, flags);
+                       return true;
+               }
+               if (!entry->next)
+                       break;
+               entry = entry->next;
+       }
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+       return false;
+}
+
+union entry_union {
+       struct { u32 w1, w2; };
+       struct IO_APIC_route_entry entry;
+};
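+
+/*
+ * The union above lets the 64-bit routing entry be read and written
+ * as the two 32-bit words that the IO-APIC registers expose:
+ */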
+
+static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
+{
+       union entry_union eu;
+       unsigned long flags;
+       spin_lock_irqsave(&ioapic_lock, flags);
+       eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
+       eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+       return eu.entry;
+}
+
+/*
+ * When we write a new IO APIC routing entry, we need to write the high
+ * word first! If the mask bit in the low word is clear, we will enable
+ * the interrupt, and we need to make sure the entry is fully populated
+ * before that happens.
+ */
+static void
+__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+       union entry_union eu;
+       eu.entry = e;
+       io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+       io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+}
+
+void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&ioapic_lock, flags);
+       __ioapic_write_entry(apic, pin, e);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+/*
+ * When we mask an IO APIC routing entry, we need to write the low
+ * word first, in order to set the mask bit before we change the
+ * high bits!
+ */
+static void ioapic_mask_entry(int apic, int pin)
+{
+       unsigned long flags;
+       union entry_union eu = { .entry.mask = 1 };
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       io_apic_write(apic, 0x10 + 2*pin, eu.w1);
+       io_apic_write(apic, 0x11 + 2*pin, eu.w2);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+#ifdef CONFIG_SMP
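+/*
+ * After an irq has been migrated, IPI the cpus in the old domain so
+ * they release the previously used vector; if no temporary cpumask
+ * can be allocated, fall back to one IPI per cpu:
+ */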
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+       cpumask_var_t cleanup_mask;
+
+       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+               unsigned int i;
+               cfg->move_cleanup_count = 0;
+               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                       cfg->move_cleanup_count++;
+               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                       apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
+       } else {
+               cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+               cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
+               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+               free_cpumask_var(cleanup_mask);
+       }
+       cfg->move_in_progress = 0;
+}
+
+static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
+{
+       int apic, pin;
+       struct irq_pin_list *entry;
+       u8 vector = cfg->vector;
+
+       entry = cfg->irq_2_pin;
+       for (;;) {
+               unsigned int reg;
+
+               if (!entry)
+                       break;
+
+               apic = entry->apic;
+               pin = entry->pin;
+#ifdef CONFIG_INTR_REMAP
+               /*
+                * With interrupt-remapping, destination information comes
+                * from interrupt-remapping table entry.
+                */
+               if (!irq_remapped(irq))
+                       io_apic_write(apic, 0x11 + pin*2, dest);
+#else
+               io_apic_write(apic, 0x11 + pin*2, dest);
+#endif
+               reg = io_apic_read(apic, 0x10 + pin*2);
+               reg &= ~IO_APIC_REDIR_VECTOR_MASK;
+               reg |= vector;
+               io_apic_modify(apic, 0x10 + pin*2, reg);
+               if (!entry->next)
+                       break;
+               entry = entry->next;
+       }
+}
+
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
+
+/*
+ * Either sets desc->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
+ * leaves desc->affinity untouched.
+ */
+static unsigned int
+set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
+{
+       struct irq_cfg *cfg;
+       unsigned int irq;
+
+       if (!cpumask_intersects(mask, cpu_online_mask))
+               return BAD_APICID;
+
+       irq = desc->irq;
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
+               return BAD_APICID;
+
+       cpumask_and(desc->affinity, cfg->domain, mask);
+       set_extra_move_desc(desc, mask);
+
+       return apic->cpu_mask_to_apicid_and(desc->affinity, cpu_online_mask);
+}
+
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+       struct irq_cfg *cfg;
+       unsigned long flags;
+       unsigned int dest;
+       unsigned int irq;
+
+       irq = desc->irq;
+       cfg = desc->chip_data;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       dest = set_desc_affinity(desc, mask);
+       if (dest != BAD_APICID) {
+               /* Only the high 8 bits are valid. */
+               dest = SET_APIC_LOGICAL_ID(dest);
+               __target_IO_APIC_irq(irq, dest, cfg);
+       }
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
+{
+       struct irq_desc *desc;
+
+       desc = irq_to_desc(irq);
+
+       set_ioapic_affinity_irq_desc(desc, mask);
+}
+#endif /* CONFIG_SMP */
+
+/*
+ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
+ * shared ISA-space IRQs, so we have to support them. We are super
+ * fast in the common case, and fast for shared ISA-space IRQs.
+ */
+static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
+{
+       struct irq_pin_list *entry;
+
+       entry = cfg->irq_2_pin;
+       if (!entry) {
+               entry = get_one_free_irq_2_pin(cpu);
+               if (!entry) {
+                       printk(KERN_ERR "can not alloc irq_2_pin to add %d - %d\n",
+                                       apic, pin);
+                       return;
+               }
+               cfg->irq_2_pin = entry;
+               entry->apic = apic;
+               entry->pin = pin;
+               return;
+       }
+
+       while (entry->next) {
+               /* not again, please */
+               if (entry->apic == apic && entry->pin == pin)
+                       return;
+
+               entry = entry->next;
+       }
+
+       entry->next = get_one_free_irq_2_pin(cpu);
+       entry = entry->next;
+       entry->apic = apic;
+       entry->pin = pin;
+}
+
+/*
+ * Reroute an IRQ to a different pin.
+ */
+static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
+                                     int oldapic, int oldpin,
+                                     int newapic, int newpin)
+{
+       struct irq_pin_list *entry = cfg->irq_2_pin;
+       int replaced = 0;
+
+       while (entry) {
+               if (entry->apic == oldapic && entry->pin == oldpin) {
+                       entry->apic = newapic;
+                       entry->pin = newpin;
+                       replaced = 1;
+                       /* every one is different, right? */
+                       break;
+               }
+               entry = entry->next;
+       }
+
+       /* why? call replace before add? */
+       if (!replaced)
+               add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
+}
+
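+/*
+ * Read-modify-write every RTE of this irq: clear the bits in
+ * mask_and, set the bits in mask_or, then run the optional final()
+ * hook (used on 64-bit to read back and synchronize):
+ */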
+static inline void io_apic_modify_irq(struct irq_cfg *cfg,
+                               int mask_and, int mask_or,
+                               void (*final)(struct irq_pin_list *entry))
+{
+       int pin;
+       struct irq_pin_list *entry;
+
+       for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
+               unsigned int reg;
+               pin = entry->pin;
+               reg = io_apic_read(entry->apic, 0x10 + pin * 2);
+               reg &= mask_and;
+               reg |= mask_or;
+               io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
+               if (final)
+                       final(entry);
+       }
+}
+
+static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
+{
+       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
+}
+
+#ifdef CONFIG_X86_64
+static void io_apic_sync(struct irq_pin_list *entry)
+{
+       /*
+        * Synchronize the IO-APIC and the CPU by doing
+        * a dummy read from the IO-APIC
+        */
+       struct io_apic __iomem *io_apic;
+       io_apic = io_apic_base(entry->apic);
+       readl(&io_apic->data);
+}
+
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
+{
+       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+}
+#else /* CONFIG_X86_32 */
+static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
+{
+       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
+}
+
+static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
+{
+       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+                       IO_APIC_REDIR_MASKED, NULL);
+}
+
+static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
+{
+       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
+                       IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
+}
+#endif /* CONFIG_X86_32 */
+
+static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
+{
+       struct irq_cfg *cfg = desc->chip_data;
+       unsigned long flags;
+
+       BUG_ON(!cfg);
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       __mask_IO_APIC_irq(cfg);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
+{
+       struct irq_cfg *cfg = desc->chip_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       __unmask_IO_APIC_irq(cfg);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
+static void mask_IO_APIC_irq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       mask_IO_APIC_irq_desc(desc);
+}
+static void unmask_IO_APIC_irq(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       unmask_IO_APIC_irq_desc(desc);
+}
+
+static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
+{
+       struct IO_APIC_route_entry entry;
+
+       /* Check delivery_mode to be sure we're not clearing an SMI pin */
+       entry = ioapic_read_entry(apic, pin);
+       if (entry.delivery_mode == dest_SMI)
+               return;
+       /*
+        * Disable it in the IO-APIC irq-routing table:
+        */
+       ioapic_mask_entry(apic, pin);
+}
+
+static void clear_IO_APIC(void)
+{
+       int apic, pin;
+
+       for (apic = 0; apic < nr_ioapics; apic++)
+               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
+                       clear_IO_APIC_pin(apic, pin);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
+ * specific CPU-side IRQs.
+ */
+
+#define MAX_PIRQS 8
+static int pirq_entries[MAX_PIRQS] = {
+       [0 ... MAX_PIRQS - 1] = -1
+};
+
+static int __init ioapic_pirq_setup(char *str)
+{
+       int i, max;
+       int ints[MAX_PIRQS+1];
+
+       get_options(str, ARRAY_SIZE(ints), ints);
+
+       apic_printk(APIC_VERBOSE, KERN_INFO
+                       "PIRQ redirection, working around broken MP-BIOS.\n");
+       max = MAX_PIRQS;
+       if (ints[0] < MAX_PIRQS)
+               max = ints[0];
+
+       for (i = 0; i < max; i++) {
+               apic_printk(APIC_VERBOSE, KERN_DEBUG
+                               "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
+               /*
+                * PIRQs are mapped upside down, usually.
+                */
+               pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
+       }
+       return 1;
+}
+
+__setup("pirq=", ioapic_pirq_setup);
+#endif /* CONFIG_X86_32 */
+
+#ifdef CONFIG_INTR_REMAP
+/* I/O APIC RTE contents at the OS boot up */
+static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
+
+/*
+ * Saves and masks all the unmasked IO-APIC RTEs
+ */
+int save_mask_IO_APIC_setup(void)
+{
+       union IO_APIC_reg_01 reg_01;
+       unsigned long flags;
+       int apic, pin;
+
+       /*
+        * The number of IO-APIC IRQ registers (== #pins):
+        */
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               spin_lock_irqsave(&ioapic_lock, flags);
+               reg_01.raw = io_apic_read(apic, 1);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+               nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+       }
+
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               early_ioapic_entries[apic] =
+                       kzalloc(sizeof(struct IO_APIC_route_entry) *
+                               nr_ioapic_registers[apic], GFP_KERNEL);
+               if (!early_ioapic_entries[apic])
+                       goto nomem;
+       }
+
+       for (apic = 0; apic < nr_ioapics; apic++)
+               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+                       struct IO_APIC_route_entry entry;
+
+                       entry = early_ioapic_entries[apic][pin] =
+                               ioapic_read_entry(apic, pin);
+                       if (!entry.mask) {
+                               entry.mask = 1;
+                               ioapic_write_entry(apic, pin, entry);
+                       }
+               }
+
+       return 0;
+
+nomem:
+       while (apic >= 0)
+               kfree(early_ioapic_entries[apic--]);
+       memset(early_ioapic_entries, 0,
+               sizeof(early_ioapic_entries));
+
+       return -ENOMEM;
+}
+
+void restore_IO_APIC_setup(void)
+{
+       int apic, pin;
+
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               if (!early_ioapic_entries[apic])
+                       break;
+               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
+                       ioapic_write_entry(apic, pin,
+                                          early_ioapic_entries[apic][pin]);
+               kfree(early_ioapic_entries[apic]);
+               early_ioapic_entries[apic] = NULL;
+       }
+}
+
+void reinit_intr_remapped_IO_APIC(int intr_remapping)
+{
+       /*
+        * For now this is a plain restore of the previous settings.
+        * TBD: when the OS enables interrupt remapping, the IO-APIC
+        * RTEs need to be set up to point at interrupt-remapping table
+        * entries; until then, do a plain restore and wait for
+        * setup_IO_APIC_irqs() to do the proper initialization.
+        */
+       restore_IO_APIC_setup();
+}
+#endif
+
+/*
+ * Find the IRQ entry number of a certain pin.
+ */
+static int find_irq_entry(int apic, int pin, int type)
+{
+       int i;
+
+       for (i = 0; i < mp_irq_entries; i++)
+               if (mp_irqs[i].irqtype == type &&
+                   (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
+                    mp_irqs[i].dstapic == MP_APIC_ALL) &&
+                   mp_irqs[i].dstirq == pin)
+                       return i;
+
+       return -1;
+}
+
+/*
+ * Find the pin to which IRQ[irq] (ISA) is connected
+ */
+static int __init find_isa_irq_pin(int irq, int type)
+{
+       int i;
+
+       for (i = 0; i < mp_irq_entries; i++) {
+               int lbus = mp_irqs[i].srcbus;
+
+               if (test_bit(lbus, mp_bus_not_pci) &&
+                   (mp_irqs[i].irqtype == type) &&
+                   (mp_irqs[i].srcbusirq == irq))
+
+                       return mp_irqs[i].dstirq;
+       }
+       return -1;
+}
+
+static int __init find_isa_irq_apic(int irq, int type)
+{
+       int i;
+
+       for (i = 0; i < mp_irq_entries; i++) {
+               int lbus = mp_irqs[i].srcbus;
+
+               if (test_bit(lbus, mp_bus_not_pci) &&
+                   (mp_irqs[i].irqtype == type) &&
+                   (mp_irqs[i].srcbusirq == irq))
+                       break;
+       }
+       if (i < mp_irq_entries) {
+               int apic;
+               for (apic = 0; apic < nr_ioapics; apic++) {
+                       if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
+                               return apic;
+               }
+       }
+
+       return -1;
+}
+
+/*
+ * Find a specific PCI IRQ entry.
+ * Not an __init, possibly needed by modules
+ */
+static int pin_2_irq(int idx, int apic, int pin);
+
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
+{
+       int apic, i, best_guess = -1;
+
+       apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
+               bus, slot, pin);
+       if (test_bit(bus, mp_bus_not_pci)) {
+               apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
+               return -1;
+       }
+       for (i = 0; i < mp_irq_entries; i++) {
+               int lbus = mp_irqs[i].srcbus;
+
+               for (apic = 0; apic < nr_ioapics; apic++)
+                       if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
+                           mp_irqs[i].dstapic == MP_APIC_ALL)
+                               break;
+
+               if (!test_bit(lbus, mp_bus_not_pci) &&
+                   !mp_irqs[i].irqtype &&
+                   (bus == lbus) &&
+                   (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
+                       int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);
+
+                       if (!(apic || IO_APIC_IRQ(irq)))
+                               continue;
+
+                       if (pin == (mp_irqs[i].srcbusirq & 3))
+                               return irq;
+                       /*
+                        * Use the first all-but-pin matching entry as a
+                        * best-guess fuzzy result for broken mptables.
+                        */
+                       if (best_guess < 0)
+                               best_guess = irq;
+               }
+       }
+       return best_guess;
+}
+
+EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
+
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+/*
+ * EISA Edge/Level control register, ELCR
+ */
+static int EISA_ELCR(unsigned int irq)
+{
+       if (irq < NR_IRQS_LEGACY) {
+               unsigned int port = 0x4d0 + (irq >> 3);
+               return (inb(port) >> (irq & 7)) & 1;
+       }
+       apic_printk(APIC_VERBOSE, KERN_INFO
+                       "Broken MPtable reports ISA irq %d\n", irq);
+       return 0;
+}
+
+#endif
+
+/* ISA interrupts are always polarity zero edge triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_ISA_trigger(idx)       (0)
+#define default_ISA_polarity(idx)      (0)
+
+/* EISA interrupts are always polarity zero and can be edge or level
+ * trigger depending on the ELCR value.  If an interrupt is listed as
+ * EISA conforming in the MP table, that means its trigger type must
+ * be read in from the ELCR */
+
+#define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].srcbusirq))
+#define default_EISA_polarity(idx)     default_ISA_polarity(idx)
+
+/* PCI interrupts are always polarity one level triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_PCI_trigger(idx)       (1)
+#define default_PCI_polarity(idx)      (1)
+
+/* MCA interrupts are always polarity zero level triggered,
+ * when listed as conforming in the MP table. */
+
+#define default_MCA_trigger(idx)       (1)
+#define default_MCA_polarity(idx)      default_ISA_polarity(idx)
+
+static int MPBIOS_polarity(int idx)
+{
+       int bus = mp_irqs[idx].srcbus;
+       int polarity;
+
+       /*
+        * Determine IRQ line polarity (high active or low active):
+        */
+       switch (mp_irqs[idx].irqflag & 3)
+       {
+               case 0: /* conforms, ie. bus-type dependent polarity */
+                       if (test_bit(bus, mp_bus_not_pci))
+                               polarity = default_ISA_polarity(idx);
+                       else
+                               polarity = default_PCI_polarity(idx);
+                       break;
+               case 1: /* high active */
+               {
+                       polarity = 0;
+                       break;
+               }
+               case 2: /* reserved */
+               {
+                       printk(KERN_WARNING "broken BIOS!!\n");
+                       polarity = 1;
+                       break;
+               }
+               case 3: /* low active */
+               {
+                       polarity = 1;
+                       break;
+               }
+               default: /* invalid */
+               {
+                       printk(KERN_WARNING "broken BIOS!!\n");
+                       polarity = 1;
+                       break;
+               }
+       }
+       return polarity;
+}
+
+static int MPBIOS_trigger(int idx)
+{
+       int bus = mp_irqs[idx].srcbus;
+       int trigger;
+
+       /*
+        * Determine IRQ trigger mode (edge or level sensitive):
+        */
+       switch ((mp_irqs[idx].irqflag>>2) & 3)
+       {
+               case 0: /* conforms, ie. bus-type dependent */
+                       if (test_bit(bus, mp_bus_not_pci))
+                               trigger = default_ISA_trigger(idx);
+                       else
+                               trigger = default_PCI_trigger(idx);
+#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
+                       switch (mp_bus_id_to_type[bus]) {
+                               case MP_BUS_ISA: /* ISA pin */
+                               {
+                                       /* set before the switch */
+                                       break;
+                               }
+                               case MP_BUS_EISA: /* EISA pin */
+                               {
+                                       trigger = default_EISA_trigger(idx);
+                                       break;
+                               }
+                               case MP_BUS_PCI: /* PCI pin */
+                               {
+                                       /* set before the switch */
+                                       break;
+                               }
+                               case MP_BUS_MCA: /* MCA pin */
+                               {
+                                       trigger = default_MCA_trigger(idx);
+                                       break;
+                               }
+                               default:
+                               {
+                                       printk(KERN_WARNING "broken BIOS!!\n");
+                                       trigger = 1;
+                                       break;
+                               }
+                       }
+#endif
+                       break;
+               case 1: /* edge */
+               {
+                       trigger = 0;
+                       break;
+               }
+               case 2: /* reserved */
+               {
+                       printk(KERN_WARNING "broken BIOS!!\n");
+                       trigger = 1;
+                       break;
+               }
+               case 3: /* level */
+               {
+                       trigger = 1;
+                       break;
+               }
+               default: /* invalid */
+               {
+                       printk(KERN_WARNING "broken BIOS!!\n");
+                       trigger = 0;
+                       break;
+               }
+       }
+       return trigger;
+}
+
+static inline int irq_polarity(int idx)
+{
+       return MPBIOS_polarity(idx);
+}
+
+static inline int irq_trigger(int idx)
+{
+       return MPBIOS_trigger(idx);
+}
+
+int (*ioapic_renumber_irq)(int ioapic, int irq);
+static int pin_2_irq(int idx, int apic, int pin)
+{
+       int irq, i;
+       int bus = mp_irqs[idx].srcbus;
+
+       /*
+        * Debugging check, we are in big trouble if this message pops up!
+        */
+       if (mp_irqs[idx].dstirq != pin)
+               printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
+
+       if (test_bit(bus, mp_bus_not_pci)) {
+               irq = mp_irqs[idx].srcbusirq;
+       } else {
+               /*
+                * PCI IRQs are mapped in order
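+                * (e.g. with 24-pin IO-APICs, apic 1 pin 3 becomes
+                * IRQ 24 + 3 = 27)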
+                */
+               i = irq = 0;
+               while (i < apic)
+                       irq += nr_ioapic_registers[i++];
+               irq += pin;
+               /*
+                * For MPS mode, so far only needed by ES7000 platform
+                */
+               if (ioapic_renumber_irq)
+                       irq = ioapic_renumber_irq(apic, irq);
+       }
+
+#ifdef CONFIG_X86_32
+       /*
+        * PCI IRQ command line redirection. Yes, limits are hardcoded.
+        */
+       if ((pin >= 16) && (pin <= 23)) {
+               if (pirq_entries[pin-16] != -1) {
+                       if (!pirq_entries[pin-16]) {
+                               apic_printk(APIC_VERBOSE, KERN_DEBUG
+                                               "disabling PIRQ%d\n", pin-16);
+                       } else {
+                               irq = pirq_entries[pin-16];
+                               apic_printk(APIC_VERBOSE, KERN_DEBUG
+                                               "using PIRQ%d -> IRQ %d\n",
+                                               pin-16, irq);
+                       }
+               }
+       }
+#endif
+
+       return irq;
+}
+
+void lock_vector_lock(void)
+{
+       /* Used to ensure that the online set of cpus does not change
+        * during assign_irq_vector.
+        */
+       spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+       spin_unlock(&vector_lock);
+}
+
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+       /*
+        * NOTE! The local APIC isn't very good at handling
+        * multiple interrupts at the same interrupt level.
+        * As the interrupt level is determined by taking the
+        * vector number and shifting that right by 4, we
+        * want to spread these out a bit so that they don't
+        * all fall in the same interrupt level.
+        *
+        * Also, we've got to be careful not to trash gate
+        * 0x80, because int 0x80 is hm, kind of importantish. ;)
+        */
+       static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+       unsigned int old_vector;
+       int cpu, err;
+       cpumask_var_t tmp_mask;
+
+       if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+               return -EBUSY;
+
+       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+               return -ENOMEM;
+
+       old_vector = cfg->vector;
+       if (old_vector) {
+               cpumask_and(tmp_mask, mask, cpu_online_mask);
+               cpumask_and(tmp_mask, cfg->domain, tmp_mask);
+               if (!cpumask_empty(tmp_mask)) {
+                       free_cpumask_var(tmp_mask);
+                       return 0;
+               }
+       }
+
+       /* Only try to allocate irqs on cpus that are present */
+       err = -ENOSPC;
+       for_each_cpu_and(cpu, mask, cpu_online_mask) {
+               int new_cpu;
+               int vector, offset;
+
+               apic->vector_allocation_domain(cpu, tmp_mask);
+
+               vector = current_vector;
+               offset = current_offset;
+next:
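+               /*
+                * Step in strides of 8 so consecutive allocations spread
+                * across priority levels (a level is vector >> 4) instead
+                * of filling one level first; 'offset' rotates the start
+                * slot once we wrap past first_system_vector.
+                */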
+               vector += 8;
+               if (vector >= first_system_vector) {
+                       /* If out of vectors on large boxen, must share them. */
+                       offset = (offset + 1) % 8;
+                       vector = FIRST_DEVICE_VECTOR + offset;
+               }
+               if (unlikely(current_vector == vector))
+                       continue;
+
+               if (test_bit(vector, used_vectors))
+                       goto next;
+
+               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+                       if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+                               goto next;
+               /* Found one! */
+               current_vector = vector;
+               current_offset = offset;
+               if (old_vector) {
+                       cfg->move_in_progress = 1;
+                       cpumask_copy(cfg->old_domain, cfg->domain);
+               }
+               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+                       per_cpu(vector_irq, new_cpu)[vector] = irq;
+               cfg->vector = vector;
+               cpumask_copy(cfg->domain, tmp_mask);
+               err = 0;
+               break;
+       }
+       free_cpumask_var(tmp_mask);
+       return err;
+}
+
+static int
+assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+       int err;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vector_lock, flags);
+       err = __assign_irq_vector(irq, cfg, mask);
+       spin_unlock_irqrestore(&vector_lock, flags);
+       return err;
+}
+
+static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
+{
+       int cpu, vector;
+
+       BUG_ON(!cfg->vector);
+
+       vector = cfg->vector;
+       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+               per_cpu(vector_irq, cpu)[vector] = -1;
+
+       cfg->vector = 0;
+       cpumask_clear(cfg->domain);
+
+       if (likely(!cfg->move_in_progress))
+               return;
+       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+               for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+                                                               vector++) {
+                       if (per_cpu(vector_irq, cpu)[vector] != irq)
+                               continue;
+                       per_cpu(vector_irq, cpu)[vector] = -1;
+                       break;
+               }
+       }
+       cfg->move_in_progress = 0;
+}
+
+void __setup_vector_irq(int cpu)
+{
+       /* Initialize vector_irq on a new cpu */
+       /* This function must be called with vector_lock held */
+       int irq, vector;
+       struct irq_cfg *cfg;
+       struct irq_desc *desc;
+
+       /* Mark the inuse vectors */
+       for_each_irq_desc(irq, desc) {
+               cfg = desc->chip_data;
+               if (!cpumask_test_cpu(cpu, cfg->domain))
+                       continue;
+               vector = cfg->vector;
+               per_cpu(vector_irq, cpu)[vector] = irq;
+       }
+       /* Mark the free vectors */
+       for (vector = 0; vector < NR_VECTORS; ++vector) {
+               irq = per_cpu(vector_irq, cpu)[vector];
+               if (irq < 0)
+                       continue;
+
+               cfg = irq_cfg(irq);
+               if (!cpumask_test_cpu(cpu, cfg->domain))
+                       per_cpu(vector_irq, cpu)[vector] = -1;
+       }
+}
+
+static struct irq_chip ioapic_chip;
+#ifdef CONFIG_INTR_REMAP
+static struct irq_chip ir_ioapic_chip;
+#endif
+
+#define IOAPIC_AUTO     -1
+#define IOAPIC_EDGE     0
+#define IOAPIC_LEVEL    1
+
+#ifdef CONFIG_X86_32
+static inline int IO_APIC_irq_trigger(int irq)
+{
+       int apic, idx, pin;
+
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+                       idx = find_irq_entry(apic, pin, mp_INT);
+                       if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
+                               return irq_trigger(idx);
+               }
+       }
+       /*
+        * nonexistent IRQs are edge default
+        */
+       return 0;
+}
+#else
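+/*
+ * On 64-bit, IO-APIC interrupts are treated as level triggered by
+ * default:
+ */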
+static inline int IO_APIC_irq_trigger(int irq)
+{
+       return 1;
+}
+#endif
+
+static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
+{
+
+       if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+           trigger == IOAPIC_LEVEL)
+               desc->status |= IRQ_LEVEL;
+       else
+               desc->status &= ~IRQ_LEVEL;
+
+#ifdef CONFIG_INTR_REMAP
+       if (irq_remapped(irq)) {
+               desc->status |= IRQ_MOVE_PCNTXT;
+               if (trigger)
+                       set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
+                                                     handle_fasteoi_irq,
+                                                    "fasteoi");
+               else
+                       set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
+                                                     handle_edge_irq, "edge");
+               return;
+       }
+#endif
+       if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
+           trigger == IOAPIC_LEVEL)
+               set_irq_chip_and_handler_name(irq, &ioapic_chip,
+                                             handle_fasteoi_irq,
+                                             "fasteoi");
+       else
+               set_irq_chip_and_handler_name(irq, &ioapic_chip,
+                                             handle_edge_irq, "edge");
+}
+
+int setup_ioapic_entry(int apic_id, int irq,
+                      struct IO_APIC_route_entry *entry,
+                      unsigned int destination, int trigger,
+                      int polarity, int vector)
+{
+       /*
+        * add it to the IO-APIC irq-routing table:
+        */
+       memset(entry, 0, sizeof(*entry));
+
+#ifdef CONFIG_INTR_REMAP
+       if (intr_remapping_enabled) {
+               struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
+               struct irte irte;
+               struct IR_IO_APIC_route_entry *ir_entry =
+                       (struct IR_IO_APIC_route_entry *) entry;
+               int index;
+
+               if (!iommu)
+                       panic("No mapping iommu for ioapic %d\n", apic_id);
+
+               index = alloc_irte(iommu, irq, 1);
+               if (index < 0)
+                       panic("Failed to allocate IRTE for ioapic %d\n", apic_id);
+
+               memset(&irte, 0, sizeof(irte));
+
+               irte.present = 1;
+               irte.dst_mode = apic->irq_dest_mode;
+               irte.trigger_mode = trigger;
+               irte.dlvry_mode = apic->irq_delivery_mode;
+               irte.vector = vector;
+               irte.dest_id = IRTE_DEST(destination);
+
+               modify_irte(irq, &irte);
+
+               ir_entry->index2 = (index >> 15) & 0x1;
+               ir_entry->zero = 0;
+               ir_entry->format = 1;
+               ir_entry->index = (index & 0x7fff);
+       } else
+#endif
+       {
+               entry->delivery_mode = apic->irq_delivery_mode;
+               entry->dest_mode = apic->irq_dest_mode;
+               entry->dest = destination;
+       }
+
+       entry->mask = 0;                                /* enable IRQ */
+       entry->trigger = trigger;
+       entry->polarity = polarity;
+       entry->vector = vector;
+
+       /* Mask level triggered irqs.
+        * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
+        */
+       if (trigger)
+               entry->mask = 1;
+       return 0;
+}
+
+static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
+                             int trigger, int polarity)
+{
+       struct irq_cfg *cfg;
+       struct IO_APIC_route_entry entry;
+       unsigned int dest;
+
+       if (!IO_APIC_IRQ(irq))
+               return;
+
+       cfg = desc->chip_data;
+
+       if (assign_irq_vector(irq, cfg, apic->target_cpus()))
+               return;
+
+       dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+
+       apic_printk(APIC_VERBOSE, KERN_DEBUG
+                   "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
+                   "IRQ %d Mode:%i Active:%i)\n",
+                   apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
+                   irq, trigger, polarity);
+
+       if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
+                              dest, trigger, polarity, cfg->vector)) {
+               printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
+                      mp_ioapics[apic_id].apicid, pin);
+               __clear_irq_vector(irq, cfg);
+               return;
+       }
+
+       ioapic_register_intr(irq, desc, trigger);
+       if (irq < NR_IRQS_LEGACY)
+               disable_8259A_irq(irq);
+
+       ioapic_write_entry(apic_id, pin, entry);
+}
+
+static void __init setup_IO_APIC_irqs(void)
+{
+       int apic_id, pin, idx, irq;
+       int notcon = 0;
+       struct irq_desc *desc;
+       struct irq_cfg *cfg;
+       int cpu = boot_cpu_id;
+
+       apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
+
+       for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
+               for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
+
+                       idx = find_irq_entry(apic_id, pin, mp_INT);
+                       if (idx == -1) {
+                               if (!notcon) {
+                                       notcon = 1;
+                                       apic_printk(APIC_VERBOSE,
+                                               KERN_DEBUG " %d-%d",
+                                               mp_ioapics[apic_id].apicid, pin);
+                               } else
+                                       apic_printk(APIC_VERBOSE, " %d-%d",
+                                               mp_ioapics[apic_id].apicid, pin);
+                               continue;
+                       }
+                       if (notcon) {
+                               apic_printk(APIC_VERBOSE,
+                                       " (apicid-pin) not connected\n");
+                               notcon = 0;
+                       }
+
+                       irq = pin_2_irq(idx, apic_id, pin);
+
+                       /*
+                        * Skip the timer IRQ if there's a quirk handler
+                        * installed and if it returns 1:
+                        */
+                       if (apic->multi_timer_check &&
+                                       apic->multi_timer_check(apic_id, irq))
+                               continue;
+
+                       desc = irq_to_desc_alloc_cpu(irq, cpu);
+                       if (!desc) {
+                               printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+                               continue;
+                       }
+                       cfg = desc->chip_data;
+                       add_pin_to_irq_cpu(cfg, cpu, apic_id, pin);
+
+                       setup_IO_APIC_irq(apic_id, pin, irq, desc,
+                                       irq_trigger(idx), irq_polarity(idx));
+               }
+       }
+
+       if (notcon)
+               apic_printk(APIC_VERBOSE,
+                       " (apicid-pin) not connected\n");
+}
+
+/*
+ * Set up the timer pin, possibly with the 8259A-master behind.
+ */
+static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
+                                       int vector)
+{
+       struct IO_APIC_route_entry entry;
+
+#ifdef CONFIG_INTR_REMAP
+       if (intr_remapping_enabled)
+               return;
+#endif
+
+       memset(&entry, 0, sizeof(entry));
+
+       /*
+        * We use logical delivery to get the timer IRQ
+        * to the first CPU.
+        */
+       entry.dest_mode = apic->irq_dest_mode;
+       entry.mask = 0;                 /* don't mask IRQ for edge */
+       entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
+       entry.delivery_mode = apic->irq_delivery_mode;
+       entry.polarity = 0;
+       entry.trigger = 0;
+       entry.vector = vector;
+
+       /*
+        * The timer IRQ doesn't have to know that behind the
+        * scene we may have a 8259A-master in AEOI mode ...
+        */
+       set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
+
+       /*
+        * Add it to the IO-APIC irq-routing table:
+        */
+       ioapic_write_entry(apic_id, pin, entry);
+}
+
+__apicdebuginit(void) print_IO_APIC(void)
+{
+       int apic, i;
+       union IO_APIC_reg_00 reg_00;
+       union IO_APIC_reg_01 reg_01;
+       union IO_APIC_reg_02 reg_02;
+       union IO_APIC_reg_03 reg_03;
+       unsigned long flags;
+       struct irq_cfg *cfg;
+       struct irq_desc *desc;
+       unsigned int irq;
+
+       if (apic_verbosity == APIC_QUIET)
+               return;
+
+       printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
+       for (i = 0; i < nr_ioapics; i++)
+               printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
+                      mp_ioapics[i].apicid, nr_ioapic_registers[i]);
+
+       /*
+        * We are a bit conservative about what we expect.  We have to
+        * know about every hardware change ASAP.
+        */
+       printk(KERN_INFO "testing the IO APIC.......................\n");
+
+       for (apic = 0; apic < nr_ioapics; apic++) {
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       reg_00.raw = io_apic_read(apic, 0);
+       reg_01.raw = io_apic_read(apic, 1);
+       if (reg_01.bits.version >= 0x10)
+               reg_02.raw = io_apic_read(apic, 2);
+       if (reg_01.bits.version >= 0x20)
+               reg_03.raw = io_apic_read(apic, 3);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+       printk("\n");
+       printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
+       printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
+       printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
+       printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
+       printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
+
+       printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
+       printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
+
+       printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
+       printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
+
+       /*
+        * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
+        * but reading reg_02 returns the value of the previously read
+        * register, so ignore it if reg_02 == reg_01.
+        */
+       if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
+               printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
+               printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
+       }
+
+       /*
+        * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
+        * or reg_03, but reading reg_0[23] returns the value of the
+        * previously read register, so ignore reg_03 if it equals reg_0[12].
+        */
+       if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
+           reg_03.raw != reg_01.raw) {
+               printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
+               printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
+       }
+
+       printk(KERN_DEBUG ".... IRQ redirection table:\n");
+
+       printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
+                         " Stat Dmod Deli Vect:   \n");
+
+       for (i = 0; i <= reg_01.bits.entries; i++) {
+               struct IO_APIC_route_entry entry;
+
+               entry = ioapic_read_entry(apic, i);
+
+               printk(KERN_DEBUG " %02x %03X ",
+                       i,
+                       entry.dest
+               );
+
+               printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
+                       entry.mask,
+                       entry.trigger,
+                       entry.irr,
+                       entry.polarity,
+                       entry.delivery_status,
+                       entry.dest_mode,
+                       entry.delivery_mode,
+                       entry.vector
+               );
+       }
+       }
+       printk(KERN_DEBUG "IRQ to pin mappings:\n");
+       for_each_irq_desc(irq, desc) {
+               struct irq_pin_list *entry;
+
+               cfg = desc->chip_data;
+               entry = cfg->irq_2_pin;
+               if (!entry)
+                       continue;
+               printk(KERN_DEBUG "IRQ%d ", irq);
+               for (;;) {
+                       printk("-> %d:%d", entry->apic, entry->pin);
+                       if (!entry->next)
+                               break;
+                       entry = entry->next;
+               }
+               printk("\n");
+       }
+
+       printk(KERN_INFO ".................................... done.\n");
+
+       return;
+}
+
+__apicdebuginit(void) print_APIC_bitfield(int base)
+{
+       unsigned int v;
+       int i, j;
+
+       if (apic_verbosity == APIC_QUIET)
+               return;
+
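+       /* each of ISR/TMR/IRR spans eight 32-bit registers spaced 0x10 apart */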
+       printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
+       for (i = 0; i < 8; i++) {
+               v = apic_read(base + i*0x10);
+               for (j = 0; j < 32; j++) {
+                       if (v & (1<<j))
+                               printk("1");
+                       else
+                               printk("0");
+               }
+               printk("\n");
+       }
+}
+
+__apicdebuginit(void) print_local_APIC(void *dummy)
+{
+       unsigned int v, ver, maxlvt;
+       u64 icr;
+
+       if (apic_verbosity == APIC_QUIET)
+               return;
+
+       printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
+               smp_processor_id(), hard_smp_processor_id());
+       v = apic_read(APIC_ID);
+       printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
+       v = apic_read(APIC_LVR);
+       printk(KERN_INFO "... APIC VERSION: %08x\n", v);
+       ver = GET_APIC_VERSION(v);
+       maxlvt = lapic_get_maxlvt();
+
+       v = apic_read(APIC_TASKPRI);
+       printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+
+       if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
+               if (!APIC_XAPIC(ver)) {
+                       v = apic_read(APIC_ARBPRI);
+                       printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
+                              v & APIC_ARBPRI_MASK);
+               }
+               v = apic_read(APIC_PROCPRI);
+               printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
+       }
+
+       /*
+        * Remote read is supported only in the 82489DX and the local APIC
+        * of Pentium processors.
+        */
+       if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
+               v = apic_read(APIC_RRR);
+               printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
+       }
+
+       v = apic_read(APIC_LDR);
+       printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
+       if (!x2apic_enabled()) {
+               v = apic_read(APIC_DFR);
+               printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
+       }
+       v = apic_read(APIC_SPIV);
+       printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
+
+       printk(KERN_DEBUG "... APIC ISR field:\n");
+       print_APIC_bitfield(APIC_ISR);
+       printk(KERN_DEBUG "... APIC TMR field:\n");
+       print_APIC_bitfield(APIC_TMR);
+       printk(KERN_DEBUG "... APIC IRR field:\n");
+       print_APIC_bitfield(APIC_IRR);
+
+       if (APIC_INTEGRATED(ver)) {             /* !82489DX */
+               if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
+                       apic_write(APIC_ESR, 0);
+
+               v = apic_read(APIC_ESR);
+               printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
+       }
+
+       icr = apic_icr_read();
+       printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
+       printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
+
+       v = apic_read(APIC_LVTT);
+       printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
+
+       if (maxlvt > 3) {                       /* PC is LVT#4. */
+               v = apic_read(APIC_LVTPC);
+               printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
+       }
+       v = apic_read(APIC_LVT0);
+       printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
+       v = apic_read(APIC_LVT1);
+       printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
+
+       if (maxlvt > 2) {                       /* ERR is LVT#3. */
+               v = apic_read(APIC_LVTERR);
+               printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
+       }
+
+       v = apic_read(APIC_TMICT);
+       printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
+       v = apic_read(APIC_TMCCT);
+       printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
+       v = apic_read(APIC_TDCR);
+       printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
+       printk("\n");
+}
+
+__apicdebuginit(void) print_all_local_APICs(void)
+{
+       int cpu;
+
+       preempt_disable();
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+       preempt_enable();
+}
+
+__apicdebuginit(void) print_PIC(void)
+{
+       unsigned int v;
+       unsigned long flags;
+
+       if (apic_verbosity == APIC_QUIET)
+               return;
+
+       printk(KERN_DEBUG "\nprinting PIC contents\n");
+
+       spin_lock_irqsave(&i8259A_lock, flags);
+
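+       /* 0x20/0x21: master 8259A, 0xa0/0xa1: slave; the IMR can be read directly */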
+       v = inb(0xa1) << 8 | inb(0x21);
+       printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
+
+       v = inb(0xa0) << 8 | inb(0x20);
+       printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
+
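+       /* OCW3 0x0b selects the ISR for reading; 0x0a switches back to the IRR */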
+       outb(0x0b,0xa0);
+       outb(0x0b,0x20);
+       v = inb(0xa0) << 8 | inb(0x20);
+       outb(0x0a,0xa0);
+       outb(0x0a,0x20);
+
+       spin_unlock_irqrestore(&i8259A_lock, flags);
+
+       printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
+
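+       /* ports 0x4d0/0x4d1 are the edge/level control registers (ELCR) */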
+       v = inb(0x4d1) << 8 | inb(0x4d0);
+       printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
+}
+
+__apicdebuginit(int) print_all_ICs(void)
+{
+       print_PIC();
+       print_all_local_APICs();
+       print_IO_APIC();
+
+       return 0;
+}
+
+fs_initcall(print_all_ICs);
+
+
+/* Where, if anywhere, the i8259 is connected in external int mode */
+static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
+
+void __init enable_IO_APIC(void)
+{
+       union IO_APIC_reg_01 reg_01;
+       int i8259_apic, i8259_pin;
+       int apic;
+       unsigned long flags;
+
+       /*
+        * The number of IO-APIC IRQ registers (== #pins):
+        */
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               spin_lock_irqsave(&ioapic_lock, flags);
+               reg_01.raw = io_apic_read(apic, 1);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+               nr_ioapic_registers[apic] = reg_01.bits.entries+1;
+       }
+       for (apic = 0; apic < nr_ioapics; apic++) {
+               int pin;
+               /* See if any of the pins is in ExtINT mode */
+               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+                       struct IO_APIC_route_entry entry;
+                       entry = ioapic_read_entry(apic, pin);
+
+                       /* If the interrupt line is enabled and in ExtINT mode
+                        * we have found the pin where the i8259 is connected.
+                        */
+                       if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
+                               ioapic_i8259.apic = apic;
+                               ioapic_i8259.pin  = pin;
+                               goto found_i8259;
+                       }
+               }
+       }
+ found_i8259:
+       /* Look to see if the MP table has reported the ExtINT */
+       /* If we could not find the appropriate pin by looking at the ioapic,
+        * the i8259 is probably not connected to the ioapic, but give the
+        * mptable a chance anyway.
+        */
+       i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
+       i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
+       /* Trust the MP table if nothing is set up in the hardware */
+       if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
+               printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
+               ioapic_i8259.pin  = i8259_pin;
+               ioapic_i8259.apic = i8259_apic;
+       }
+       /* Complain if the MP table and the hardware disagree */
+       if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
+           (i8259_pin >= 0) && (ioapic_i8259.pin >= 0)) {
+               printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
+       }
+
+       /*
+        * Do not trust the IO-APIC being empty at bootup
+        */
+       clear_IO_APIC();
+}
+
+/*
+ * Not an __init, needed by the reboot code
+ */
+void disable_IO_APIC(void)
+{
+       /*
+        * Clear the IO-APIC before rebooting:
+        */
+       clear_IO_APIC();
+
+       /*
+        * If the i8259 is routed through an IOAPIC,
+        * put that IOAPIC in virtual wire mode
+        * so legacy interrupts can be delivered.
+        */
+       if (ioapic_i8259.pin != -1) {
+               struct IO_APIC_route_entry entry;
+
+               memset(&entry, 0, sizeof(entry));
+               entry.mask            = 0; /* Enabled */
+               entry.trigger         = 0; /* Edge */
+               entry.irr             = 0;
+               entry.polarity        = 0; /* High */
+               entry.delivery_status = 0;
+               entry.dest_mode       = 0; /* Physical */
+               entry.delivery_mode   = dest_ExtINT; /* ExtInt */
+               entry.vector          = 0;
+               entry.dest            = read_apic_id();
+
+               /*
+                * Add it to the IO-APIC irq-routing table:
+                */
+               ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
+       }
+
+       disconnect_bsp_APIC(ioapic_i8259.pin != -1);
+}
+
+#ifdef CONFIG_X86_32
+/*
+ * function to set the IO-APIC physical IDs based on the
+ * values stored in the MPC table.
+ *
+ * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
+ */
+
+static void __init setup_ioapic_ids_from_mpc(void)
+{
+       union IO_APIC_reg_00 reg_00;
+       physid_mask_t phys_id_present_map;
+       int apic_id;
+       int i;
+       unsigned char old_id;
+       unsigned long flags;
+
+       if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
+               return;
+
+       /*
+        * Don't check I/O APIC IDs for xAPIC systems.  They have
+        * no meaning without the serial APIC bus.
+        */
+       if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+               return;
+       /*
+        * This is broken; anything with a real cpu count has to
+        * circumvent this idiocy regardless.
+        */
+       phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
+
+       /*
+        * Set the IOAPIC ID to the value stored in the MPC table.
+        */
+       for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
+
+               /* Read the register 0 value */
+               spin_lock_irqsave(&ioapic_lock, flags);
+               reg_00.raw = io_apic_read(apic_id, 0);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+
+               old_id = mp_ioapics[apic_id].apicid;
+
+               if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
+                       printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
+                               apic_id, mp_ioapics[apic_id].apicid);
+                       printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+                               reg_00.bits.ID);
+                       mp_ioapics[apic_id].apicid = reg_00.bits.ID;
+               }
+
+               /*
+                * Sanity check, is the ID really free? Every APIC in a
+                * system must have a unique ID or we get lots of nice
+                * 'stuck on smp_invalidate_needed IPI wait' messages.
+                */
+               if (apic->check_apicid_used(phys_id_present_map,
+                                       mp_ioapics[apic_id].apicid)) {
+                       printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
+                               apic_id, mp_ioapics[apic_id].apicid);
+                       for (i = 0; i < get_physical_broadcast(); i++)
+                               if (!physid_isset(i, phys_id_present_map))
+                                       break;
+                       if (i >= get_physical_broadcast())
+                               panic("Max APIC ID exceeded!\n");
+                       printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
+                               i);
+                       physid_set(i, phys_id_present_map);
+                       mp_ioapics[apic_id].apicid = i;
+               } else {
+                       physid_mask_t tmp;
+                       tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
+                       apic_printk(APIC_VERBOSE, "Setting %d in the "
+                                       "phys_id_present_map\n",
+                                       mp_ioapics[apic_id].apicid);
+                       physids_or(phys_id_present_map, phys_id_present_map, tmp);
+               }
+
+
+               /*
+                * We need to adjust the IRQ routing table
+                * if the ID changed.
+                */
+               if (old_id != mp_ioapics[apic_id].apicid)
+                       for (i = 0; i < mp_irq_entries; i++)
+                               if (mp_irqs[i].dstapic == old_id)
+                                       mp_irqs[i].dstapic
+                                               = mp_ioapics[apic_id].apicid;
+
+               /*
+                * Read the right value from the MPC table and
+                * write it into the ID register.
+                */
+               apic_printk(APIC_VERBOSE, KERN_INFO
+                       "...changing IO-APIC physical APIC ID to %d ...",
+                       mp_ioapics[apic_id].apicid);
+
+               reg_00.bits.ID = mp_ioapics[apic_id].apicid;
+               spin_lock_irqsave(&ioapic_lock, flags);
+               io_apic_write(apic_id, 0, reg_00.raw);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+
+               /*
+                * Sanity check
+                */
+               spin_lock_irqsave(&ioapic_lock, flags);
+               reg_00.raw = io_apic_read(apic_id, 0);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+               if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
+                       printk("could not set ID!\n");
+               else
+                       apic_printk(APIC_VERBOSE, " ok.\n");
+       }
+}
+#endif
+
+int no_timer_check __initdata;
+
+static int __init notimercheck(char *s)
+{
+       no_timer_check = 1;
+       return 1;
+}
+__setup("no_timer_check", notimercheck);
+
+/*
+ * There is a nasty bug in some older SMP boards: their mptable lies
+ * about the timer IRQ. We do the following to work around the situation:
+ *
+ *     - timer IRQ defaults to IO-APIC IRQ
+ *     - if this function detects that timer IRQs are defunct, then we fall
+ *       back to ISA timer IRQs
+ */
+static int __init timer_irq_works(void)
+{
+       unsigned long t1 = jiffies;
+       unsigned long flags;
+
+       if (no_timer_check)
+               return 1;
+
+       local_save_flags(flags);
+       local_irq_enable();
+       /* Let ten ticks pass... */
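+       /* one tick is 1000/HZ ms, so this waits ten tick periods for any HZ */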
+       mdelay((10 * 1000) / HZ);
+       local_irq_restore(flags);
+
+       /*
+        * Expect a few ticks at least, to be sure some possible
+        * glue logic does not lock up after one or two first
+        * ticks in a non-ExtINT mode.  Also the local APIC
+        * might have cached one ExtINT interrupt.  Finally, at
+        * least one tick may be lost due to delays.
+        */
+
+       /* at least four ticks must have elapsed; time_after() is wrap-safe */
+       if (time_after(jiffies, t1 + 4))
+               return 1;
+       return 0;
+}
+
+/*
+ * In the SMP+IOAPIC case it might happen that there is an unspecified
+ * number of pending IRQ events left unhandled. These cases are very rare,
+ * so we 'resend' these IRQs via IPIs to the same CPU. It's much
+ * better to do it this way, as we then do not have to be aware of
+ * 'pending' interrupts in the IRQ path, except at this point.
+ */
+/*
+ * Edge-triggered interrupts need any delayed interrupt resent,
+ * but this is now handled in the device-independent code.
+ */
+
+/*
+ * Starting up an edge-triggered IO-APIC interrupt is
+ * nasty - we need to make sure that we get the edge.
+ * If it is already asserted for some reason, we need to
+ * return 1 to indicate that it was pending.
+ *
+ * This is not complete - we should be able to fake
+ * an edge even if it isn't on the 8259A...
+ */
+
+static unsigned int startup_ioapic_irq(unsigned int irq)
+{
+       int was_pending = 0;
+       unsigned long flags;
+       struct irq_cfg *cfg;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       if (irq < NR_IRQS_LEGACY) {
+               disable_8259A_irq(irq);
+               if (i8259A_irq_pending(irq))
+                       was_pending = 1;
+       }
+       cfg = irq_cfg(irq);
+       __unmask_IO_APIC_irq(cfg);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+       return was_pending;
+}
+
+#ifdef CONFIG_X86_64
+static int ioapic_retrigger_irq(unsigned int irq)
+{
+       struct irq_cfg *cfg = irq_cfg(irq);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vector_lock, flags);
+       apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
+       spin_unlock_irqrestore(&vector_lock, flags);
+
+       return 1;
+}
+#else
+static int ioapic_retrigger_irq(unsigned int irq)
+{
+       apic->send_IPI_self(irq_cfg(irq)->vector);
+
+       return 1;
+}
+#endif
+
+/*
+ * Level and edge triggered IO-APIC interrupts need different handling,
+ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
+ * handled with the level-triggered descriptor, but that one has slightly
+ * more overhead. Level-triggered interrupts cannot be handled with the
+ * edge-triggered handler, without risking IRQ storms and other ugly
+ * races.
+ */
+
+#ifdef CONFIG_SMP
+
+#ifdef CONFIG_INTR_REMAP
+static void ir_irq_migration(struct work_struct *work);
+
+static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
+
+/*
+ * Migrate the IO-APIC irq in the presence of intr-remapping.
+ *
+ * For edge triggered, irq migration is a simple atomic update (of vector
+ * and cpu destination) of the IRTE and a flush of the hardware cache.
+ *
+ * For level triggered, we need to modify the io-apic RTE as well with the
+ * updated vector information, along with modifying the IRTE with vector and
+ * destination. So irq migration for level triggered is a little bit more
+ * complex compared to edge triggered migration. But the good news is, we use
+ * the same algorithm for level triggered migration as we have today, the
+ * only difference being that we now initiate the irq migration from process
+ * context instead of interrupt context.
+ *
+ * In the future, when we do a directed EOI (combined with cpu EOI broadcast
+ * suppression) to the IO-APIC, level triggered irq migration will also be
+ * as simple as edge triggered migration and we can do the irq migration
+ * with a simple atomic update to the IO-APIC RTE.
+ */
+static void
+migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
+{
+       struct irq_cfg *cfg;
+       struct irte irte;
+       int modify_ioapic_rte;
+       unsigned int dest;
+       unsigned long flags;
+       unsigned int irq;
+
+       if (!cpumask_intersects(mask, cpu_online_mask))
+               return;
+
+       irq = desc->irq;
+       if (get_irte(irq, &irte))
+               return;
+
+       cfg = desc->chip_data;
+       if (assign_irq_vector(irq, cfg, mask))
+               return;
+
+       set_extra_move_desc(desc, mask);
+
+       dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+
+       modify_ioapic_rte = desc->status & IRQ_LEVEL;
+       if (modify_ioapic_rte) {
+               spin_lock_irqsave(&ioapic_lock, flags);
+               __target_IO_APIC_irq(irq, dest, cfg);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+       }
+
+       irte.vector = cfg->vector;
+       irte.dest_id = IRTE_DEST(dest);
+
+       /*
+        * Modify the IRTE and flush the Interrupt entry cache.
+        */
+       modify_irte(irq, &irte);
+
+       if (cfg->move_in_progress)
+               send_cleanup_vector(cfg);
+
+       cpumask_copy(desc->affinity, mask);
+}
+
+static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
+{
+       int ret = -1;
+       struct irq_cfg *cfg = desc->chip_data;
+
+       mask_IO_APIC_irq_desc(desc);
+
+       if (io_apic_level_ack_pending(cfg)) {
+               /*
+                * Interrupt in progress. Migrating the irq now will change the
+                * vector information in the IO-APIC RTE and that will confuse
+                * the EOI broadcast performed by the cpu.
+                * So, delay the irq migration to the next instance.
+                */
+               schedule_delayed_work(&ir_migration_work, 1);
+               goto unmask;
+       }
+
+       /* everything is clear. we have right of way */
+       migrate_ioapic_irq_desc(desc, desc->pending_mask);
+
+       ret = 0;
+       desc->status &= ~IRQ_MOVE_PENDING;
+       cpumask_clear(desc->pending_mask);
+
+unmask:
+       unmask_IO_APIC_irq_desc(desc);
+
+       return ret;
+}
+
+static void ir_irq_migration(struct work_struct *work)
+{
+       unsigned int irq;
+       struct irq_desc *desc;
+
+       for_each_irq_desc(irq, desc) {
+               if (desc->status & IRQ_MOVE_PENDING) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&desc->lock, flags);
+                       if (!desc->chip->set_affinity ||
+                           !(desc->status & IRQ_MOVE_PENDING)) {
+                               desc->status &= ~IRQ_MOVE_PENDING;
+                               spin_unlock_irqrestore(&desc->lock, flags);
+                               continue;
+                       }
+
+                       desc->chip->set_affinity(irq, desc->pending_mask);
+                       spin_unlock_irqrestore(&desc->lock, flags);
+               }
+       }
+}
+
+/*
+ * Migrates the IRQ destination in the process context.
+ */
+static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
+                                           const struct cpumask *mask)
+{
+       if (desc->status & IRQ_LEVEL) {
+               desc->status |= IRQ_MOVE_PENDING;
+               cpumask_copy(desc->pending_mask, mask);
+               migrate_irq_remapped_level_desc(desc);
+               return;
+       }
+
+       migrate_ioapic_irq_desc(desc, mask);
+}
+static void set_ir_ioapic_affinity_irq(unsigned int irq,
+                                      const struct cpumask *mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       set_ir_ioapic_affinity_irq_desc(desc, mask);
+}
+#endif
+
+asmlinkage void smp_irq_move_cleanup_interrupt(void)
+{
+       unsigned vector, me;
+
+       ack_APIC_irq();
+       exit_idle();
+       irq_enter();
+
+       me = smp_processor_id();
+       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+               unsigned int irq;
+               struct irq_desc *desc;
+               struct irq_cfg *cfg;
+               irq = __get_cpu_var(vector_irq)[vector];
+
+               if (irq == -1)
+                       continue;
+
+               desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
+
+               cfg = irq_cfg(irq);
+               spin_lock(&desc->lock);
+               if (!cfg->move_cleanup_count)
+                       goto unlock;
+
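+               /* leave the vector that is currently in use for this irq */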
+               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+                       goto unlock;
+
+               __get_cpu_var(vector_irq)[vector] = -1;
+               cfg->move_cleanup_count--;
+unlock:
+               spin_unlock(&desc->lock);
+       }
+
+       irq_exit();
+}
+
+static void irq_complete_move(struct irq_desc **descp)
+{
+       struct irq_desc *desc = *descp;
+       struct irq_cfg *cfg = desc->chip_data;
+       unsigned vector, me;
+
+       if (likely(!cfg->move_in_progress)) {
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+               if (likely(!cfg->move_desc_pending))
+                       return;
+
+               /* domain has not changed, but affinity did */
+               me = smp_processor_id();
+               if (cpumask_test_cpu(me, desc->affinity)) {
+                       *descp = desc = move_irq_desc(desc, me);
+                       /* get the new one */
+                       cfg = desc->chip_data;
+                       cfg->move_desc_pending = 0;
+               }
+#endif
+               return;
+       }
+
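+       /* the irq entry stubs push the vector as ~vector into orig_ax */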
+       vector = ~get_irq_regs()->orig_ax;
+       me = smp_processor_id();
+
+       if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
+#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
+               *descp = desc = move_irq_desc(desc, me);
+               /* get the new one */
+               cfg = desc->chip_data;
+#endif
+               send_cleanup_vector(cfg);
+       }
+}
+#else
+static inline void irq_complete_move(struct irq_desc **descp) {}
+#endif
+
+#ifdef CONFIG_INTR_REMAP
+static void ack_x2apic_level(unsigned int irq)
+{
+       ack_x2APIC_irq();
+}
+
+static void ack_x2apic_edge(unsigned int irq)
+{
+       ack_x2APIC_irq();
+}
+
+#endif
+
+static void ack_apic_edge(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       irq_complete_move(&desc);
+       move_native_irq(irq);
+       ack_APIC_irq();
+}
+
+atomic_t irq_mis_count;
+
+static void ack_apic_level(unsigned int irq)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+#ifdef CONFIG_X86_32
+       unsigned long v;
+       int i;
+#endif
+       struct irq_cfg *cfg;
+       int do_unmask_irq = 0;
+
+       irq_complete_move(&desc);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       /* If we are moving the irq we need to mask it */
+       if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
+               do_unmask_irq = 1;
+               mask_IO_APIC_irq_desc(desc);
+       }
+#endif
+
+#ifdef CONFIG_X86_32
+       /*
+       * It appears there is an erratum which affects at least version 0x11
+       * of I/O APIC (that's the 82093AA and cores integrated into various
+       * chipsets).  Under certain conditions a level-triggered interrupt is
+       * erroneously delivered as an edge-triggered one but the respective IRR
+       * bit gets set nevertheless.  As a result the I/O unit expects an EOI
+       * message but it will never arrive and further interrupts are blocked
+       * from the source.  The exact reason is so far unknown, but the
+       * phenomenon was observed when two consecutive interrupt requests
+       * from a given source get delivered to the same CPU and the source is
+       * temporarily disabled in between.
+       *
+       * A workaround is to simulate an EOI message manually.  We achieve it
+       * by setting the trigger mode to edge and then to level when the edge
+       * trigger mode gets detected in the TMR of a local APIC for a
+       * level-triggered interrupt.  We mask the source for the time of the
+       * operation to prevent an edge-triggered interrupt escaping meanwhile.
+       * The idea is from Manfred Spraul.  --macro
+       */
+       cfg = desc->chip_data;
+       i = cfg->vector;
+
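+       /*
+        * The TMR spans eight 32-bit registers spaced 0x10 apart, so
+        * vector i sits in register (i / 32) * 0x10, bit (i % 32);
+        * (i & ~0x1f) >> 1 computes that register offset.
+        */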
+       v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
+#endif
+
+       /*
+        * We must acknowledge the irq before we move it or the acknowledge will
+        * not propagate properly.
+        */
+       ack_APIC_irq();
+
+       /* Now we can move and re-enable the irq */
+       if (unlikely(do_unmask_irq)) {
+               /* Only migrate the irq if the ack has been received.
+                *
+                * On rare occasions the broadcast level triggered ack gets
+                * delayed going to ioapics, and if we reprogram the
+                * vector while Remote IRR is still set the irq will never
+                * fire again.
+                *
+                * To prevent this scenario we read the Remote IRR bit
+                * of the ioapic.  This has two effects.
+                * - On any sane system the read of the ioapic will
+                *   flush writes (and acks) going to the ioapic from
+                *   this cpu.
+                * - We get to see if the ACK has actually been delivered.
+                *
+                * Based on failed experiments of reprogramming the
+                * ioapic entry from outside of irq context (starting
+                * with masking the ioapic entry and then polling until
+                * Remote IRR was clear before reprogramming the
+                * ioapic), I don't trust the Remote IRR bit to be
+                * completely accurate.
+                *
+                * However there appears to be no other way to plug
+                * this race, so if the Remote IRR bit is not
+                * accurate and is causing problems then it is a hardware bug
+                * and you can go talk to the chipset vendor about it.
+                */
+               cfg = desc->chip_data;
+               if (!io_apic_level_ack_pending(cfg))
+                       move_masked_irq(irq);
+               unmask_IO_APIC_irq_desc(desc);
+       }
+
+#ifdef CONFIG_X86_32
+       if (!(v & (1 << (i & 0x1f)))) {
+               atomic_inc(&irq_mis_count);
+               spin_lock(&ioapic_lock);
+               __mask_and_edge_IO_APIC_irq(cfg);
+               __unmask_and_level_IO_APIC_irq(cfg);
+               spin_unlock(&ioapic_lock);
+       }
+#endif
+}
+
+static struct irq_chip ioapic_chip __read_mostly = {
+       .name           = "IO-APIC",
+       .startup        = startup_ioapic_irq,
+       .mask           = mask_IO_APIC_irq,
+       .unmask         = unmask_IO_APIC_irq,
+       .ack            = ack_apic_edge,
+       .eoi            = ack_apic_level,
+#ifdef CONFIG_SMP
+       .set_affinity   = set_ioapic_affinity_irq,
+#endif
+       .retrigger      = ioapic_retrigger_irq,
+};
+
+#ifdef CONFIG_INTR_REMAP
+static struct irq_chip ir_ioapic_chip __read_mostly = {
+       .name           = "IR-IO-APIC",
+       .startup        = startup_ioapic_irq,
+       .mask           = mask_IO_APIC_irq,
+       .unmask         = unmask_IO_APIC_irq,
+       .ack            = ack_x2apic_edge,
+       .eoi            = ack_x2apic_level,
+#ifdef CONFIG_SMP
+       .set_affinity   = set_ir_ioapic_affinity_irq,
+#endif
+       .retrigger      = ioapic_retrigger_irq,
+};
+#endif
+
+static inline void init_IO_APIC_traps(void)
+{
+       int irq;
+       struct irq_desc *desc;
+       struct irq_cfg *cfg;
+
+       /*
+        * NOTE! The local APIC isn't very good at handling
+        * multiple interrupts at the same interrupt level.
+        * As the interrupt level is determined by taking the
+        * vector number and shifting that right by 4, we
+        * want to spread these out a bit so that they don't
+        * all fall in the same interrupt level.
+        *
+        * Also, we've got to be careful not to trash gate
+        * 0x80, because int 0x80 is hm, kind of importantish. ;)
+        */
+       for_each_irq_desc(irq, desc) {
+               cfg = desc->chip_data;
+               if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
+                       /*
+                        * Hmm.. We don't have an entry for this,
+                        * so default to an old-fashioned 8259
+                        * interrupt if we can..
+                        */
+                       if (irq < NR_IRQS_LEGACY)
+                               make_8259A_irq(irq);
+                       else
+                               /* Strange. Oh, well.. */
+                               desc->chip = &no_irq_chip;
+               }
+       }
+}
+
+/*
+ * The local APIC irq-chip implementation:
+ */
+
+static void mask_lapic_irq(unsigned int irq)
+{
+       unsigned long v;
+
+       v = apic_read(APIC_LVT0);
+       apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
+}
+
+static void unmask_lapic_irq(unsigned int irq)
+{
+       unsigned long v;
+
+       v = apic_read(APIC_LVT0);
+       apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
+}
+
+static void ack_lapic_irq(unsigned int irq)
+{
+       ack_APIC_irq();
+}
+
+static struct irq_chip lapic_chip __read_mostly = {
+       .name           = "local-APIC",
+       .mask           = mask_lapic_irq,
+       .unmask         = unmask_lapic_irq,
+       .ack            = ack_lapic_irq,
+};
+
+static void lapic_register_intr(int irq, struct irq_desc *desc)
+{
+       desc->status &= ~IRQ_LEVEL;
+       set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
+                                     "edge");
+}
+
+static void __init setup_nmi(void)
+{
+       /*
+        * Dirty trick to enable the NMI watchdog ...
+        * We put the 8259A master into AEOI mode and
+        * unmask LVT0 as NMI on all local APICs.
+        *
+        * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
+        * is from Maciej W. Rozycki - so we do not have to EOI from
+        * the NMI handler or the timer interrupt.
+        */
+       apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
+
+       enable_NMI_through_LVT0();
+
+       apic_printk(APIC_VERBOSE, " done.\n");
+}
+
+/*
+ * This looks a bit hackish but it's about the only way of sending
+ * a few INTA cycles to 8259As and any associated glue logic.  ICR does
+ * not support the ExtINT mode, unfortunately.  We need to send these
+ * cycles as some i82489DX-based boards have glue logic that keeps the
+ * 8259A interrupt line asserted until INTA.  --macro
+ */
+static inline void __init unlock_ExtINT_logic(void)
+{
+       int apic, pin, i;
+       struct IO_APIC_route_entry entry0, entry1;
+       unsigned char save_control, save_freq_select;
+
+       pin  = find_isa_irq_pin(8, mp_INT);
+       if (pin == -1) {
+               WARN_ON_ONCE(1);
+               return;
+       }
+       apic = find_isa_irq_apic(8, mp_INT);
+       if (apic == -1) {
+               WARN_ON_ONCE(1);
+               return;
+       }
+
+       entry0 = ioapic_read_entry(apic, pin);
+       clear_IO_APIC_pin(apic, pin);
+
+       memset(&entry1, 0, sizeof(entry1));
+
+       entry1.dest_mode = 0;                   /* physical delivery */
+       entry1.mask = 0;                        /* unmask IRQ now */
+       entry1.dest = hard_smp_processor_id();
+       entry1.delivery_mode = dest_ExtINT;
+       entry1.polarity = entry0.polarity;
+       entry1.trigger = 0;
+       entry1.vector = 0;
+
+       ioapic_write_entry(apic, pin, entry1);
+
+       save_control = CMOS_READ(RTC_CONTROL);
+       save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
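+       /* rate select 0x6 programs the RTC periodic interrupt to 1.024 kHz */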
+       CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
+                  RTC_FREQ_SELECT);
+       CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
+
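+       /* wait up to ~1s; each periodic-interrupt flag seen shortens the wait */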
+       i = 100;
+       while (i-- > 0) {
+               mdelay(10);
+               if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
+                       i -= 10;
+       }
+
+       CMOS_WRITE(save_control, RTC_CONTROL);
+       CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+       clear_IO_APIC_pin(apic, pin);
+
+       ioapic_write_entry(apic, pin, entry0);
+}
+
+static int disable_timer_pin_1 __initdata;
+/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
+static int __init disable_timer_pin_setup(char *arg)
+{
+       disable_timer_pin_1 = 1;
+       return 0;
+}
+early_param("disable_timer_pin_1", disable_timer_pin_setup);
+
+int timer_through_8259 __initdata;
+
+/*
+ * This code may look a bit paranoid, but it's supposed to cooperate with
+ * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
+ * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
+ * fanatically on his truly buggy board.
+ *
+ * FIXME: really need to revamp this for all platforms.
+ */
+static inline void __init check_timer(void)
+{
+       struct irq_desc *desc = irq_to_desc(0);
+       struct irq_cfg *cfg = desc->chip_data;
+       int cpu = boot_cpu_id;
+       int apic1, pin1, apic2, pin2;
+       unsigned long flags;
+       int no_pin1 = 0;
+
+       local_irq_save(flags);
+
+       /*
+        * get/set the timer IRQ vector:
+        */
+       disable_8259A_irq(0);
+       assign_irq_vector(0, cfg, apic->target_cpus());
+
+       /*
+        * As IRQ0 is to be enabled in the 8259A, the virtual
+        * wire has to be disabled in the local APIC.  Also
+        * timer interrupts need to be acknowledged manually in
+        * the 8259A for the i82489DX when using the NMI
+        * watchdog as that APIC treats NMIs as level-triggered.
+        * The AEOI mode will finish them in the 8259A
+        * automatically.
+        */
+       apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
+       init_8259A(1);
+#ifdef CONFIG_X86_32
+       {
+               unsigned int ver;
+
+               ver = apic_read(APIC_LVR);
+               ver = GET_APIC_VERSION(ver);
+               timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
+       }
+#endif
+
+       pin1  = find_isa_irq_pin(0, mp_INT);
+       apic1 = find_isa_irq_apic(0, mp_INT);
+       pin2  = ioapic_i8259.pin;
+       apic2 = ioapic_i8259.apic;
+
+       apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
+                   "apic1=%d pin1=%d apic2=%d pin2=%d\n",
+                   cfg->vector, apic1, pin1, apic2, pin2);
+
+       /*
+        * Some BIOS writers are clueless and report the ExtINTA
+        * I/O APIC input from the cascaded 8259A as the timer
+        * interrupt input.  So just in case, if only one pin
+        * was found above, try it both directly and through the
+        * 8259A.
+        */
+       if (pin1 == -1) {
+#ifdef CONFIG_INTR_REMAP
+               if (intr_remapping_enabled)
+                       panic("BIOS bug: timer not connected to IO-APIC");
+#endif
+               pin1 = pin2;
+               apic1 = apic2;
+               no_pin1 = 1;
+       } else if (pin2 == -1) {
+               pin2 = pin1;
+               apic2 = apic1;
+       }
+
+       if (pin1 != -1) {
+               /*
+                * Ok, does IRQ0 through the IOAPIC work?
+                */
+               if (no_pin1) {
+                       add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
+                       setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
+               } else {
+                       /* for edge trigger, setup_IO_APIC_irq already
+                        * leaves it unmasked, so we only need to unmask
+                        * it if it is level-triggered.
+                        * Do we really have a level-triggered timer?
+                        */
+                       int idx;
+                       idx = find_irq_entry(apic1, pin1, mp_INT);
+                       if (idx != -1 && irq_trigger(idx))
+                               unmask_IO_APIC_irq_desc(desc);
+               }
+               if (timer_irq_works()) {
+                       if (nmi_watchdog == NMI_IO_APIC) {
+                               setup_nmi();
+                               enable_8259A_irq(0);
+                       }
+                       if (disable_timer_pin_1 > 0)
+                               clear_IO_APIC_pin(0, pin1);
+                       goto out;
+               }
+#ifdef CONFIG_INTR_REMAP
+               if (intr_remapping_enabled)
+                       panic("timer doesn't work through Interrupt-remapped IO-APIC");
+#endif
+               local_irq_disable();
+               clear_IO_APIC_pin(apic1, pin1);
+               if (!no_pin1)
+                       apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
+                                   "8254 timer not connected to IO-APIC\n");
+
+               apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
+                           "(IRQ0) through the 8259A ...\n");
+               apic_printk(APIC_QUIET, KERN_INFO
+                           "..... (found apic %d pin %d) ...\n", apic2, pin2);
+               /*
+                * legacy devices should be connected to IO APIC #0
+                */
+               replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
+               setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
+               enable_8259A_irq(0);
+               if (timer_irq_works()) {
+                       apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
+                       timer_through_8259 = 1;
+                       if (nmi_watchdog == NMI_IO_APIC) {
+                               disable_8259A_irq(0);
+                               setup_nmi();
+                               enable_8259A_irq(0);
+                       }
+                       goto out;
+               }
+               /*
+                * Cleanup, just in case ...
+                */
+               local_irq_disable();
+               disable_8259A_irq(0);
+               clear_IO_APIC_pin(apic2, pin2);
+               apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
+       }
+
+       if (nmi_watchdog == NMI_IO_APIC) {
+               apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
+                           "through the IO-APIC - disabling NMI Watchdog!\n");
+               nmi_watchdog = NMI_NONE;
+       }
+#ifdef CONFIG_X86_32
+       timer_ack = 0;
+#endif
+
+       apic_printk(APIC_QUIET, KERN_INFO
+                   "...trying to set up timer as Virtual Wire IRQ...\n");
+
+       lapic_register_intr(0, desc);
+       apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);     /* Fixed mode */
+       enable_8259A_irq(0);
+
+       if (timer_irq_works()) {
+               apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
+               goto out;
+       }
+       local_irq_disable();
+       disable_8259A_irq(0);
+       apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
+       apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
+
+       apic_printk(APIC_QUIET, KERN_INFO
+                   "...trying to set up timer as ExtINT IRQ...\n");
+
+       init_8259A(0);
+       make_8259A_irq(0);
+       apic_write(APIC_LVT0, APIC_DM_EXTINT);
+
+       unlock_ExtINT_logic();
+
+       if (timer_irq_works()) {
+               apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
+               goto out;
+       }
+       local_irq_disable();
+       apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
+       panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
+               "report.  Then try booting with the 'noapic' option.\n");
+out:
+       local_irq_restore(flags);
+}
+
+/*
+ * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
+ * to devices.  However there may be an I/O APIC pin available for
+ * this interrupt regardless.  The pin may be left unconnected, but
+ * typically it will be reused as an ExtINT cascade interrupt for
+ * the master 8259A.  In the MPS case such a pin will normally be
+ * reported as an ExtINT interrupt in the MP table.  With ACPI
+ * there is no provision for ExtINT interrupts, and in the absence
+ * of an override it would be treated as an ordinary ISA I/O APIC
+ * interrupt, that is edge-triggered and unmasked by default.  We
+ * used to do this, but it caused problems on some systems because
+ * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
+ * the same ExtINT cascade interrupt to drive the local APIC of the
+ * bootstrap processor.  Therefore we refrain from routing IRQ2 to
+ * the I/O APIC in all cases now.  No actual device should request
+ * it anyway.  --macro
+ */
+#define PIC_IRQS       (1 << PIC_CASCADE_IR)
+
+void __init setup_IO_APIC(void)
+{
+       /*
+        * calling enable_IO_APIC() is moved to setup_local_APIC for BP
+        */
+
+       io_apic_irqs = ~PIC_IRQS;
+
+       apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
+       /*
+        * Set up IO-APIC IRQ routing.
+        */
+#ifdef CONFIG_X86_32
+       if (!acpi_ioapic)
+               setup_ioapic_ids_from_mpc();
+#endif
+       sync_Arb_IDs();
+       setup_IO_APIC_irqs();
+       init_IO_APIC_traps();
+       check_timer();
+}
+
+/*
+ *      Called after all the initialization is done. If we didn't find any
+ *      APIC bugs then we can allow the modify fast path.
+ */
+
+static int __init io_apic_bug_finalize(void)
+{
+       if (sis_apic_bug == -1)
+               sis_apic_bug = 0;
+       return 0;
+}
+
+late_initcall(io_apic_bug_finalize);
+
+struct sysfs_ioapic_data {
+       struct sys_device dev;
+       struct IO_APIC_route_entry entry[0];
+};
+static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
+
+static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
+{
+       struct IO_APIC_route_entry *entry;
+       struct sysfs_ioapic_data *data;
+       int i;
+
+       data = container_of(dev, struct sysfs_ioapic_data, dev);
+       entry = data->entry;
+       for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
+               *entry = ioapic_read_entry(dev->id, i);
+
+       return 0;
+}
+
+static int ioapic_resume(struct sys_device *dev)
+{
+       struct IO_APIC_route_entry *entry;
+       struct sysfs_ioapic_data *data;
+       unsigned long flags;
+       union IO_APIC_reg_00 reg_00;
+       int i;
+
+       data = container_of(dev, struct sysfs_ioapic_data, dev);
+       entry = data->entry;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       reg_00.raw = io_apic_read(dev->id, 0);
+       if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
+               reg_00.bits.ID = mp_ioapics[dev->id].apicid;
+               io_apic_write(dev->id, 0, reg_00.raw);
+       }
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+       for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
+               ioapic_write_entry(dev->id, i, entry[i]);
+
+       return 0;
+}
+
+static struct sysdev_class ioapic_sysdev_class = {
+       .name = "ioapic",
+       .suspend = ioapic_suspend,
+       .resume = ioapic_resume,
+};
+
+static int __init ioapic_init_sysfs(void)
+{
+       struct sys_device *dev;
+       int i, size, error;
+
+       error = sysdev_class_register(&ioapic_sysdev_class);
+       if (error)
+               return error;
+
+       for (i = 0; i < nr_ioapics; i++) {
+               size = sizeof(struct sys_device) + nr_ioapic_registers[i]
+                       * sizeof(struct IO_APIC_route_entry);
+               mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
+               if (!mp_ioapic_data[i]) {
+                       printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
+                       continue;
+               }
+               dev = &mp_ioapic_data[i]->dev;
+               dev->id = i;
+               dev->cls = &ioapic_sysdev_class;
+               error = sysdev_register(dev);
+               if (error) {
+                       kfree(mp_ioapic_data[i]);
+                       mp_ioapic_data[i] = NULL;
+                       printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
+                       continue;
+               }
+       }
+
+       return 0;
+}
+
+device_initcall(ioapic_init_sysfs);
+
+static int nr_irqs_gsi = NR_IRQS_LEGACY;
+/*
+ * Dynamic irq allocation and deallocation
+ */
+unsigned int create_irq_nr(unsigned int irq_want)
+{
+       /* Allocate an unused irq */
+       unsigned int irq;
+       unsigned int new;
+       unsigned long flags;
+       struct irq_cfg *cfg_new = NULL;
+       int cpu = boot_cpu_id;
+       struct irq_desc *desc_new = NULL;
+
+       irq = 0;
+       if (irq_want < nr_irqs_gsi)
+               irq_want = nr_irqs_gsi;
+
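+       /* scan upward from irq_want for a descriptor with no vector assigned */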
+       spin_lock_irqsave(&vector_lock, flags);
+       for (new = irq_want; new < nr_irqs; new++) {
+               desc_new = irq_to_desc_alloc_cpu(new, cpu);
+               if (!desc_new) {
+                       printk(KERN_INFO "can not get irq_desc for %d\n", new);
+                       continue;
+               }
+               cfg_new = desc_new->chip_data;
+
+               if (cfg_new->vector != 0)
+                       continue;
+               if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
+                       irq = new;
+               break;
+       }
+       spin_unlock_irqrestore(&vector_lock, flags);
+
+       if (irq > 0) {
+               dynamic_irq_init(irq);
+               /* restore it, in case dynamic_irq_init cleared it */
+               if (desc_new)
+                       desc_new->chip_data = cfg_new;
+       }
+       return irq;
+}
+
+int create_irq(void)
+{
+       unsigned int irq_want;
+       int irq;
+
+       irq_want = nr_irqs_gsi;
+       irq = create_irq_nr(irq_want);
+
+       if (irq == 0)
+               irq = -1;
+
+       return irq;
+}
+
+void destroy_irq(unsigned int irq)
+{
+       unsigned long flags;
+       struct irq_cfg *cfg;
+       struct irq_desc *desc;
+
+       /* store it, in case dynamic_irq_cleanup clears it */
+       desc = irq_to_desc(irq);
+       cfg = desc->chip_data;
+       dynamic_irq_cleanup(irq);
+       /* connect back irq_cfg */
+       if (desc)
+               desc->chip_data = cfg;
+
+#ifdef CONFIG_INTR_REMAP
+       free_irte(irq);
+#endif
+       spin_lock_irqsave(&vector_lock, flags);
+       __clear_irq_vector(irq, cfg);
+       spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+/*
+ * MSI message composition
+ */
+#ifdef CONFIG_PCI_MSI
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
+{
+       struct irq_cfg *cfg;
+       int err;
+       unsigned dest;
+
+       if (disable_apic)
+               return -ENXIO;
+
+       cfg = irq_cfg(irq);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
+       if (err)
+               return err;
+
+       dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
+
+#ifdef CONFIG_INTR_REMAP
+       if (irq_remapped(irq)) {
+               struct irte irte;
+               int ir_index;
+               u16 sub_handle;
+
+               ir_index = map_irq_to_irte_handle(irq, &sub_handle);
+               BUG_ON(ir_index == -1);
+
+               memset(&irte, 0, sizeof(irte));
+
+               irte.present = 1;
+               irte.dst_mode = apic->irq_dest_mode;
+               irte.trigger_mode = 0; /* edge */
+               irte.dlvry_mode = apic->irq_delivery_mode;
+               irte.vector = cfg->vector;
+               irte.dest_id = IRTE_DEST(dest);
+
+               modify_irte(irq, &irte);
+
+               msg->address_hi = MSI_ADDR_BASE_HI;
+               msg->data = sub_handle;
+               msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
+                                 MSI_ADDR_IR_SHV |
+                                 MSI_ADDR_IR_INDEX1(ir_index) |
+                                 MSI_ADDR_IR_INDEX2(ir_index);
+       } else
+#endif
+       {
+               msg->address_hi = MSI_ADDR_BASE_HI;
+               msg->address_lo =
+                       MSI_ADDR_BASE_LO |
+                       ((apic->irq_dest_mode == 0) ?
+                               MSI_ADDR_DEST_MODE_PHYSICAL:
+                               MSI_ADDR_DEST_MODE_LOGICAL) |
+                       ((apic->irq_delivery_mode != dest_LowestPrio) ?
+                               MSI_ADDR_REDIRECTION_CPU:
+                               MSI_ADDR_REDIRECTION_LOWPRI) |
+                       MSI_ADDR_DEST_ID(dest);
+
+               msg->data =
+                       MSI_DATA_TRIGGER_EDGE |
+                       MSI_DATA_LEVEL_ASSERT |
+                       ((apic->irq_delivery_mode != dest_LowestPrio) ?
+                               MSI_DATA_DELIVERY_FIXED:
+                               MSI_DATA_DELIVERY_LOWPRI) |
+                       MSI_DATA_VECTOR(cfg->vector);
+       }
+       return err;
+}
+
+#ifdef CONFIG_SMP
+static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_cfg *cfg;
+       struct msi_msg msg;
+       unsigned int dest;
+
+       dest = set_desc_affinity(desc, mask);
+       if (dest == BAD_APICID)
+               return;
+
+       cfg = desc->chip_data;
+
+       read_msi_msg_desc(desc, &msg);
+
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+       write_msi_msg_desc(desc, &msg);
+}
+#ifdef CONFIG_INTR_REMAP
+/*
+ * Migrate the MSI irq to another cpumask. This migration is
+ * done in the process context using interrupt-remapping hardware.
+ */
+static void
+ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_cfg *cfg = desc->chip_data;
+       unsigned int dest;
+       struct irte irte;
+
+       if (get_irte(irq, &irte))
+               return;
+
+       dest = set_desc_affinity(desc, mask);
+       if (dest == BAD_APICID)
+               return;
+
+       irte.vector = cfg->vector;
+       irte.dest_id = IRTE_DEST(dest);
+
+       /*
+        * atomically update the IRTE with the new destination and vector.
+        */
+       modify_irte(irq, &irte);
+
+       /*
+        * After this point, all the interrupts will start arriving
+        * at the new destination. So, time to clean up the previous
+        * vector allocation.
+        */
+       if (cfg->move_in_progress)
+               send_cleanup_vector(cfg);
+}
+
+#endif
+#endif /* CONFIG_SMP */
+
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip msi_chip = {
+       .name           = "PCI-MSI",
+       .unmask         = unmask_msi_irq,
+       .mask           = mask_msi_irq,
+       .ack            = ack_apic_edge,
+#ifdef CONFIG_SMP
+       .set_affinity   = set_msi_irq_affinity,
+#endif
+       .retrigger      = ioapic_retrigger_irq,
+};
+
+#ifdef CONFIG_INTR_REMAP
+static struct irq_chip msi_ir_chip = {
+       .name           = "IR-PCI-MSI",
+       .unmask         = unmask_msi_irq,
+       .mask           = mask_msi_irq,
+       .ack            = ack_x2apic_edge,
+#ifdef CONFIG_SMP
+       .set_affinity   = ir_set_msi_irq_affinity,
+#endif
+       .retrigger      = ioapic_retrigger_irq,
+};
+
+/*
+ * Map the PCI dev to the corresponding remapping hardware unit
+ * and allocate 'nvec' consecutive interrupt-remapping table entries
+ * in it.
+ */
+static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
+{
+       struct intel_iommu *iommu;
+       int index;
+
+       iommu = map_dev_to_ir(dev);
+       if (!iommu) {
+               printk(KERN_ERR
+                      "Unable to map PCI %s to iommu\n", pci_name(dev));
+               return -ENOENT;
+       }
+
+       index = alloc_irte(iommu, irq, nvec);
+       if (index < 0) {
+               printk(KERN_ERR
+                      "Unable to allocate %d IRTE for PCI %s\n", nvec,
+                      pci_name(dev));
+               return -ENOSPC;
+       }
+       return index;
+}
+#endif
+
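+/*
+ * Compose and write the MSI message for 'irq' and hook the irq up to the
+ * remapped or the regular MSI irq_chip; either way it is handled as an
+ * edge-triggered interrupt.
+ */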
+static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+{
+       int ret;
+       struct msi_msg msg;
+
+       ret = msi_compose_msg(dev, irq, &msg);
+       if (ret < 0)
+               return ret;
+
+       set_irq_msi(irq, msidesc);
+       write_msi_msg(irq, &msg);
+
+#ifdef CONFIG_INTR_REMAP
+       if (irq_remapped(irq)) {
+               struct irq_desc *desc = irq_to_desc(irq);
+               /*
+                * irq migration in process context
+                */
+               desc->status |= IRQ_MOVE_PCNTXT;
+               set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
+       } else
+#endif
+               set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
+
+       dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
+
+       return 0;
+}
+
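+/*
+ * Allocate one irq per MSI descriptor on the device. Under interrupt
+ * remapping the first sub_handle allocates a consecutive block of 'nvec'
+ * IRTEs and the following ones map to entries within that block.
+ */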
+int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+       unsigned int irq;
+       int ret, sub_handle;
+       struct msi_desc *msidesc;
+       unsigned int irq_want;
+
+#ifdef CONFIG_INTR_REMAP
+       struct intel_iommu *iommu = NULL;
+       int index = 0;
+#endif
+
+       irq_want = nr_irqs_gsi;
+       sub_handle = 0;
+       list_for_each_entry(msidesc, &dev->msi_list, list) {
+               irq = create_irq_nr(irq_want);
+               if (irq == 0)
+                       return -1;
+               irq_want = irq + 1;
+#ifdef CONFIG_INTR_REMAP
+               if (!intr_remapping_enabled)
+                       goto no_ir;
+
+               if (!sub_handle) {
+                       /*
+                        * allocate a consecutive block of IRTEs
+                        * for 'nvec'
+                        */
+                       index = msi_alloc_irte(dev, irq, nvec);
+                       if (index < 0) {
+                               ret = index;
+                               goto error;
+                       }
+               } else {
+                       iommu = map_dev_to_ir(dev);
+                       if (!iommu) {
+                               ret = -ENOENT;
+                               goto error;
+                       }
+                       /*
+                        * set up the mapping between the irq and the IRTE
+                        * base index; the sub_handle points to the
+                        * appropriate interrupt remap table entry.
+                        */
+                       set_irte_irq(irq, iommu, index, sub_handle);
+               }
+no_ir:
+#endif
+               ret = setup_msi_irq(dev, msidesc, irq);
+               if (ret < 0)
+                       goto error;
+               sub_handle++;
+       }
+       return 0;
+
+error:
+       destroy_irq(irq);
+       return ret;
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+       destroy_irq(irq);
+}
+
+#ifdef CONFIG_DMAR
+#ifdef CONFIG_SMP
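+/* Same retargeting sequence as set_msi_irq_affinity(), for the DMAR MSI. */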
+static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_cfg *cfg;
+       struct msi_msg msg;
+       unsigned int dest;
+
+       dest = set_desc_affinity(desc, mask);
+       if (dest == BAD_APICID)
+               return;
+
+       cfg = desc->chip_data;
+
+       dmar_msi_read(irq, &msg);
+
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+       dmar_msi_write(irq, &msg);
+}
+
+#endif /* CONFIG_SMP */
+
+struct irq_chip dmar_msi_type = {
+       .name = "DMAR_MSI",
+       .unmask = dmar_msi_unmask,
+       .mask = dmar_msi_mask,
+       .ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+       .set_affinity = dmar_msi_set_affinity,
+#endif
+       .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+       int ret;
+       struct msi_msg msg;
+
+       ret = msi_compose_msg(NULL, irq, &msg);
+       if (ret < 0)
+               return ret;
+       dmar_msi_write(irq, &msg);
+       set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+               "edge");
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_HPET_TIMER
+
+#ifdef CONFIG_SMP
+static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_cfg *cfg;
+       struct msi_msg msg;
+       unsigned int dest;
+
+       dest = set_desc_affinity(desc, mask);
+       if (dest == BAD_APICID)
+               return;
+
+       cfg = desc->chip_data;
+
+       hpet_msi_read(irq, &msg);
+
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+       hpet_msi_write(irq, &msg);
+}
+
+#endif /* CONFIG_SMP */
+
+struct irq_chip hpet_msi_type = {
+       .name = "HPET_MSI",
+       .unmask = hpet_msi_unmask,
+       .mask = hpet_msi_mask,
+       .ack = ack_apic_edge,
+#ifdef CONFIG_SMP
+       .set_affinity = hpet_msi_set_affinity,
+#endif
+       .retrigger = ioapic_retrigger_irq,
+};
+
+int arch_setup_hpet_msi(unsigned int irq)
+{
+       int ret;
+       struct msi_msg msg;
+
+       ret = msi_compose_msg(NULL, irq, &msg);
+       if (ret < 0)
+               return ret;
+
+       hpet_msi_write(irq, &msg);
+       set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
+               "edge");
+
+       return 0;
+}
+#endif
+
+#endif /* CONFIG_PCI_MSI */
+/*
+ * Hypertransport interrupt support
+ */
+#ifdef CONFIG_HT_IRQ
+
+#ifdef CONFIG_SMP
+
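+/*
+ * Rewrite the vector and destination id fields of an already
+ * configured Hypertransport irq message.
+ */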
+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+{
+       struct ht_irq_msg msg;
+       fetch_ht_irq_msg(irq, &msg);
+
+       msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+       msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+       msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+       msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
+
+       write_ht_irq_msg(irq, &msg);
+}
+
+static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+       struct irq_cfg *cfg;
+       unsigned int dest;
+
+       dest = set_desc_affinity(desc, mask);
+       if (dest == BAD_APICID)
+               return;
+
+       cfg = desc->chip_data;
+
+       target_ht_irq(irq, dest, cfg->vector);
+}
+
+#endif
+
+static struct irq_chip ht_irq_chip = {
+       .name           = "PCI-HT",
+       .mask           = mask_ht_irq,
+       .unmask         = unmask_ht_irq,
+       .ack            = ack_apic_edge,
+#ifdef CONFIG_SMP
+       .set_affinity   = set_ht_irq_affinity,
+#endif
+       .retrigger      = ioapic_retrigger_irq,
+};
+
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+{
+       struct irq_cfg *cfg;
+       int err;
+
+       if (disable_apic)
+               return -ENXIO;
+
+       cfg = irq_cfg(irq);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
+       if (!err) {
+               struct ht_irq_msg msg;
+               unsigned dest;
+
+               dest = apic->cpu_mask_to_apicid_and(cfg->domain,
+                                                   apic->target_cpus());
+
+               msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+
+               msg.address_lo =
+                       HT_IRQ_LOW_BASE |
+                       HT_IRQ_LOW_DEST_ID(dest) |
+                       HT_IRQ_LOW_VECTOR(cfg->vector) |
+                       ((apic->irq_dest_mode == 0) ?
+                               HT_IRQ_LOW_DM_PHYSICAL :
+                               HT_IRQ_LOW_DM_LOGICAL) |
+                       HT_IRQ_LOW_RQEOI_EDGE |
+                       ((apic->irq_delivery_mode != dest_LowestPrio) ?
+                               HT_IRQ_LOW_MT_FIXED :
+                               HT_IRQ_LOW_MT_ARBITRATED) |
+                       HT_IRQ_LOW_IRQ_MASKED;
+
+               write_ht_irq_msg(irq, &msg);
+
+               set_irq_chip_and_handler_name(irq, &ht_irq_chip,
+                                             handle_edge_irq, "edge");
+
+               dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
+       }
+       return err;
+}
+#endif /* CONFIG_HT_IRQ */
+
+#ifdef CONFIG_X86_UV
+/*
+ * Re-target the irq to the specified CPU and enable the specified MMR located
+ * on the specified blade to allow the sending of MSIs to the specified CPU.
+ */
+int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
+                      unsigned long mmr_offset)
+{
+       const struct cpumask *eligible_cpu = cpumask_of(cpu);
+       struct irq_cfg *cfg;
+       int mmr_pnode;
+       unsigned long mmr_value;
+       struct uv_IO_APIC_route_entry *entry;
+       unsigned long flags;
+       int err;
+
+       cfg = irq_cfg(irq);
+
+       err = assign_irq_vector(irq, cfg, eligible_cpu);
+       if (err != 0)
+               return err;
+
+       spin_lock_irqsave(&vector_lock, flags);
+       set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
+                                     irq_name);
+       spin_unlock_irqrestore(&vector_lock, flags);
+
+       mmr_value = 0;
+       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+       BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
+
+       entry->vector = cfg->vector;
+       entry->delivery_mode = apic->irq_delivery_mode;
+       entry->dest_mode = apic->irq_dest_mode;
+       entry->polarity = 0;
+       entry->trigger = 0;
+       entry->mask = 0;
+       entry->dest = apic->cpu_mask_to_apicid(eligible_cpu);
+
+       mmr_pnode = uv_blade_to_pnode(mmr_blade);
+       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+
+       return irq;
+}
+
+/*
+ * Disable the specified MMR located on the specified blade so that MSIs are
+ * no longer allowed to be sent.
+ */
+void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
+{
+       unsigned long mmr_value;
+       struct uv_IO_APIC_route_entry *entry;
+       int mmr_pnode;
+
+       mmr_value = 0;
+       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+       BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
+
+       entry->mask = 1;
+
+       mmr_pnode = uv_blade_to_pnode(mmr_blade);
+       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+}
+#endif /* CONFIG_X86_UV */
+
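+/*
+ * Read IO-APIC register 01 and return the index of the last redirection
+ * table entry, i.e. the number of entries minus one.
+ */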
+int __init io_apic_get_redir_entries(int ioapic)
+{
+       union IO_APIC_reg_01    reg_01;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       reg_01.raw = io_apic_read(ioapic, 1);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+       return reg_01.bits.entries;
+}
+
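+/*
+ * Size nr_irqs_gsi from ACPI if available, otherwise from the redirection
+ * entry counts of all the IO-APICs.
+ */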
+void __init probe_nr_irqs_gsi(void)
+{
+       int nr = 0;
+
+       nr = acpi_probe_gsi();
+       if (nr > nr_irqs_gsi) {
+               nr_irqs_gsi = nr;
+       } else {
+               /* for acpi=off, or when ACPI is not compiled in */
+               int idx;
+
+               nr = 0;
+               for (idx = 0; idx < nr_ioapics; idx++)
+                       nr += io_apic_get_redir_entries(idx) + 1;
+
+               if (nr > nr_irqs_gsi)
+                       nr_irqs_gsi = nr;
+       }
+
+       printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
+}
+
+#ifdef CONFIG_SPARSE_IRQ
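+/*
+ * Bound nr_irqs: never more than NR_VECTORS per cpu, and beyond the GSI
+ * range reserve 8 irqs per cpu plus 16 times the GSI count for
+ * dynamically allocated MSI and HT irqs.
+ */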
+int __init arch_probe_nr_irqs(void)
+{
+       int nr;
+
+       if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
+               nr_irqs = NR_VECTORS * nr_cpu_ids;
+
+       nr = nr_irqs_gsi + 8 * nr_cpu_ids;
+#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+       /*
+        * for MSI and HT dyn irq
+        */
+       nr += nr_irqs_gsi * 16;
+#endif
+       if (nr < nr_irqs)
+               nr_irqs = nr;
+
+       return 0;
+}
+#endif
+
+/* --------------------------------------------------------------------------
+                          ACPI-based IOAPIC Configuration
+   -------------------------------------------------------------------------- */
+
+#ifdef CONFIG_ACPI
+
+#ifdef CONFIG_X86_32
+int __init io_apic_get_unique_id(int ioapic, int apic_id)
+{
+       union IO_APIC_reg_00 reg_00;
+       static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
+       physid_mask_t tmp;
+       unsigned long flags;
+       int i = 0;
+
+       /*
+        * The P4 platform supports up to 256 APIC IDs on two separate APIC
+        * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
+        * only support up to 16 IDs on one shared APIC bus.
+        *
+        * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
+        *      advantage of new APIC bus architecture.
+        */
+
+       if (physids_empty(apic_id_map))
+               apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       reg_00.raw = io_apic_read(ioapic, 0);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+       if (apic_id >= get_physical_broadcast()) {
+               printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
+                       "%d\n", ioapic, apic_id, reg_00.bits.ID);
+               apic_id = reg_00.bits.ID;
+       }
+
+       /*
+        * Every APIC in a system must have a unique ID or we get lots of nice
+        * 'stuck on smp_invalidate_needed IPI wait' messages.
+        */
+       if (apic->check_apicid_used(apic_id_map, apic_id)) {
+
+               for (i = 0; i < get_physical_broadcast(); i++) {
+                       if (!apic->check_apicid_used(apic_id_map, i))
+                               break;
+               }
+
+               if (i == get_physical_broadcast())
+                       panic("Max apic_id exceeded!\n");
+
+               printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
+                       "trying %d\n", ioapic, apic_id, i);
+
+               apic_id = i;
+       }
+
+       tmp = apic->apicid_to_cpu_present(apic_id);
+       physids_or(apic_id_map, apic_id_map, tmp);
+
+       if (reg_00.bits.ID != apic_id) {
+               reg_00.bits.ID = apic_id;
+
+               spin_lock_irqsave(&ioapic_lock, flags);
+               io_apic_write(ioapic, 0, reg_00.raw);
+               reg_00.raw = io_apic_read(ioapic, 0);
+               spin_unlock_irqrestore(&ioapic_lock, flags);
+
+               /* Sanity check */
+               if (reg_00.bits.ID != apic_id) {
+                       printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
+                       return -1;
+               }
+       }
+
+       apic_printk(APIC_VERBOSE, KERN_INFO
+                       "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
+
+       return apic_id;
+}
+
+int __init io_apic_get_version(int ioapic)
+{
+       union IO_APIC_reg_01    reg_01;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioapic_lock, flags);
+       reg_01.raw = io_apic_read(ioapic, 1);
+       spin_unlock_irqrestore(&ioapic_lock, flags);
+
+       return reg_01.bits.version;
+}
+#endif
+
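+/*
+ * Program an IO-APIC pin for the given irq, trigger mode and polarity,
+ * allocating the irq_desc on the boot cpu and recording the pin mapping
+ * for non-legacy irqs.
+ */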
+int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
+{
+       struct irq_desc *desc;
+       struct irq_cfg *cfg;
+       int cpu = boot_cpu_id;
+
+       if (!IO_APIC_IRQ(irq)) {
+               apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
+                       ioapic);
+               return -EINVAL;
+       }
+
+       desc = irq_to_desc_alloc_cpu(irq, cpu);
+       if (!desc) {
+               printk(KERN_INFO "can not get irq_desc %d\n", irq);
+               return 0;
+       }
+
+       /*
+        * IRQs < 16 are already in the irq_2_pin[] map
+        */
+       if (irq >= NR_IRQS_LEGACY) {
+               cfg = desc->chip_data;
+               add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
+       }
+
+       setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
+
+       return 0;
+}
+
+
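+/*
+ * Find the MP-table interrupt entry matching 'bus_irq' and report its
+ * trigger mode and polarity, so that ACPI interrupt overrides can be
+ * honoured.
+ */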
+int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
+{
+       int i;
+
+       if (skip_ioapic_setup)
+               return -1;
+
+       for (i = 0; i < mp_irq_entries; i++)
+               if (mp_irqs[i].irqtype == mp_INT &&
+                   mp_irqs[i].srcbusirq == bus_irq)
+                       break;
+       if (i >= mp_irq_entries)
+               return -1;
+
+       *trigger = irq_trigger(i);
+       *polarity = irq_polarity(i);
+       return 0;
+}
+
+#endif /* CONFIG_ACPI */
+
+/*
+ * This function is currently only a helper for the i386 SMP boot process,
+ * where we need to reprogram the ioredtbls to cater for the cpus which have
+ * come online, so the mask in all cases should simply be apic->target_cpus().
+ */
+#ifdef CONFIG_SMP
+void __init setup_ioapic_dest(void)
+{
+       int pin, ioapic, irq, irq_entry;
+       struct irq_desc *desc;
+       struct irq_cfg *cfg;
+       const struct cpumask *mask;
+
+       if (skip_ioapic_setup == 1)
+               return;
+
+       for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
+               for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
+                       irq_entry = find_irq_entry(ioapic, pin, mp_INT);
+                       if (irq_entry == -1)
+                               continue;
+                       irq = pin_2_irq(irq_entry, ioapic, pin);
+
+                       /* setup_IO_APIC_irqs() could fail to get a vector for
+                        * some devices when there are too many of them, because
+                        * at that time only the boot cpu is online.
+                        */
+                       desc = irq_to_desc(irq);
+                       cfg = desc->chip_data;
+                       if (!cfg->vector) {
+                               setup_IO_APIC_irq(ioapic, pin, irq, desc,
+                                                 irq_trigger(irq_entry),
+                                                 irq_polarity(irq_entry));
+                               continue;
+
+                       }
+
+                       /*
+                        * Honour affinities which have been set in early boot
+                        */
+                       if (desc->status &
+                           (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
+                               mask = desc->affinity;
+                       else
+                               mask = apic->target_cpus();
+
+#ifdef CONFIG_INTR_REMAP
+                       if (intr_remapping_enabled)
+                               set_ir_ioapic_affinity_irq_desc(desc, mask);
+                       else
+#endif
+                               set_ioapic_affinity_irq_desc(desc, mask);
+               }
+
+       }
+}
+#endif
+
+#define IOAPIC_RESOURCE_NAME_SIZE 11
+
+static struct resource *ioapic_resources;
+
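+/*
+ * Allocate one struct resource per IO-APIC from bootmem, with the name
+ * strings ("IOAPIC %u") stored right after the resource array.
+ */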
+static struct resource * __init ioapic_setup_resources(void)
+{
+       unsigned long n;
+       struct resource *res;
+       char *mem;
+       int i;
+
+       if (nr_ioapics <= 0)
+               return NULL;
+
+       n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
+       n *= nr_ioapics;
+
+       mem = alloc_bootmem(n);
+       res = (void *)mem;
+
+       if (mem != NULL) {
+               mem += sizeof(struct resource) * nr_ioapics;
+
+               for (i = 0; i < nr_ioapics; i++) {
+                       res[i].name = mem;
+                       res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+                       sprintf(mem,  "IOAPIC %u", i);
+                       mem += IOAPIC_RESOURCE_NAME_SIZE;
+               }
+       }
+
+       ioapic_resources = res;
+
+       return res;
+}
+
+void __init ioapic_init_mappings(void)
+{
+       unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
+       struct resource *ioapic_res;
+       int i;
+
+       ioapic_res = ioapic_setup_resources();
+       for (i = 0; i < nr_ioapics; i++) {
+               if (smp_found_config) {
+                       ioapic_phys = mp_ioapics[i].apicaddr;
+#ifdef CONFIG_X86_32
+                       if (!ioapic_phys) {
+                               printk(KERN_ERR
+                                      "WARNING: bogus zero IO-APIC "
+                                      "address found in MPTABLE, "
+                                      "disabling IO/APIC support!\n");
+                               smp_found_config = 0;
+                               skip_ioapic_setup = 1;
+                               goto fake_ioapic_page;
+                       }
+#endif
+               } else {
+#ifdef CONFIG_X86_32
+fake_ioapic_page:
+#endif
+                       ioapic_phys = (unsigned long)
+                               alloc_bootmem_pages(PAGE_SIZE);
+                       ioapic_phys = __pa(ioapic_phys);
+               }
+               set_fixmap_nocache(idx, ioapic_phys);
+               apic_printk(APIC_VERBOSE,
+                           "mapped IOAPIC to %08lx (%08lx)\n",
+                           __fix_to_virt(idx), ioapic_phys);
+               idx++;
+
+               if (ioapic_res != NULL) {
+                       ioapic_res->start = ioapic_phys;
+                       ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
+                       ioapic_res++;
+               }
+       }
+}
+
+static int __init ioapic_insert_resources(void)
+{
+       int i;
+       struct resource *r = ioapic_resources;
+
+       if (!r) {
+               printk(KERN_ERR
+                      "IO APIC resources could be not be allocated.\n");
+               return -1;
+       }
+
+       for (i = 0; i < nr_ioapics; i++) {
+               insert_resource(&iomem_resource, r);
+               r++;
+       }
+
+       return 0;
+}
+
+/* Insert the IO APIC resources after PCI initialization has occurred to handle
+ * IO APICs that are mapped in on a BAR in PCI space. */
+late_initcall(ioapic_insert_resources);
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
new file mode 100644 (file)
index 0000000..dbf5445
--- /dev/null
@@ -0,0 +1,164 @@
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kernel_stat.h>
+#include <linux/mc146818rtc.h>
+#include <linux/cache.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/smp.h>
+#include <asm/mtrr.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/apic.h>
+#include <asm/proto.h>
+#include <asm/ipi.h>
+
+void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
+{
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       /*
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicast to each CPU instead.
+        * - mbligh
+        */
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+                               query_cpu), vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int query_cpu;
+       unsigned long flags;
+
+       /* See Hack comment above */
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
+                                query_cpu), vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned long flags;
+       unsigned int query_cpu;
+
+       /*
+        * Hack. The clustered APIC addressing mode doesn't allow us to send
+        * to an arbitrary mask, so I do a unicast to each CPU instead. This
+        * should be modified to do one message per cluster ID - mbligh
+        */
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask)
+               __default_send_IPI_dest_field(
+                       apic->cpu_to_logical_apicid(query_cpu), vector,
+                       apic->dest_logical);
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+                                                int vector)
+{
+       unsigned long flags;
+       unsigned int query_cpu;
+       unsigned int this_cpu = smp_processor_id();
+
+       /* See Hack comment above */
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __default_send_IPI_dest_field(
+                       apic->cpu_to_logical_apicid(query_cpu), vector,
+                       apic->dest_logical);
+       }
+       local_irq_restore(flags);
+}
+
+#ifdef CONFIG_X86_32
+
+/*
+ * This is only used on smaller machines.
+ */
+void default_send_IPI_mask_logical(const struct cpumask *cpumask, int vector)
+{
+       unsigned long mask = cpumask_bits(cpumask)[0];
+       unsigned long flags;
+
+       local_irq_save(flags);
+       WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
+       __default_send_IPI_dest_field(mask, vector, apic->dest_logical);
+       local_irq_restore(flags);
+}
+
+void default_send_IPI_allbutself(int vector)
+{
+       /*
+        * if there are no other CPUs in the system then we get an APIC send
+        * error if we try to broadcast, thus avoid sending IPIs in this case.
+        */
+       if (num_online_cpus() <= 1)
+               return;
+
+       __default_local_send_IPI_allbutself(vector);
+}
+
+void default_send_IPI_all(int vector)
+{
+       __default_local_send_IPI_all(vector);
+}
+
+void default_send_IPI_self(int vector)
+{
+       __default_send_IPI_shortcut(APIC_DEST_SELF, vector, apic->dest_logical);
+}
+
+/* must come after the send_IPI functions above for inlining */
+static int convert_apicid_to_cpu(int apic_id)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
+                       return i;
+       }
+       return -1;
+}
+
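+/*
+ * Derive the current cpu number from the hardware APIC id, for contexts
+ * where smp_processor_id() may not be reliable; falls back to cpu 0 if
+ * no sane answer can be found.
+ */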
+int safe_smp_processor_id(void)
+{
+       int apicid, cpuid;
+
+       if (!boot_cpu_has(X86_FEATURE_APIC))
+               return 0;
+
+       apicid = hard_smp_processor_id();
+       if (apicid == BAD_APICID)
+               return 0;
+
+       cpuid = convert_apicid_to_cpu(apicid);
+
+       return cpuid >= 0 ? cpuid : 0;
+}
+#endif
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
new file mode 100644 (file)
index 0000000..bdfad80
--- /dev/null
@@ -0,0 +1,564 @@
+/*
+ *  NMI watchdog support on APIC systems
+ *
+ *  Started by Ingo Molnar <mingo@redhat.com>
+ *
+ *  Fixes:
+ *  Mikael Pettersson  : AMD K7 support for local APIC NMI watchdog.
+ *  Mikael Pettersson  : Power Management for local APIC NMI watchdog.
+ *  Mikael Pettersson  : Pentium 4 support for local APIC NMI watchdog.
+ *  Pavel Machek and
+ *  Mikael Pettersson  : PM converted to driver model. Disable/enable API.
+ */
+
+#include <asm/apic.h>
+
+#include <linux/nmi.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/sysctl.h>
+#include <linux/percpu.h>
+#include <linux/kprobes.h>
+#include <linux/cpumask.h>
+#include <linux/kernel_stat.h>
+#include <linux/kdebug.h>
+#include <linux/smp.h>
+
+#include <asm/i8259.h>
+#include <asm/io_apic.h>
+#include <asm/proto.h>
+#include <asm/timer.h>
+
+#include <asm/mce.h>
+
+#include <asm/mach_traps.h>
+
+int unknown_nmi_panic;
+int nmi_watchdog_enabled;
+
+static cpumask_t backtrace_mask = CPU_MASK_NONE;
+
+/* nmi_active:
+ * >0: the lapic NMI watchdog is active, but can be disabled
+ * <0: the lapic NMI watchdog has not been set up, and cannot
+ *     be enabled
+ *  0: the lapic NMI watchdog is disabled, but can be enabled
+ */
+atomic_t nmi_active = ATOMIC_INIT(0);          /* oprofile uses this */
+EXPORT_SYMBOL(nmi_active);
+
+unsigned int nmi_watchdog = NMI_NONE;
+EXPORT_SYMBOL(nmi_watchdog);
+
+static int panic_on_timeout;
+
+static unsigned int nmi_hz = HZ;
+static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+       return per_cpu(irq_stat, cpu).__nmi_count;
+}
+
+static inline int mce_in_progress(void)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
+       return atomic_read(&mce_entry) > 0;
+#endif
+       return 0;
+}
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+       return per_cpu(irq_stat, cpu).apic_timer_irqs +
+               per_cpu(irq_stat, cpu).irq0_irqs;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * The performance counters used by NMI_LOCAL_APIC don't trigger when
+ * the CPU is idle. To make sure the NMI watchdog really ticks on all
+ * CPUs during the test make them busy.
+ */
+static __init void nmi_cpu_busy(void *data)
+{
+       local_irq_enable_in_hardirq();
+       /*
+        * Intentionally don't use cpu_relax here. This is
+        * to make sure that the performance counter really ticks,
+        * even if there is a simulator or similar that catches the
+        * pause instruction. On a real HT machine this is fine because
+        * all other CPUs are busy with "useless" delay loops and don't
+        * care if they get somewhat less cycles.
+        */
+       while (endflag == 0)
+               mb();
+}
+#endif
+
+static void report_broken_nmi(int cpu, int *prev_nmi_count)
+{
+       printk(KERN_CONT "\n");
+
+       printk(KERN_WARNING
+               "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
+                       cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
+
+       printk(KERN_WARNING
+               "Please report this to bugzilla.kernel.org,\n");
+       printk(KERN_WARNING
+               "and attach the output of the 'dmesg' command.\n");
+
+       per_cpu(wd_enabled, cpu) = 0;
+       atomic_dec(&nmi_active);
+}
+
+static void __acpi_nmi_disable(void *__unused)
+{
+       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
+int __init check_nmi_watchdog(void)
+{
+       unsigned int *prev_nmi_count;
+       int cpu;
+
+       if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
+               return 0;
+
+       prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
+       if (!prev_nmi_count)
+               goto error;
+
+       printk(KERN_INFO "Testing NMI watchdog ... ");
+
+#ifdef CONFIG_SMP
+       if (nmi_watchdog == NMI_LOCAL_APIC)
+               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
+#endif
+
+       for_each_possible_cpu(cpu)
+               prev_nmi_count[cpu] = get_nmi_count(cpu);
+       local_irq_enable();
+       mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
+
+       for_each_online_cpu(cpu) {
+               if (!per_cpu(wd_enabled, cpu))
+                       continue;
+               if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
+                       report_broken_nmi(cpu, prev_nmi_count);
+       }
+       endflag = 1;
+       if (!atomic_read(&nmi_active)) {
+               kfree(prev_nmi_count);
+               atomic_set(&nmi_active, -1);
+               goto error;
+       }
+       printk("OK.\n");
+
+       /*
+        * now that we know it works we can reduce NMI frequency to
+        * something more reasonable; makes a difference in some configs
+        */
+       if (nmi_watchdog == NMI_LOCAL_APIC)
+               nmi_hz = lapic_adjust_nmi_hz(1);
+
+       kfree(prev_nmi_count);
+       return 0;
+error:
+       if (nmi_watchdog == NMI_IO_APIC) {
+               if (!timer_through_8259)
+                       disable_8259A_irq(0);
+               on_each_cpu(__acpi_nmi_disable, NULL, 1);
+       }
+
+#ifdef CONFIG_X86_32
+       timer_ack = 0;
+#endif
+       return -1;
+}
+
+static int __init setup_nmi_watchdog(char *str)
+{
+       unsigned int nmi;
+
+       if (!strncmp(str, "panic", 5)) {
+               panic_on_timeout = 1;
+               str = strchr(str, ',');
+               if (!str)
+                       return 1;
+               ++str;
+       }
+
+       if (!strncmp(str, "lapic", 5))
+               nmi_watchdog = NMI_LOCAL_APIC;
+       else if (!strncmp(str, "ioapic", 6))
+               nmi_watchdog = NMI_IO_APIC;
+       else {
+               get_option(&str, &nmi);
+               if (nmi >= NMI_INVALID)
+                       return 0;
+               nmi_watchdog = nmi;
+       }
+
+       return 1;
+}
+__setup("nmi_watchdog=", setup_nmi_watchdog);
+
+/*
+ * Suspend/resume support
+ */
+#ifdef CONFIG_PM
+
+static int nmi_pm_active; /* nmi_active before suspend */
+
+static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
+{
+       /* only CPU0 goes here, other CPUs should be offline */
+       nmi_pm_active = atomic_read(&nmi_active);
+       stop_apic_nmi_watchdog(NULL);
+       BUG_ON(atomic_read(&nmi_active) != 0);
+       return 0;
+}
+
+static int lapic_nmi_resume(struct sys_device *dev)
+{
+       /* only CPU0 goes here, other CPUs should be offline */
+       if (nmi_pm_active > 0) {
+               setup_apic_nmi_watchdog(NULL);
+               touch_nmi_watchdog();
+       }
+       return 0;
+}
+
+static struct sysdev_class nmi_sysclass = {
+       .name           = "lapic_nmi",
+       .resume         = lapic_nmi_resume,
+       .suspend        = lapic_nmi_suspend,
+};
+
+static struct sys_device device_lapic_nmi = {
+       .id     = 0,
+       .cls    = &nmi_sysclass,
+};
+
+static int __init init_lapic_nmi_sysfs(void)
+{
+       int error;
+
+       /*
+        * should really be a BUG_ON, but because this is an
+        * init call, it just doesn't work.  -dcz
+        */
+       if (nmi_watchdog != NMI_LOCAL_APIC)
+               return 0;
+
+       if (atomic_read(&nmi_active) < 0)
+               return 0;
+
+       error = sysdev_class_register(&nmi_sysclass);
+       if (!error)
+               error = sysdev_register(&device_lapic_nmi);
+       return error;
+}
+
+/* must come after the local APIC's device_initcall() */
+late_initcall(init_lapic_nmi_sysfs);
+
+#endif /* CONFIG_PM */
+
+static void __acpi_nmi_enable(void *__unused)
+{
+       apic_write(APIC_LVT0, APIC_DM_NMI);
+}
+
+/*
+ * Enable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_enable(void)
+{
+       if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+               on_each_cpu(__acpi_nmi_enable, NULL, 1);
+}
+
+/*
+ * Disable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_disable(void)
+{
+       if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+               on_each_cpu(__acpi_nmi_disable, NULL, 1);
+}
+
+/*
+ * This function is called as soon as the LAPIC NMI watchdog driver has
+ * everything in place and is ready to check whether the NMIs belong to
+ * the NMI watchdog.
+ */
+void cpu_nmi_set_wd_enabled(void)
+{
+       __get_cpu_var(wd_enabled) = 1;
+}
+
+void setup_apic_nmi_watchdog(void *unused)
+{
+       if (__get_cpu_var(wd_enabled))
+               return;
+
+       /* cheap hack to support suspend/resume */
+       /* if cpu0 is not active, neither should the other cpus be */
+       if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
+               return;
+
+       switch (nmi_watchdog) {
+       case NMI_LOCAL_APIC:
+               if (lapic_watchdog_init(nmi_hz) < 0) {
+                       __get_cpu_var(wd_enabled) = 0;
+                       return;
+               }
+               /* FALL THROUGH */
+       case NMI_IO_APIC:
+               __get_cpu_var(wd_enabled) = 1;
+               atomic_inc(&nmi_active);
+       }
+}
+
+void stop_apic_nmi_watchdog(void *unused)
+{
+       /* only support LOCAL and IO APICs for now */
+       if (!nmi_watchdog_active())
+               return;
+       if (__get_cpu_var(wd_enabled) == 0)
+               return;
+       if (nmi_watchdog == NMI_LOCAL_APIC)
+               lapic_watchdog_stop();
+       else
+               __acpi_nmi_disable(NULL);
+       __get_cpu_var(wd_enabled) = 0;
+       atomic_dec(&nmi_active);
+}
+
+/*
+ * the best way to detect whether a CPU has a 'hard lockup' problem
+ * is to check its local APIC timer IRQ counts. If they are not
+ * changing then that CPU has some problem.
+ *
+ * as these watchdog NMI IRQs are generated on every CPU, we only
+ * have to check the current processor.
+ *
+ * since NMIs don't listen to _any_ locks, we have to be extremely
+ * careful not to rely on unsafe variables. The printk might lock
+ * up though, so we have to break up any console locks first ...
+ * [when there will be more tty-related locks, break them up here too!]
+ */
+
+static DEFINE_PER_CPU(unsigned, last_irq_sum);
+static DEFINE_PER_CPU(local_t, alert_counter);
+static DEFINE_PER_CPU(int, nmi_touch);
+
+void touch_nmi_watchdog(void)
+{
+       if (nmi_watchdog_active()) {
+               unsigned cpu;
+
+               /*
+                * Tell other CPUs to reset their alert counters. We cannot
+                * do it ourselves because the alert count increase is not
+                * atomic.
+                */
+               for_each_present_cpu(cpu) {
+                       if (per_cpu(nmi_touch, cpu) != 1)
+                               per_cpu(nmi_touch, cpu) = 1;
+               }
+       }
+
+       /*
+        * Tickle the softlockup detector too:
+        */
+       touch_softlockup_watchdog();
+}
+EXPORT_SYMBOL(touch_nmi_watchdog);
+
+notrace __kprobes int
+nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
+{
+       /*
+        * Since current_thread_info() is always on the stack, and we
+        * always switch the stack NMI-atomically, it's safe to use
+        * smp_processor_id().
+        */
+       unsigned int sum;
+       int touched = 0;
+       int cpu = smp_processor_id();
+       int rc = 0;
+
+       /* check for other users first */
+       if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
+                       == NOTIFY_STOP) {
+               rc = 1;
+               touched = 1;
+       }
+
+       sum = get_timer_irqs(cpu);
+
+       if (__get_cpu_var(nmi_touch)) {
+               __get_cpu_var(nmi_touch) = 0;
+               touched = 1;
+       }
+
+       if (cpu_isset(cpu, backtrace_mask)) {
+               static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
+
+               spin_lock(&lock);
+               printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
+               dump_stack();
+               spin_unlock(&lock);
+               cpu_clear(cpu, backtrace_mask);
+       }
+
+       /* Could check oops_in_progress here too, but it's safer not to */
+       if (mce_in_progress())
+               touched = 1;
+
+       /* if none of the timers is firing, this cpu isn't doing much */
+       if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+               /*
+                * Ayiee, looks like this CPU is stuck ...
+                * wait a few IRQs (5 seconds) before doing the oops ...
+                */
+               local_inc(&__get_cpu_var(alert_counter));
+               if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+                       /*
+                        * die_nmi will return ONLY if NOTIFY_STOP happens..
+                        */
+                       die_nmi("BUG: NMI Watchdog detected LOCKUP",
+                               regs, panic_on_timeout);
+       } else {
+               __get_cpu_var(last_irq_sum) = sum;
+               local_set(&__get_cpu_var(alert_counter), 0);
+       }
+
+       /* see if the nmi watchdog went off */
+       if (!__get_cpu_var(wd_enabled))
+               return rc;
+       switch (nmi_watchdog) {
+       case NMI_LOCAL_APIC:
+               rc |= lapic_wd_event(nmi_hz);
+               break;
+       case NMI_IO_APIC:
+               /*
+                * don't know how to accurately check for this,
+                * so just assume it was a watchdog timer interrupt.
+                * This matches the old behaviour.
+                */
+               rc = 1;
+               break;
+       }
+       return rc;
+}
+
+#ifdef CONFIG_SYSCTL
+
+static void enable_ioapic_nmi_watchdog_single(void *unused)
+{
+       __get_cpu_var(wd_enabled) = 1;
+       atomic_inc(&nmi_active);
+       __acpi_nmi_enable(NULL);
+}
+
+static void enable_ioapic_nmi_watchdog(void)
+{
+       on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
+       touch_nmi_watchdog();
+}
+
+static void disable_ioapic_nmi_watchdog(void)
+{
+       on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
+}
+
+static int __init setup_unknown_nmi_panic(char *str)
+{
+       unknown_nmi_panic = 1;
+       return 1;
+}
+__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
+
+static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
+{
+       unsigned char reason = get_nmi_reason();
+       char buf[64];
+
+       sprintf(buf, "NMI received for unknown reason %02x\n", reason);
+       die_nmi(buf, regs, 1); /* Always panic here */
+       return 0;
+}
+
+/*
+ * proc handler for /proc/sys/kernel/nmi
+ */
+int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
+                       void __user *buffer, size_t *length, loff_t *ppos)
+{
+       int old_state;
+
+       nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
+       old_state = nmi_watchdog_enabled;
+       proc_dointvec(table, write, file, buffer, length, ppos);
+       if (!!old_state == !!nmi_watchdog_enabled)
+               return 0;
+
+       if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
+               printk(KERN_WARNING
+                       "NMI watchdog is permanently disabled\n");
+               return -EIO;
+       }
+
+       if (nmi_watchdog == NMI_LOCAL_APIC) {
+               if (nmi_watchdog_enabled)
+                       enable_lapic_nmi_watchdog();
+               else
+                       disable_lapic_nmi_watchdog();
+       } else if (nmi_watchdog == NMI_IO_APIC) {
+               if (nmi_watchdog_enabled)
+                       enable_ioapic_nmi_watchdog();
+               else
+                       disable_ioapic_nmi_watchdog();
+       } else {
+               printk(KERN_WARNING
+                       "NMI watchdog doesn't know what hardware to touch\n");
+               return -EIO;
+       }
+       return 0;
+}
+
+#endif /* CONFIG_SYSCTL */
+
+int do_nmi_callback(struct pt_regs *regs, int cpu)
+{
+#ifdef CONFIG_SYSCTL
+       if (unknown_nmi_panic)
+               return unknown_nmi_panic_callback(regs, cpu);
+#endif
+       return 0;
+}
+
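+/*
+ * Ask all online cpus to dump a backtrace from NMI context: set their
+ * bits in backtrace_mask and wait up to 10 seconds for
+ * nmi_watchdog_tick() to clear them all.
+ */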
+void __trigger_all_cpu_backtrace(void)
+{
+       int i;
+
+       backtrace_mask = cpu_online_map;
+       /* Wait for up to 10 seconds for all CPUs to do the backtrace */
+       for (i = 0; i < 10 * 1000; i++) {
+               if (cpus_empty(backtrace_mask))
+                       break;
+               mdelay(1);
+       }
+}
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
new file mode 100644 (file)
index 0000000..ba2fc64
--- /dev/null
@@ -0,0 +1,557 @@
+/*
+ * Written by: Patricia Gaughen, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ * Copyright (C) 2009, Red Hat, Inc., Ingo Molnar
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <gone@us.ibm.com>
+ */
+#include <linux/nodemask.h>
+#include <linux/topology.h>
+#include <linux/bootmem.h>
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/kernel.h>
+#include <linux/mmzone.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/numa.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+
+#include <asm/processor.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/numaq.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/e820.h>
+#include <asm/ipi.h>
+
+#define        MB_TO_PAGES(addr)               ((addr) << (20 - PAGE_SHIFT))
+
+int found_numaq;
+
+/*
+ * Have to match translation table entries to main table entries by counter
+ * hence the mpc_record variable .... can't see a less disgusting way of
+ * doing this ....
+ */
+struct mpc_trans {
+       unsigned char                   mpc_type;
+       unsigned char                   trans_len;
+       unsigned char                   trans_type;
+       unsigned char                   trans_quad;
+       unsigned char                   trans_global;
+       unsigned char                   trans_local;
+       unsigned short                  trans_reserved;
+};
+
+/* x86_quirks member */
+static int                             mpc_record;
+
+static struct mpc_trans                        *translation_table[MAX_MPC_ENTRY];
+
+int                                    mp_bus_id_to_node[MAX_MP_BUSSES];
+int                                    mp_bus_id_to_local[MAX_MP_BUSSES];
+int                                    quad_local_to_mp_bus_id[NR_CPUS/4][4];
+
+
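+/*
+ * Mark a quad (node) online and derive its pfn range from its quad
+ * config table entry.
+ */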
+static inline void numaq_register_node(int node, struct sys_cfg_data *scd)
+{
+       struct eachquadmem *eq = scd->eq + node;
+
+       node_set_online(node);
+
+       /* Convert to pages */
+       node_start_pfn[node] =
+                MB_TO_PAGES(eq->hi_shrd_mem_start - eq->priv_mem_size);
+
+       node_end_pfn[node] =
+                MB_TO_PAGES(eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
+
+       e820_register_active_regions(node, node_start_pfn[node],
+                                               node_end_pfn[node]);
+
+       memory_present(node, node_start_pfn[node], node_end_pfn[node]);
+
+       node_remap_size[node] = node_memmap_size_bytes(node,
+                                       node_start_pfn[node],
+                                       node_end_pfn[node]);
+}
+
+/*
+ * Function: smp_dump_qct()
+ *
+ * Description: gets memory layout from the quad config table.  This
+ * function also updates node_online_map with the nodes (quads) present.
+ */
+static void __init smp_dump_qct(void)
+{
+       struct sys_cfg_data *scd;
+       int node;
+
+       scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR);
+
+       nodes_clear(node_online_map);
+       for_each_node(node) {
+               if (scd->quads_present31_0 & (1 << node))
+                       numaq_register_node(node, scd);
+       }
+}
+
+void __cpuinit numaq_tsc_disable(void)
+{
+       if (!found_numaq)
+               return;
+
+       if (num_online_nodes() > 1) {
+               printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
+               setup_clear_cpu_cap(X86_FEATURE_TSC);
+       }
+}
+
+static int __init numaq_pre_time_init(void)
+{
+       numaq_tsc_disable();
+       return 0;
+}
+
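+/*
+ * NUMA-Q logical APIC ids encode the quad number in the high nibble
+ * and the cpu within the quad in the low nibble.
+ */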
+static inline int generate_logical_apicid(int quad, int phys_apicid)
+{
+       return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
+}
+
+/* x86_quirks member */
+static int mpc_apic_id(struct mpc_cpu *m)
+{
+       int quad = translation_table[mpc_record]->trans_quad;
+       int logical_apicid = generate_logical_apicid(quad, m->apicid);
+
+       printk(KERN_DEBUG
+               "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
+                m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
+               (m->cpufeature & CPU_MODEL_MASK) >> 4,
+                m->apicver, quad, logical_apicid);
+
+       return logical_apicid;
+}
+
+/* x86_quirks member */
+static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
+{
+       int quad = translation_table[mpc_record]->trans_quad;
+       int local = translation_table[mpc_record]->trans_local;
+
+       mp_bus_id_to_node[m->busid] = quad;
+       mp_bus_id_to_local[m->busid] = local;
+
+       printk(KERN_INFO "Bus #%d is %s (node %d)\n", m->busid, name, quad);
+}
+
+/* x86_quirks member */
+static void mpc_oem_pci_bus(struct mpc_bus *m)
+{
+       int quad = translation_table[mpc_record]->trans_quad;
+       int local = translation_table[mpc_record]->trans_local;
+
+       quad_local_to_mp_bus_id[quad][local] = m->busid;
+}
+
+static void __init MP_translation_info(struct mpc_trans *m)
+{
+       printk(KERN_INFO
+           "Translation: record %d, type %d, quad %d, global %d, local %d\n",
+              mpc_record, m->trans_type, m->trans_quad, m->trans_global,
+              m->trans_local);
+
+       if (mpc_record >= MAX_MPC_ENTRY)
+               printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
+       else
+               translation_table[mpc_record] = m; /* stash this for later */
+
+       if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
+               node_set_online(m->trans_quad);
+}
+
+static int __init mpf_checksum(unsigned char *mp, int len)
+{
+       int sum = 0;
+
+       while (len--)
+               sum += *mp++;
+
+       return sum & 0xFF;
+}
+
+/*
+ * Read/parse the MPC oem tables
+ */
+static void __init
+ smp_read_mpc_oem(struct mpc_oemtable *oemtable, unsigned short oemsize)
+{
+       int count = sizeof(*oemtable);  /* the header size */
+       unsigned char *oemptr = ((unsigned char *)oemtable) + count;
+
+       mpc_record = 0;
+       printk(KERN_INFO
+               "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
+
+       if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
+               printk(KERN_WARNING
+                      "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
+                      oemtable->signature[0], oemtable->signature[1],
+                      oemtable->signature[2], oemtable->signature[3]);
+               return;
+       }
+
+       if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
+               printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
+               return;
+       }
+
+       while (count < oemtable->length) {
+               switch (*oemptr) {
+               case MP_TRANSLATION:
+                       {
+                               struct mpc_trans *m = (void *)oemptr;
+
+                               MP_translation_info(m);
+                               oemptr += sizeof(*m);
+                               count += sizeof(*m);
+                               ++mpc_record;
+                               break;
+                       }
+               default:
+                       printk(KERN_WARNING
+                              "Unrecognised OEM table entry type! - %d\n",
+                              (int)*oemptr);
+                       return;
+               }
+       }
+}
+
+static int __init numaq_setup_ioapic_ids(void)
+{
+       /* the IO-APIC ids are already set up, so the caller can skip it */
+       return 1;
+}
+
+static struct x86_quirks numaq_x86_quirks __initdata = {
+       .arch_pre_time_init             = numaq_pre_time_init,
+       .arch_time_init                 = NULL,
+       .arch_pre_intr_init             = NULL,
+       .arch_memory_setup              = NULL,
+       .arch_intr_init                 = NULL,
+       .arch_trap_init                 = NULL,
+       .mach_get_smp_config            = NULL,
+       .mach_find_smp_config           = NULL,
+       .mpc_record                     = &mpc_record,
+       .mpc_apic_id                    = mpc_apic_id,
+       .mpc_oem_bus_info               = mpc_oem_bus_info,
+       .mpc_oem_pci_bus                = mpc_oem_pci_bus,
+       .smp_read_mpc_oem               = smp_read_mpc_oem,
+       .setup_ioapic_ids               = numaq_setup_ioapic_ids,
+};
+
+static __init void early_check_numaq(void)
+{
+       /*
+        * Find possible boot-time SMP configuration:
+        */
+       early_find_smp_config();
+
+       /*
+        * get boot-time SMP configuration:
+        */
+       if (smp_found_config)
+               early_get_smp_config();
+
+       if (found_numaq)
+               x86_quirks = &numaq_x86_quirks;
+}
+
+int __init get_memcfg_numaq(void)
+{
+       early_check_numaq();
+       if (!found_numaq)
+               return 0;
+       smp_dump_qct();
+
+       return 1;
+}
+
+#define NUMAQ_APIC_DFR_VALUE   (APIC_DFR_CLUSTER)
+
+static inline unsigned int numaq_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0x0F;
+}
+
+static inline void numaq_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       default_send_IPI_mask_sequence_logical(mask, vector);
+}
+
+static inline void numaq_send_IPI_allbutself(int vector)
+{
+       default_send_IPI_mask_allbutself_logical(cpu_online_mask, vector);
+}
+
+static inline void numaq_send_IPI_all(int vector)
+{
+       numaq_send_IPI_mask(cpu_online_mask, vector);
+}
+
+#define NUMAQ_TRAMPOLINE_PHYS_LOW      (0x8)
+#define NUMAQ_TRAMPOLINE_PHYS_HIGH     (0xa)
+
+/*
+ * Because we use NMIs rather than the INIT-STARTUP sequence to
+ * bootstrap the CPUs, the APIC may be in a weird state. Kick it:
+ */
+static inline void numaq_smp_callin_clear_local_apic(void)
+{
+       clear_local_APIC();
+}
+
+static inline const cpumask_t *numaq_target_cpus(void)
+{
+       return &CPU_MASK_ALL;
+}
+
+static inline unsigned long
+numaq_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return physid_isset(apicid, bitmap);
+}
+
+static inline unsigned long numaq_check_apicid_present(int bit)
+{
+       return physid_isset(bit, phys_cpu_present_map);
+}
+
+static inline int numaq_apic_id_registered(void)
+{
+       return 1;
+}
+
+static inline void numaq_init_apic_ldr(void)
+{
+       /* Already done in NUMA-Q firmware */
+}
+
+static inline void numaq_setup_apic_routing(void)
+{
+       printk(KERN_INFO
+               "Enabling APIC mode:  NUMA-Q.  Using %d I/O APICs\n",
+               nr_ioapics);
+}
+
+/*
+ * Skip adding the timer int on secondary nodes, which causes
+ * a small but painful rift in the time-space continuum.
+ */
+static inline int numaq_multi_timer_check(int apic, int irq)
+{
+       return apic != 0 && irq == 0;
+}
+
+static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map)
+{
+       /* We don't have a good way to do this yet - hack */
+       return physids_promote(0xFUL);
+}
+
+static inline int numaq_cpu_to_logical_apicid(int cpu)
+{
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return cpu_2_logical_apicid[cpu];
+}
+
+/*
+ * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
+ * cpu to APIC ID relation to properly interact with the intelligent
+ * mode of the cluster controller.
+ */
+static inline int numaq_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < 60)
+               return ((mps_cpu >> 2) << 4) | (1 << (mps_cpu & 0x3));
+       else
+               return BAD_APICID;
+}
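+
+/*
+ * Worked example (illustrative values): MPS CPU 5 lives on quad 5 >> 2 = 1
+ * and is CPU 5 & 0x3 = 1 within that quad, so its APIC ID becomes
+ * (1 << 4) | (1 << 1) = 0x12 -- quad number in the high nibble, a one-hot
+ * CPU bit in the low nibble.
+ */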
+
+static inline int numaq_apicid_to_node(int logical_apicid)
+{
+       return logical_apicid >> 4;
+}
+
+static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid)
+{
+       int node = numaq_apicid_to_node(logical_apicid);
+       int cpu = __ffs(logical_apicid & 0xf);
+
+       return physid_mask_of_physid(cpu + 4*node);
+}
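+
+/*
+ * Continuing the example above: logical APIC ID 0x12 decodes to node
+ * 0x12 >> 4 = 1 and local CPU __ffs(0x12 & 0xf) = 1, giving physical
+ * ID 1 + 4*1 = 5 -- the round trip back to the original MPS CPU number.
+ */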
+
+/* Where the IO area was mapped on multiquad, always 0 otherwise */
+void *xquad_portio;
+
+static inline int numaq_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return 1;
+}
+
+/*
+ * We use physical apicids here, not logical, so just return the default
+ * physical broadcast to stop people from breaking us
+ */
+static inline unsigned int numaq_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       return 0x0F;
+}
+
+static inline unsigned int
+numaq_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                            const struct cpumask *andmask)
+{
+       return 0x0F;
+}
+
+/* No NUMA-Q box has a HT CPU, but it can't hurt to use the default code. */
+static inline int numaq_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return cpuid_apic >> index_msb;
+}
+
+static int
+numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+       if (strncmp(oem, "IBM NUMA", 8))
+               printk(KERN_ERR "Warning! Not a NUMA-Q system!\n");
+       else
+               found_numaq = 1;
+
+       return found_numaq;
+}
+
+static int probe_numaq(void)
+{
+       /* already know from get_memcfg_numaq() */
+       return found_numaq;
+}
+
+static void numaq_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+static void numaq_setup_portio_remap(void)
+{
+       int num_quads = num_online_nodes();
+
+       if (num_quads <= 1)
+               return;
+
+       printk(KERN_INFO
+               "Remapping cross-quad port I/O for %d quads\n", num_quads);
+
+       xquad_portio = ioremap(XQUAD_PORTIO_BASE, num_quads*XQUAD_PORTIO_QUAD);
+
+       printk(KERN_INFO
+               "xquad_portio vaddr 0x%08lx, len %08lx\n",
+               (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
+}
+
+struct apic apic_numaq = {
+
+       .name                           = "NUMAQ",
+       .probe                          = probe_numaq,
+       .acpi_madt_oem_check            = NULL,
+       .apic_id_registered             = numaq_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* physical delivery on LOCAL quad: */
+       .irq_dest_mode                  = 0,
+
+       .target_cpus                    = numaq_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = numaq_check_apicid_used,
+       .check_apicid_present           = numaq_check_apicid_present,
+
+       .vector_allocation_domain       = numaq_vector_allocation_domain,
+       .init_apic_ldr                  = numaq_init_apic_ldr,
+
+       .ioapic_phys_id_map             = numaq_ioapic_phys_id_map,
+       .setup_apic_routing             = numaq_setup_apic_routing,
+       .multi_timer_check              = numaq_multi_timer_check,
+       .apicid_to_node                 = numaq_apicid_to_node,
+       .cpu_to_logical_apicid          = numaq_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = numaq_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = numaq_apicid_to_cpu_present,
+       .setup_portio_remap             = numaq_setup_portio_remap,
+       .check_phys_apicid_present      = numaq_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = numaq_phys_pkg_id,
+       .mps_oem_check                  = numaq_mps_oem_check,
+
+       .get_apic_id                    = numaq_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0x0F << 24,
+
+       .cpu_mask_to_apicid             = numaq_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = numaq_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = numaq_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = numaq_send_IPI_allbutself,
+       .send_IPI_all                   = numaq_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .wakeup_secondary_cpu           = wakeup_secondary_cpu_via_nmi,
+       .trampoline_phys_low            = NUMAQ_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = NUMAQ_TRAMPOLINE_PHYS_HIGH,
+
+       /* We don't do anything here because we use NMI's to boot instead */
+       .wait_for_init_deassert         = NULL,
+
+       .smp_callin_clear_local_apic    = numaq_smp_callin_clear_local_apic,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
new file mode 100644 (file)
index 0000000..141c99a
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+ * Default generic APIC driver. This handles up to 8 CPUs.
+ *
+ * Copyright 2003 Andi Kleen, SuSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic x86 APIC driver probe layer.
+ */
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+#include <asm/apic.h>
+#include <asm/setup.h>
+#include <asm/ipi.h>
+#include <asm/acpi.h>
+#include <asm/e820.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define DEFAULT_SEND_IPI       (1)
+#else
+#define DEFAULT_SEND_IPI       (0)
+#endif
+
+int no_broadcast = DEFAULT_SEND_IPI;
+
+static __init int no_ipi_broadcast(char *str)
+{
+       get_option(&str, &no_broadcast);
+       pr_info("Using %s mode\n",
+               no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
+       return 1;
+}
+__setup("no_ipi_broadcast=", no_ipi_broadcast);
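+
+/*
+ * Usage note: booting with "no_ipi_broadcast=1" on the kernel command
+ * line forces the no-shortcut mode (the default whenever
+ * CONFIG_HOTPLUG_CPU is set, per DEFAULT_SEND_IPI above), while
+ * "no_ipi_broadcast=0" re-enables IPI shortcut broadcasts.
+ */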
+
+static int __init print_ipi_mode(void)
+{
+       pr_info("Using IPI %s mode\n",
+               no_broadcast ? "No-Shortcut" : "Shortcut");
+       return 0;
+}
+late_initcall(print_ipi_mode);
+
+void default_setup_apic_routing(void)
+{
+#ifdef CONFIG_X86_IO_APIC
+       printk(KERN_INFO
+               "Enabling APIC mode:  Flat.  Using %d I/O APICs\n",
+               nr_ioapics);
+#endif
+}
+
+static void default_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       /*
+        * Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
+}
+
+/* should be called last. */
+static int probe_default(void)
+{
+       return 1;
+}
+
+struct apic apic_default = {
+
+       .name                           = "default",
+       .probe                          = probe_default,
+       .acpi_madt_oem_check            = NULL,
+       .apic_id_registered             = default_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* logical delivery broadcast to all CPUs: */
+       .irq_dest_mode                  = 1,
+
+       .target_cpus                    = default_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = default_check_apicid_used,
+       .check_apicid_present           = default_check_apicid_present,
+
+       .vector_allocation_domain       = default_vector_allocation_domain,
+       .init_apic_ldr                  = default_init_apic_ldr,
+
+       .ioapic_phys_id_map             = default_ioapic_phys_id_map,
+       .setup_apic_routing             = default_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = default_apicid_to_node,
+       .cpu_to_logical_apicid          = default_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = default_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = default_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = default_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0x0F << 24,
+
+       .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = default_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = default_send_IPI_mask_logical,
+       .send_IPI_mask_allbutself       = default_send_IPI_mask_allbutself_logical,
+       .send_IPI_allbutself            = default_send_IPI_allbutself,
+       .send_IPI_all                   = default_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+       .wait_for_init_deassert         = default_wait_for_init_deassert,
+
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
+
+extern struct apic apic_numaq;
+extern struct apic apic_summit;
+extern struct apic apic_bigsmp;
+extern struct apic apic_es7000;
+extern struct apic apic_es7000_cluster;
+
+struct apic *apic = &apic_default;
+EXPORT_SYMBOL_GPL(apic);
+
+static struct apic *apic_probe[] __initdata = {
+#ifdef CONFIG_X86_NUMAQ
+       &apic_numaq,
+#endif
+#ifdef CONFIG_X86_SUMMIT
+       &apic_summit,
+#endif
+#ifdef CONFIG_X86_BIGSMP
+       &apic_bigsmp,
+#endif
+#ifdef CONFIG_X86_ES7000
+       &apic_es7000,
+       &apic_es7000_cluster,
+#endif
+       &apic_default,  /* must be last */
+       NULL,
+};
+
+static int cmdline_apic __initdata;
+static int __init parse_apic(char *arg)
+{
+       int i;
+
+       if (!arg)
+               return -EINVAL;
+
+       for (i = 0; apic_probe[i]; i++) {
+               if (!strcmp(apic_probe[i]->name, arg)) {
+                       apic = apic_probe[i];
+                       cmdline_apic = 1;
+                       return 0;
+               }
+       }
+
+       /* Parsed again by __setup for debug/verbose */
+       return 0;
+}
+early_param("apic", parse_apic);
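+
+/*
+ * Usage note: "apic=<name>" on the kernel command line pins the driver
+ * by its ->name field, e.g. "apic=bigsmp" or "apic=default"; the
+ * cmdline_apic flag then keeps the probe and OEM-check paths below
+ * from overriding the user's choice.
+ */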
+
+void __init generic_bigsmp_probe(void)
+{
+#ifdef CONFIG_X86_BIGSMP
+       /*
+        * This routine is used to switch to bigsmp mode when
+        * - There is no apic= option specified by the user
+        * - generic_apic_probe() has chosen apic_default as the sub_arch
+        * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
+        */
+
+       if (!cmdline_apic && apic == &apic_default) {
+               if (apic_bigsmp.probe()) {
+                       apic = &apic_bigsmp;
+                       printk(KERN_INFO "Overriding APIC driver with %s\n",
+                              apic->name);
+               }
+       }
+#endif
+}
+
+void __init generic_apic_probe(void)
+{
+       if (!cmdline_apic) {
+               int i;
+               for (i = 0; apic_probe[i]; i++) {
+                       if (apic_probe[i]->probe()) {
+                               apic = apic_probe[i];
+                               break;
+                       }
+               }
+               /* Not visible without early console */
+               if (!apic_probe[i])
+                       panic("Didn't find an APIC driver");
+       }
+       printk(KERN_INFO "Using APIC driver %s\n", apic->name);
+}
+
+/* These functions can switch the APIC even after the initial ->probe() */
+
+int __init
+generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
+{
+       int i;
+
+       for (i = 0; apic_probe[i]; ++i) {
+               if (!apic_probe[i]->mps_oem_check)
+                       continue;
+               if (!apic_probe[i]->mps_oem_check(mpc, oem, productid))
+                       continue;
+
+               if (!cmdline_apic) {
+                       apic = apic_probe[i];
+                       printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+                              apic->name);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       int i;
+
+       for (i = 0; apic_probe[i]; ++i) {
+               if (!apic_probe[i]->acpi_madt_oem_check)
+                       continue;
+               if (!apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id))
+                       continue;
+
+               if (!cmdline_apic) {
+                       apic = apic_probe[i];
+                       printk(KERN_INFO "Switched to APIC driver `%s'.\n",
+                              apic->name);
+               }
+               return 1;
+       }
+       return 0;
+}
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
new file mode 100644 (file)
index 0000000..8d7748e
--- /dev/null
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2004 James Cleverdon, IBM.
+ * Subject to the GNU Public License, v.2
+ *
+ * Generic APIC sub-arch probe layer.
+ *
+ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
+ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
+ * James Cleverdon.
+ */
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/hardirq.h>
+#include <linux/dmar.h>
+
+#include <asm/smp.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+#include <asm/setup.h>
+
+extern struct apic apic_flat;
+extern struct apic apic_physflat;
+extern struct apic apic_x2apic_uv_x;
+extern struct apic apic_x2apic_phys;
+extern struct apic apic_x2apic_cluster;
+
+struct apic __read_mostly *apic = &apic_flat;
+EXPORT_SYMBOL_GPL(apic);
+
+static struct apic *apic_probe[] __initdata = {
+#ifdef CONFIG_X86_UV
+       &apic_x2apic_uv_x,
+#endif
+#ifdef CONFIG_X86_X2APIC
+       &apic_x2apic_phys,
+       &apic_x2apic_cluster,
+#endif
+       &apic_physflat,
+       NULL,
+};
+
+/*
+ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
+ */
+void __init default_setup_apic_routing(void)
+{
+#ifdef CONFIG_X86_X2APIC
+       if (x2apic && (apic != &apic_x2apic_phys &&
+#ifdef CONFIG_X86_UV
+                      apic != &apic_x2apic_uv_x &&
+#endif
+                      apic != &apic_x2apic_cluster)) {
+               if (x2apic_phys)
+                       apic = &apic_x2apic_phys;
+               else
+                       apic = &apic_x2apic_cluster;
+               printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+       }
+#endif
+
+       if (apic == &apic_flat) {
+               if (max_physical_apicid >= 8)
+                       apic = &apic_physflat;
+               printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
+       }
+}
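+
+/*
+ * Illustrative note: flat logical mode addresses CPUs through an 8-bit
+ * LDR bitmap, so a box reporting a physical APIC ID of 8 or above
+ * cannot be covered by apic_flat and the routing above falls back to
+ * apic_physflat.
+ */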
+
+/* Same for both flat and physical. */
+
+void apic_send_IPI_self(int vector)
+{
+       __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+}
+
+int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       int i;
+
+       for (i = 0; apic_probe[i]; ++i) {
+               if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
+                       apic = apic_probe[i];
+                       printk(KERN_INFO "Setting APIC routing to %s.\n",
+                               apic->name);
+                       return 1;
+               }
+       }
+       return 0;
+}
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
new file mode 100644 (file)
index 0000000..aac52fa
--- /dev/null
@@ -0,0 +1,579 @@
+/*
+ * IBM Summit-Specific Code
+ *
+ * Written By: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (c) 2003 IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/bios_ebda.h>
+
+/*
+ * APIC driver for the IBM "Summit" chipset.
+ */
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
+#include <asm/smp.h>
+#include <asm/fixmap.h>
+#include <asm/apicdef.h>
+#include <asm/ipi.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/gfp.h>
+#include <linux/smp.h>
+
+static unsigned summit_get_apic_id(unsigned long x)
+{
+       return (x >> 24) & 0xFF;
+}
+
+static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
+{
+       default_send_IPI_mask_sequence_logical(mask, vector);
+}
+
+static void summit_send_IPI_allbutself(int vector)
+{
+       cpumask_t mask = cpu_online_map;
+       cpu_clear(smp_processor_id(), mask);
+
+       if (!cpus_empty(mask))
+               summit_send_IPI_mask(&mask, vector);
+}
+
+static void summit_send_IPI_all(int vector)
+{
+       summit_send_IPI_mask(&cpu_online_map, vector);
+}
+
+#include <asm/tsc.h>
+
+extern int use_cyclone;
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
+static void setup_summit(void);
+#else
+static inline void setup_summit(void) {}
+#endif
+
+static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
+               char *productid)
+{
+       if (!strncmp(oem, "IBM ENSW", 8) &&
+           (!strncmp(productid, "VIGIL SMP", 9) ||
+            !strncmp(productid, "EXA", 3) ||
+            !strncmp(productid, "RUTHLESS SMP", 12))) {
+               mark_tsc_unstable("Summit based system");
+               use_cyclone = 1; /* enable cyclone-timer */
+               setup_summit();
+               return 1;
+       }
+       return 0;
+}
+
+/* Hook from generic ACPI tables.c */
+static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       if (!strncmp(oem_id, "IBM", 3) &&
+           (!strncmp(oem_table_id, "SERVIGIL", 8) ||
+            !strncmp(oem_table_id, "EXA", 3))) {
+               mark_tsc_unstable("Summit based system");
+               use_cyclone = 1; /* enable cyclone-timer */
+               setup_summit();
+               return 1;
+       }
+       return 0;
+}
+
+struct rio_table_hdr {
+       unsigned char version;      /* Version number of this data structure           */
+                                   /* Version 3 adds chassis_num & WP_index           */
+       unsigned char num_scal_dev; /* # of Scalability devices (Twisters for Vigil)   */
+       unsigned char num_rio_dev;  /* # of RIO I/O devices (Cyclones and Winnipegs)   */
+} __attribute__((packed));
+
+struct scal_detail {
+       unsigned char node_id;      /* Scalability Node ID                             */
+       unsigned long CBAR;         /* Address of 1MB register space                   */
+       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
+       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char port1node;    /* Node ID port connected to: 0xFF = None          */
+       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char port2node;    /* Node ID port connected to: 0xFF = None          */
+       unsigned char port2port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char chassis_num;  /* 1 based Chassis number (1 = boot node)          */
+} __attribute__((packed));
+
+struct rio_detail {
+       unsigned char node_id;      /* RIO Node ID                                     */
+       unsigned long BBAR;         /* Address of 1MB register space                   */
+       unsigned char type;         /* Type of device                                  */
+       unsigned char owner_id;     /* For WPEG: Node ID of Cyclone that owns this WPEG*/
+                                   /* For CYC:  Node ID of Twister that owns this CYC */
+       unsigned char port0node;    /* Node ID port connected to: 0xFF=None            */
+       unsigned char port0port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char port1node;    /* Node ID port connected to: 0xFF=None            */
+       unsigned char port1port;    /* Port num port connected to: 0,1,2, or 0xFF=None */
+       unsigned char first_slot;   /* For WPEG: Lowest slot number below this WPEG    */
+                                   /* For CYC:  0                                     */
+       unsigned char status;       /* For WPEG: Bit 0 = 1 : the XAPIC is used         */
+                                   /*                 = 0 : the XAPIC is not used, ie:*/
+                                   /*                     ints fwded to another XAPIC */
+                                   /*           Bits1:7 Reserved                      */
+                                   /* For CYC:  Bits0:7 Reserved                      */
+       unsigned char WP_index;     /* For WPEG: WPEG instance index - lower ones have */
+                                   /*           lower slot numbers/PCI bus numbers    */
+                                   /* For CYC:  No meaning                            */
+       unsigned char chassis_num;  /* 1 based Chassis number                          */
+                                   /* For LookOut WPEGs this field indicates the      */
+                                   /* Expansion Chassis #, enumerated from Boot       */
+                                   /* Node WPEG external port, then Boot Node CYC     */
+                                   /* external port, then Next Vigil chassis WPEG     */
+                                   /* external port, etc.                             */
+                                   /* Shared Lookouts have only 1 chassis number (the */
+                                   /* first one assigned)                             */
+} __attribute__((packed));
+
+
+typedef enum {
+       CompatTwister = 0,  /* Compatibility Twister               */
+       AltTwister    = 1,  /* Alternate Twister of internal 8-way */
+       CompatCyclone = 2,  /* Compatibility Cyclone               */
+       AltCyclone    = 3,  /* Alternate Cyclone of internal 8-way */
+       CompatWPEG    = 4,  /* Compatibility WPEG                  */
+       AltWPEG       = 5,  /* Second Planar WPEG                  */
+       LookOutAWPEG  = 6,  /* LookOut WPEG                        */
+       LookOutBWPEG  = 7,  /* LookOut WPEG                        */
+} node_type;
+
+static inline int is_WPEG(struct rio_detail *rio)
+{
+       return (rio->type == CompatWPEG || rio->type == AltWPEG ||
+               rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
+}
+
+
+/* In clustered mode, the high nibble of APIC ID is a cluster number.
+ * The low nibble is a 4-bit bitmap. */
+#define XAPIC_DEST_CPUS_SHIFT  4
+#define XAPIC_DEST_CPUS_MASK   ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
+#define XAPIC_DEST_CLUSTER_MASK        (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
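+
+/*
+ * Example (illustrative): logical APIC ID 0x23 decodes to cluster
+ * 0x23 >> XAPIC_DEST_CPUS_SHIFT = 2 with CPU bitmap
+ * 0x23 & XAPIC_DEST_CPUS_MASK = 0x3, i.e. the first two CPUs of
+ * cluster 2.
+ */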
+
+#define SUMMIT_APIC_DFR_VALUE  (APIC_DFR_CLUSTER)
+
+static const cpumask_t *summit_target_cpus(void)
+{
+       /*
+        * CPU_MASK_ALL (0xff) has undefined behaviour with
+        * dest_LowestPrio mode logical clustered apic interrupt routing.
+        * Just start on cpu 0.  IRQ balancing will spread load.
+        */
+       return &cpumask_of_cpu(0);
+}
+
+static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
+{
+       return 0;
+}
+
+/* we don't use the phys_cpu_present_map to indicate apicid presence */
+static unsigned long summit_check_apicid_present(int bit)
+{
+       return 1;
+}
+
+static void summit_init_apic_ldr(void)
+{
+       unsigned long val, id;
+       int count = 0;
+       u8 my_id = (u8)hard_smp_processor_id();
+       u8 my_cluster = APIC_CLUSTER(my_id);
+#ifdef CONFIG_SMP
+       u8 lid;
+       int i;
+
+       /* Create logical APIC IDs by counting CPUs already in cluster. */
+       for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
+               lid = cpu_2_logical_apicid[i];
+               if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
+                       ++count;
+       }
+#endif
+       /* We only have a 4 wide bitmap in cluster mode.  If a deranged
+        * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
+       BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
+       id = my_cluster | (1UL << count);
+       apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
+       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+       val |= SET_APIC_LOGICAL_ID(id);
+       apic_write(APIC_LDR, val);
+}
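+
+/*
+ * Worked example (illustrative, assuming APIC_CLUSTER() keeps the high
+ * cluster nibble): if two CPUs of cluster 0x20 already hold logical
+ * IDs, the next CPU arrives with count = 2 and programs its LDR with
+ * 0x20 | (1 << 2) = 0x24.
+ */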
+
+static int summit_apic_id_registered(void)
+{
+       return 1;
+}
+
+static void summit_setup_apic_routing(void)
+{
+       printk("Enabling APIC mode:  Summit.  Using %d I/O APICs\n",
+                                               nr_ioapics);
+}
+
+static int summit_apicid_to_node(int logical_apicid)
+{
+#ifdef CONFIG_SMP
+       return apicid_2_node[hard_smp_processor_id()];
+#else
+       return 0;
+#endif
+}
+
+/* Mapping from cpu number to logical apicid */
+static inline int summit_cpu_to_logical_apicid(int cpu)
+{
+#ifdef CONFIG_SMP
+       if (cpu >= nr_cpu_ids)
+               return BAD_APICID;
+       return cpu_2_logical_apicid[cpu];
+#else
+       return logical_smp_processor_id();
+#endif
+}
+
+static int summit_cpu_present_to_apicid(int mps_cpu)
+{
+       if (mps_cpu < nr_cpu_ids)
+               return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
+       else
+               return BAD_APICID;
+}
+
+static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
+{
+       /* For clustered we don't have a good way to do this yet - hack */
+       return physids_promote(0x0F);
+}
+
+static physid_mask_t summit_apicid_to_cpu_present(int apicid)
+{
+       return physid_mask_of_physid(0);
+}
+
+static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return 1;
+}
+
+static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
+{
+       unsigned int round = 0;
+       int cpu, apicid = 0;
+
+       /*
+        * The cpus in the mask must all be on the apic cluster.
+        */
+       for_each_cpu(cpu, cpumask) {
+               int new_apicid = summit_cpu_to_logical_apicid(cpu);
+
+               if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
+                       printk("%s: Not a valid mask!\n", __func__);
+                       return BAD_APICID;
+               }
+               apicid |= new_apicid;
+               round++;
+       }
+       return apicid;
+}
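+
+/*
+ * Example (illustrative): a mask covering logical IDs 0x21 and 0x22
+ * (same cluster) ORs to 0x23, addressing both CPUs at once; mixing
+ * clusters, say 0x21 and 0x31, fails the APIC_CLUSTER() check above
+ * and returns BAD_APICID.
+ */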
+
+static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
+                             const struct cpumask *andmask)
+{
+       int apicid = summit_cpu_to_logical_apicid(0);
+       cpumask_var_t cpumask;
+
+       if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+               return apicid;
+
+       cpumask_and(cpumask, inmask, andmask);
+       cpumask_and(cpumask, cpumask, cpu_online_mask);
+       apicid = summit_cpu_mask_to_apicid(cpumask);
+
+       free_cpumask_var(cpumask);
+
+       return apicid;
+}
+
+/*
+ * cpuid returns the value latched in the HW at reset, not the APIC ID
+ * register's value.  For any box whose BIOS changes APIC IDs, like
+ * clustered APIC systems, we must use hard_smp_processor_id.
+ *
+ * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
+ */
+static int summit_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+       return hard_smp_processor_id() >> index_msb;
+}
+
+static int probe_summit(void)
+{
+       /* probed later in mptable/ACPI hooks */
+       return 0;
+}
+
+static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
+{
+       /* Careful. Some cpus do not strictly honor the set of cpus
+        * specified in the interrupt destination when using lowest
+        * priority interrupt delivery mode.
+        *
+        * In particular there was a hyperthreading cpu observed to
+        * deliver interrupts to the wrong hyperthread when only one
+        * hyperthread was specified in the interrupt destination.
+        */
+       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
+}
+
+#ifdef CONFIG_X86_SUMMIT_NUMA
+static struct rio_table_hdr *rio_table_hdr;
+static struct scal_detail   *scal_devs[MAX_NUMNODES];
+static struct rio_detail    *rio_devs[MAX_NUMNODES*4];
+
+#ifndef CONFIG_X86_NUMAQ
+static int mp_bus_id_to_node[MAX_MP_BUSSES];
+#endif
+
+static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
+{
+       int twister = 0, node = 0;
+       int i, bus, num_buses;
+
+       for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
+               if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
+                       twister = rio_devs[i]->owner_id;
+                       break;
+               }
+       }
+       if (i == rio_table_hdr->num_rio_dev) {
+               printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
+               return last_bus;
+       }
+
+       for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
+               if (scal_devs[i]->node_id == twister) {
+                       node = scal_devs[i]->node_id;
+                       break;
+               }
+       }
+       if (i == rio_table_hdr->num_scal_dev) {
+               printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
+               return last_bus;
+       }
+
+       switch (rio_devs[wpeg_num]->type) {
+       case CompatWPEG:
+               /*
+                * The Compatibility Winnipeg controls the 2 legacy buses,
+                * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
+                * a PCI-PCI bridge card is used in either slot: total 5 buses.
+                */
+               num_buses = 5;
+               break;
+       case AltWPEG:
+               /*
+                * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
+                * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
+                * the "extra" buses for each of those slots: total 7 buses.
+                */
+               num_buses = 7;
+               break;
+       case LookOutAWPEG:
+       case LookOutBWPEG:
+               /*
+                * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
+                * & the "extra" buses for each of those slots: total 9 buses.
+                */
+               num_buses = 9;
+               break;
+       default:
+               printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
+               return last_bus;
+       }
+
+       for (bus = last_bus; bus < last_bus + num_buses; bus++)
+               mp_bus_id_to_node[bus] = node;
+       return bus;
+}
+
+static int build_detail_arrays(void)
+{
+       unsigned long ptr;
+       int i, scal_detail_size, rio_detail_size;
+
+       if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
+               printk(KERN_WARNING "%s: MAX_NUMNODES too low!  Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
+               return 0;
+       }
+
+       switch (rio_table_hdr->version) {
+       default:
+               printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
+               return 0;
+       case 2:
+               scal_detail_size = 11;
+               rio_detail_size = 13;
+               break;
+       case 3:
+               scal_detail_size = 12;
+               rio_detail_size = 15;
+               break;
+       }
+
+       ptr = (unsigned long)rio_table_hdr + 3;
+       for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
+               scal_devs[i] = (struct scal_detail *)ptr;
+
+       for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
+               rio_devs[i] = (struct rio_detail *)ptr;
+
+       return 1;
+}
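+
+/*
+ * Layout sketch: the packed Rio Grande table starts with the 3-byte
+ * header (hence ptr = rio_table_hdr + 3), followed by num_scal_dev
+ * scalability entries (11 bytes each for version 2, 12 for version 3)
+ * and then num_rio_dev RIO entries (13 or 15 bytes respectively); the
+ * loops above simply walk these fixed-size records.
+ */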
+
+void setup_summit(void)
+{
+       unsigned long           ptr;
+       unsigned short          offset;
+       int                     i, next_wpeg, next_bus = 0;
+
+       /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
+       ptr = get_bios_ebda();
+       ptr = (unsigned long)phys_to_virt(ptr);
+
+       rio_table_hdr = NULL;
+       offset = 0x180;
+       while (offset) {
+               /* The block id is stored in the 2nd word */
+               if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
+                       /* set the pointer past the offset & block id */
+                       rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
+                       break;
+               }
+               /* The next offset is stored in the 1st word.  0 means no more */
+               offset = *((unsigned short *)(ptr + offset));
+       }
+       if (!rio_table_hdr) {
+               printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
+               return;
+       }
+
+       if (!build_detail_arrays())
+               return;
+
+       /* The first Winnipeg we're looking for has an index of 0 */
+       next_wpeg = 0;
+       do {
+               for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
+                       if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
+                               /* It's the Winnipeg we're looking for! */
+                               next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
+                               next_wpeg++;
+                               break;
+                       }
+               }
+               /*
+                * If we go through all Rio devices and don't find one with
+                * the next index, it means we've found all the Winnipegs,
+                * and thus all the PCI buses.
+                */
+               if (i == rio_table_hdr->num_rio_dev)
+                       next_wpeg = 0;
+       } while (next_wpeg != 0);
+}
+#endif
+
+struct apic apic_summit = {
+
+       .name                           = "summit",
+       .probe                          = probe_summit,
+       .acpi_madt_oem_check            = summit_acpi_madt_oem_check,
+       .apic_id_registered             = summit_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       /* logical delivery broadcast to all CPUs: */
+       .irq_dest_mode                  = 1,
+
+       .target_cpus                    = summit_target_cpus,
+       .disable_esr                    = 1,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = summit_check_apicid_used,
+       .check_apicid_present           = summit_check_apicid_present,
+
+       .vector_allocation_domain       = summit_vector_allocation_domain,
+       .init_apic_ldr                  = summit_init_apic_ldr,
+
+       .ioapic_phys_id_map             = summit_ioapic_phys_id_map,
+       .setup_apic_routing             = summit_setup_apic_routing,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = summit_apicid_to_node,
+       .cpu_to_logical_apicid          = summit_cpu_to_logical_apicid,
+       .cpu_present_to_apicid          = summit_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = summit_apicid_to_cpu_present,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = summit_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = summit_phys_pkg_id,
+       .mps_oem_check                  = summit_mps_oem_check,
+
+       .get_apic_id                    = summit_get_apic_id,
+       .set_apic_id                    = NULL,
+       .apic_id_mask                   = 0xFF << 24,
+
+       .cpu_mask_to_apicid             = summit_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = summit_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = summit_send_IPI_mask,
+       .send_IPI_mask_allbutself       = NULL,
+       .send_IPI_allbutself            = summit_send_IPI_allbutself,
+       .send_IPI_all                   = summit_send_IPI_all,
+       .send_IPI_self                  = default_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+       .wait_for_init_deassert         = default_wait_for_init_deassert,
+
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = default_inquire_remote_apic,
+
+       .read                           = native_apic_mem_read,
+       .write                          = native_apic_mem_write,
+       .icr_read                       = native_apic_icr_read,
+       .icr_write                      = native_apic_icr_write,
+       .wait_icr_idle                  = native_apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
new file mode 100644 (file)
index 0000000..8fb87b6
--- /dev/null
@@ -0,0 +1,239 @@
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/dmar.h>
+
+#include <asm/smp.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+
+DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       return x2apic_enabled();
+}
+
+/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
+
+static const struct cpumask *x2apic_target_cpus(void)
+{
+       return cpumask_of(0);
+}
+
+/*
+ * For now each logical cpu is in its own vector allocation domain.
+ */
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       cpumask_clear(retmask);
+       cpumask_set_cpu(cpu, retmask);
+}
+
+static void
+__x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
+{
+       unsigned long cfg;
+
+       cfg = __prepare_ICR(0, vector, dest);
+
+       /*
+        * send the IPI.
+        */
+       native_x2apic_icr_write(cfg, apicid);
+}
+
+/*
+ * For now, we send the IPIs one by one in the cpumask.
+ * TBD: Based on the cpu mask, we can send the IPIs to the cluster group
+ * at once. We have 16 cpus in a cluster. This will minimize IPI register
+ * writes.
+ */
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               __x2apic_send_IPI_dest(
+                       per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                       vector, apic->dest_logical);
+       }
+       local_irq_restore(flags);
+}
+
+static void
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+       unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __x2apic_send_IPI_dest(
+                               per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                               vector, apic->dest_logical);
+       }
+       local_irq_restore(flags);
+}
+
+static void x2apic_send_IPI_allbutself(int vector)
+{
+       unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       for_each_online_cpu(query_cpu) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __x2apic_send_IPI_dest(
+                               per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                               vector, apic->dest_logical);
+       }
+       local_irq_restore(flags);
+}
+
+static void x2apic_send_IPI_all(int vector)
+{
+       x2apic_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int x2apic_apic_id_registered(void)
+{
+       return 1;
+}
+
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       /*
+        * We're using fixed IRQ delivery, can only return one logical APIC ID.
+        * May as well be the first.
+        */
+       int cpu = cpumask_first(cpumask);
+
+       if ((unsigned)cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_logical_apicid, cpu);
+       else
+               return BAD_APICID;
+}
+
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                             const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one logical APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+
+       if (cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+       return BAD_APICID;
+}
+
+static unsigned int x2apic_cluster_phys_get_apic_id(unsigned long x)
+{
+       return x;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+       return id;
+}
+
+static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
+{
+       return current_cpu_data.initial_apicid >> index_msb;
+}
+
+static void x2apic_send_IPI_self(int vector)
+{
+       apic_write(APIC_SELF_IPI, vector);
+}
+
+static void init_x2apic_ldr(void)
+{
+       int cpu = smp_processor_id();
+
+       per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
+}
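+
+/*
+ * Note: the LDR is read once here at CPU bring-up and cached in
+ * x86_cpu_to_logical_apicid, so the IPI paths above can address each
+ * target CPU without touching any remote APIC registers.
+ */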
+
+struct apic apic_x2apic_cluster = {
+
+       .name                           = "cluster x2apic",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
+       .apic_id_registered             = x2apic_apic_id_registered,
+
+       .irq_delivery_mode              = dest_LowestPrio,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = x2apic_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = x2apic_vector_allocation_domain,
+       .init_apic_ldr                  = init_x2apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = x2apic_cluster_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_cluster_phys_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = x2apic_send_IPI_mask,
+       .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
+       .send_IPI_all                   = x2apic_send_IPI_all,
+       .send_IPI_self                  = x2apic_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_msr_read,
+       .write                          = native_apic_msr_write,
+       .icr_read                       = native_x2apic_icr_read,
+       .icr_write                      = native_x2apic_icr_write,
+       .wait_icr_idle                  = native_x2apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
new file mode 100644 (file)
index 0000000..23625b9
--- /dev/null
@@ -0,0 +1,228 @@
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/dmar.h>
+
+#include <asm/smp.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+
+int x2apic_phys;
+
+static int set_x2apic_phys_mode(char *arg)
+{
+       x2apic_phys = 1;
+       return 0;
+}
+early_param("x2apic_phys", set_x2apic_phys_mode);
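+
+/*
+ * Usage note: adding "x2apic_phys" to the kernel command line selects
+ * physical-mode x2apic; without it, the acpi_madt_oem_check below
+ * returns 0 and default_setup_apic_routing() ends up preferring the
+ * cluster-mode driver.
+ */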
+
+static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       if (x2apic_phys)
+               return x2apic_enabled();
+       else
+               return 0;
+}
+
+/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
+
+static const struct cpumask *x2apic_target_cpus(void)
+{
+       return cpumask_of(0);
+}
+
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       cpumask_clear(retmask);
+       cpumask_set_cpu(cpu, retmask);
+}
+
+static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
+                                  unsigned int dest)
+{
+       unsigned long cfg;
+
+       cfg = __prepare_ICR(0, vector, dest);
+
+       /*
+        * send the IPI.
+        */
+       native_x2apic_icr_write(cfg, apicid);
+}
+
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
+                                      vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+static void
+x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+       unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       for_each_cpu(query_cpu, mask) {
+               if (query_cpu != this_cpu)
+                       __x2apic_send_IPI_dest(
+                               per_cpu(x86_cpu_to_apicid, query_cpu),
+                               vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+static void x2apic_send_IPI_allbutself(int vector)
+{
+       unsigned long this_cpu = smp_processor_id();
+       unsigned long query_cpu;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       for_each_online_cpu(query_cpu) {
+               if (query_cpu == this_cpu)
+                       continue;
+               __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
+                                      vector, APIC_DEST_PHYSICAL);
+       }
+       local_irq_restore(flags);
+}
+
+static void x2apic_send_IPI_all(int vector)
+{
+       x2apic_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int x2apic_apic_id_registered(void)
+{
+       return 1;
+}
+
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       int cpu = cpumask_first(cpumask);
+
+       if ((unsigned)cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+       else
+               return BAD_APICID;
+}
+
+static unsigned int
+x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                             const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+
+       if (cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+
+       return BAD_APICID;
+}
+
+static unsigned int x2apic_phys_get_apic_id(unsigned long x)
+{
+       return x;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+       return id;
+}
+
+static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
+{
+       return current_cpu_data.initial_apicid >> index_msb;
+}
+
+static void x2apic_send_IPI_self(int vector)
+{
+       apic_write(APIC_SELF_IPI, vector);
+}
+
+static void init_x2apic_ldr(void)
+{
+}
+
+struct apic apic_x2apic_phys = {
+
+       .name                           = "physical x2apic",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = x2apic_acpi_madt_oem_check,
+       .apic_id_registered             = x2apic_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 0, /* physical */
+
+       .target_cpus                    = x2apic_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = 0,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = x2apic_vector_allocation_domain,
+       .init_apic_ldr                  = init_x2apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = x2apic_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_phys_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = x2apic_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = x2apic_send_IPI_mask,
+       .send_IPI_mask_allbutself       = x2apic_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = x2apic_send_IPI_allbutself,
+       .send_IPI_all                   = x2apic_send_IPI_all,
+       .send_IPI_self                  = x2apic_send_IPI_self,
+
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_msr_read,
+       .write                          = native_apic_msr_write,
+       .icr_read                       = native_x2apic_icr_read,
+       .icr_write                      = native_x2apic_icr_write,
+       .wait_icr_idle                  = native_x2apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
new file mode 100644 (file)
index 0000000..1bd6da1
--- /dev/null
@@ -0,0 +1,647 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV APIC functions (note: not an Intel compatible APIC)
+ *
+ * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ */
+#include <linux/cpumask.h>
+#include <linux/hardirq.h>
+#include <linux/proc_fs.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+#include <linux/init.h>
+
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/current.h>
+#include <asm/pgtable.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/ipi.h>
+#include <asm/smp.h>
+
+DEFINE_PER_CPU(int, x2apic_extra_bits);
+
+static enum uv_system_type uv_system_type;
+
+static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
+{
+       if (!strcmp(oem_id, "SGI")) {
+               if (!strcmp(oem_table_id, "UVL"))
+                       uv_system_type = UV_LEGACY_APIC;
+               else if (!strcmp(oem_table_id, "UVX"))
+                       uv_system_type = UV_X2APIC;
+               else if (!strcmp(oem_table_id, "UVH")) {
+                       uv_system_type = UV_NON_UNIQUE_APIC;
+                       return 1;
+               }
+       }
+       return 0;
+}
+
+enum uv_system_type get_uv_system_type(void)
+{
+       return uv_system_type;
+}
+
+int is_uv_system(void)
+{
+       return uv_system_type != UV_NONE;
+}
+EXPORT_SYMBOL_GPL(is_uv_system);
+
+DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
+EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
+
+struct uv_blade_info *uv_blade_info;
+EXPORT_SYMBOL_GPL(uv_blade_info);
+
+short *uv_node_to_blade;
+EXPORT_SYMBOL_GPL(uv_node_to_blade);
+
+short *uv_cpu_to_blade;
+EXPORT_SYMBOL_GPL(uv_cpu_to_blade);
+
+short uv_possible_blades;
+EXPORT_SYMBOL_GPL(uv_possible_blades);
+
+unsigned long sn_rtc_cycles_per_second;
+EXPORT_SYMBOL(sn_rtc_cycles_per_second);
+
+/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
+
+static const struct cpumask *uv_target_cpus(void)
+{
+       return cpumask_of(0);
+}
+
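+/*
+ * UV uses per-cpu vector allocation: the allocation domain for an
+ * interrupt is a single cpu, so each vector targets exactly one cpu.
+ */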
+static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+       cpumask_clear(retmask);
+       cpumask_set_cpu(cpu, retmask);
+}
+
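+/*
+ * Wake a secondary cpu by writing an INIT and then a STARTUP message
+ * into the target hub's IPI MMR rather than using the local APIC ICR;
+ * the vector field encodes the trampoline page derived from start_rip.
+ */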
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+{
+#ifdef CONFIG_SMP
+       unsigned long val;
+       int pnode;
+
+       pnode = uv_apicid_to_pnode(phys_apicid);
+       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+           (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+           ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+           APIC_DM_INIT;
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+       mdelay(10);
+
+       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+           (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+           ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
+           APIC_DM_STARTUP;
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+
+       atomic_set(&init_deasserted, 1);
+#endif
+       return 0;
+}
+
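+/*
+ * Send a single interrupt: write (send, apicid, vector) into the IPI
+ * MMR of the hub (pnode) that the destination cpu's APIC id maps to.
+ */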
+static void uv_send_IPI_one(int cpu, int vector)
+{
+       unsigned long val, apicid;
+       int pnode;
+
+       apicid = per_cpu(x86_cpu_to_apicid, cpu);
+       pnode = uv_apicid_to_pnode(apicid);
+
+       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+             (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+             (vector << UVH_IPI_INT_VECTOR_SHFT);
+
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
+
+static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
+{
+       unsigned int cpu;
+
+       for_each_cpu(cpu, mask)
+               uv_send_IPI_one(cpu, vector);
+}
+
+static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
+{
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
+
+       for_each_cpu(cpu, mask) {
+               if (cpu != this_cpu)
+                       uv_send_IPI_one(cpu, vector);
+       }
+}
+
+static void uv_send_IPI_allbutself(int vector)
+{
+       unsigned int this_cpu = smp_processor_id();
+       unsigned int cpu;
+
+       for_each_online_cpu(cpu) {
+               if (cpu != this_cpu)
+                       uv_send_IPI_one(cpu, vector);
+       }
+}
+
+static void uv_send_IPI_all(int vector)
+{
+       uv_send_IPI_mask(cpu_online_mask, vector);
+}
+
+static int uv_apic_id_registered(void)
+{
+       return 1;
+}
+
+static void uv_init_apic_ldr(void)
+{
+}
+
+static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
+{
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       int cpu = cpumask_first(cpumask);
+
+       if ((unsigned)cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+       else
+               return BAD_APICID;
+}
+
+static unsigned int
+uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                         const struct cpumask *andmask)
+{
+       int cpu;
+
+       /*
+        * We're using fixed IRQ delivery, can only return one phys APIC ID.
+        * May as well be the first.
+        */
+       for_each_cpu_and(cpu, cpumask, andmask) {
+               if (cpumask_test_cpu(cpu, cpu_online_mask))
+                       break;
+       }
+       if (cpu < nr_cpu_ids)
+               return per_cpu(x86_cpu_to_apicid, cpu);
+
+       return BAD_APICID;
+}
+
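+/*
+ * On UV_NON_UNIQUE_APIC systems the value read from APIC_ID lacks the
+ * node bits, so OR back in the per-cpu extra bits set up at boot.
+ */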
+static unsigned int x2apic_get_apic_id(unsigned long x)
+{
+       unsigned int id;
+
+       WARN_ON(preemptible() && num_online_cpus() > 1);
+       id = x | __get_cpu_var(x2apic_extra_bits);
+
+       return id;
+}
+
+static unsigned long set_apic_id(unsigned int id)
+{
+       unsigned long x;
+
+       /* mask out x2apic_extra_bits? */
+       x = id;
+       return x;
+}
+
+static unsigned int uv_read_apic_id(void)
+{
+       return x2apic_get_apic_id(apic_read(APIC_ID));
+}
+
+static int uv_phys_pkg_id(int initial_apicid, int index_msb)
+{
+       return uv_read_apic_id() >> index_msb;
+}
+
+static void uv_send_IPI_self(int vector)
+{
+       apic_write(APIC_SELF_IPI, vector);
+}
+
+struct apic apic_x2apic_uv_x = {
+
+       .name                           = "UV large system",
+       .probe                          = NULL,
+       .acpi_madt_oem_check            = uv_acpi_madt_oem_check,
+       .apic_id_registered             = uv_apic_id_registered,
+
+       .irq_delivery_mode              = dest_Fixed,
+       .irq_dest_mode                  = 1, /* logical */
+
+       .target_cpus                    = uv_target_cpus,
+       .disable_esr                    = 0,
+       .dest_logical                   = APIC_DEST_LOGICAL,
+       .check_apicid_used              = NULL,
+       .check_apicid_present           = NULL,
+
+       .vector_allocation_domain       = uv_vector_allocation_domain,
+       .init_apic_ldr                  = uv_init_apic_ldr,
+
+       .ioapic_phys_id_map             = NULL,
+       .setup_apic_routing             = NULL,
+       .multi_timer_check              = NULL,
+       .apicid_to_node                 = NULL,
+       .cpu_to_logical_apicid          = NULL,
+       .cpu_present_to_apicid          = default_cpu_present_to_apicid,
+       .apicid_to_cpu_present          = NULL,
+       .setup_portio_remap             = NULL,
+       .check_phys_apicid_present      = default_check_phys_apicid_present,
+       .enable_apic_mode               = NULL,
+       .phys_pkg_id                    = uv_phys_pkg_id,
+       .mps_oem_check                  = NULL,
+
+       .get_apic_id                    = x2apic_get_apic_id,
+       .set_apic_id                    = set_apic_id,
+       .apic_id_mask                   = 0xFFFFFFFFu,
+
+       .cpu_mask_to_apicid             = uv_cpu_mask_to_apicid,
+       .cpu_mask_to_apicid_and         = uv_cpu_mask_to_apicid_and,
+
+       .send_IPI_mask                  = uv_send_IPI_mask,
+       .send_IPI_mask_allbutself       = uv_send_IPI_mask_allbutself,
+       .send_IPI_allbutself            = uv_send_IPI_allbutself,
+       .send_IPI_all                   = uv_send_IPI_all,
+       .send_IPI_self                  = uv_send_IPI_self,
+
+       .wakeup_secondary_cpu           = uv_wakeup_secondary,
+       .trampoline_phys_low            = DEFAULT_TRAMPOLINE_PHYS_LOW,
+       .trampoline_phys_high           = DEFAULT_TRAMPOLINE_PHYS_HIGH,
+       .wait_for_init_deassert         = NULL,
+       .smp_callin_clear_local_apic    = NULL,
+       .inquire_remote_apic            = NULL,
+
+       .read                           = native_apic_msr_read,
+       .write                          = native_apic_msr_write,
+       .icr_read                       = native_x2apic_icr_read,
+       .icr_write                      = native_x2apic_icr_write,
+       .wait_icr_idle                  = native_x2apic_wait_icr_idle,
+       .safe_wait_icr_idle             = native_safe_x2apic_wait_icr_idle,
+};
+
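+/*
+ * Record the pnode bits that the hub strips from this cpu's APIC id;
+ * x2apic_get_apic_id() ORs them back in on every read.
+ */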
+static __cpuinit void set_x2apic_extra_bits(int pnode)
+{
+       __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
+}
+
+/*
+ * Called on boot cpu.
+ */
+static __init int boot_pnode_to_blade(int pnode)
+{
+       int blade;
+
+       for (blade = 0; blade < uv_num_possible_blades(); blade++)
+               if (pnode == uv_blade_info[blade].pnode)
+                       return blade;
+       BUG();
+}
+
+struct redir_addr {
+       unsigned long redirect;
+       unsigned long alias;
+};
+
+#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
+
+static __initdata struct redir_addr redir_addrs[] = {
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
+       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+};
+
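+/*
+ * Find the low-memory alias: scan the overlay registers for the one
+ * whose base is 0 and return its redirect target and size (2^m_alias).
+ */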
+static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
+{
+       union uvh_si_alias0_overlay_config_u alias;
+       union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
+               alias.v = uv_read_local_mmr(redir_addrs[i].alias);
+               if (alias.s.base == 0) {
+                       *size = (1UL << alias.s.m_alias);
+                       redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
+                       *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
+                       return;
+               }
+       }
+       BUG();
+}
+
+static __init void map_low_mmrs(void)
+{
+       init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
+       init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
+}
+
+enum map_type {map_wb, map_uc};
+
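+/*
+ * Map a hub overlay region, sized for max_pnode + 1 nodes, either
+ * write-back or uncached as requested by the caller.
+ */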
+static __init void map_high(char *id, unsigned long base, int shift,
+                           int max_pnode, enum map_type map_type)
+{
+       unsigned long bytes, paddr;
+
+       paddr = base << shift;
+       bytes = (1UL << shift) * (max_pnode + 1);
+       printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
+                                               paddr + bytes);
+       if (map_type == map_uc)
+               init_extra_mapping_uc(paddr, bytes);
+       else
+               init_extra_mapping_wb(paddr, bytes);
+}
+
+static __init void map_gru_high(int max_pnode)
+{
+       union uvh_rh_gam_gru_overlay_config_mmr_u gru;
+       int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+       gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
+       if (gru.s.enable)
+               map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
+}
+
+static __init void map_config_high(int max_pnode)
+{
+       union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
+       int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+       cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
+       if (cfg.s.enable)
+               map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
+}
+
+static __init void map_mmr_high(int max_pnode)
+{
+       union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
+       int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+       mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
+       if (mmr.s.enable)
+               map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
+}
+
+static __init void map_mmioh_high(int max_pnode)
+{
+       union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
+       int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
+
+       mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
+       if (mmioh.s.enable)
+               map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
+}
+
+static __init void uv_rtc_init(void)
+{
+       long status;
+       u64 ticks_per_sec;
+
+       status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
+                                       &ticks_per_sec);
+       if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
+               printk(KERN_WARNING
+                       "unable to determine platform RTC clock frequency, "
+                       "guessing.\n");
+               /* BIOS gives a wrong value for the clock frequency, so guess */
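+               /* (1000000000000UL / 30000UL = ~33.33 MHz) */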
+               sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
+       } else
+               sn_rtc_cycles_per_second = ticks_per_sec;
+}
+
+/*
+ * Per-cpu heartbeat timer: each cpu periodically toggles its heartbeat
+ * bit and reports its idle/active state in its System Controller
+ * Interface Register (SCIR).
+ */
+static void uv_heartbeat(unsigned long ignored)
+{
+       struct timer_list *timer = &uv_hub_info->scir.timer;
+       unsigned char bits = uv_hub_info->scir.state;
+
+       /* flip heartbeat bit */
+       bits ^= SCIR_CPU_HEARTBEAT;
+
+       /* is this cpu idle? */
+       if (idle_cpu(raw_smp_processor_id()))
+               bits &= ~SCIR_CPU_ACTIVITY;
+       else
+               bits |= SCIR_CPU_ACTIVITY;
+
+       /* update system controller interface reg */
+       uv_set_scir_bits(bits);
+
+       /* enable next timer period */
+       mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
+}
+
+static void __cpuinit uv_heartbeat_enable(int cpu)
+{
+       if (!uv_cpu_hub_info(cpu)->scir.enabled) {
+               struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
+
+               uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
+               setup_timer(timer, uv_heartbeat, cpu);
+               timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
+               add_timer_on(timer, cpu);
+               uv_cpu_hub_info(cpu)->scir.enabled = 1;
+       }
+
+       /* check boot cpu */
+       if (!uv_cpu_hub_info(0)->scir.enabled)
+               uv_heartbeat_enable(0);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void __cpuinit uv_heartbeat_disable(int cpu)
+{
+       if (uv_cpu_hub_info(cpu)->scir.enabled) {
+               uv_cpu_hub_info(cpu)->scir.enabled = 0;
+               del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
+       }
+       uv_set_cpu_scir_bits(cpu, 0xff);
+}
+
+/*
+ * cpu hotplug notifier
+ */
+static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
+                                      unsigned long action, void *hcpu)
+{
+       long cpu = (long)hcpu;
+
+       switch (action) {
+       case CPU_ONLINE:
+               uv_heartbeat_enable(cpu);
+               break;
+       case CPU_DOWN_PREPARE:
+               uv_heartbeat_disable(cpu);
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+       hotcpu_notifier(uv_scir_cpu_notify, 0);
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+
+static __init void uv_scir_register_cpu_notifier(void)
+{
+}
+
+static __init int uv_init_heartbeat(void)
+{
+       int cpu;
+
+       if (is_uv_system())
+               for_each_online_cpu(cpu)
+                       uv_heartbeat_enable(cpu);
+       return 0;
+}
+
+late_initcall(uv_init_heartbeat);
+
+#endif /* !CONFIG_HOTPLUG_CPU */
+
+/*
+ * Called on each cpu to initialize the per_cpu UV data area.
+ * FIXME: hotplug not supported yet
+ */
+void __cpuinit uv_cpu_init(void)
+{
+       /* CPU 0 initialization will be done via uv_system_init(). */
+       if (!uv_blade_info)
+               return;
+
+       uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
+
+       if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
+               set_x2apic_extra_bits(uv_hub_info->pnode);
+}
+
+
+void __init uv_system_init(void)
+{
+       union uvh_si_addr_map_config_u m_n_config;
+       union uvh_node_id_u node_id;
+       unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
+       int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
+       int max_pnode = 0;
+       unsigned long mmr_base, present;
+
+       map_low_mmrs();
+
+       m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+       m_val = m_n_config.s.m_skt;
+       n_val = m_n_config.s.n_skt;
+       mmr_base =
+           uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
+           ~UV_MMR_ENABLE;
+       printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
+
+       for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
+               uv_possible_blades +=
+                       hweight64(uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8));
+       printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
+
+       bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
+       uv_blade_info = kmalloc(bytes, GFP_KERNEL);
+
+       get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
+
+       bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
+       uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
+       memset(uv_node_to_blade, 255, bytes);
+
+       bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
+       uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
+       memset(uv_cpu_to_blade, 255, bytes);
+
+       blade = 0;
+       for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
+               present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
+               for (j = 0; j < 64; j++) {
+                       if (!test_bit(j, &present))
+                               continue;
+                       uv_blade_info[blade].pnode = (i * 64 + j);
+                       uv_blade_info[blade].nr_possible_cpus = 0;
+                       uv_blade_info[blade].nr_online_cpus = 0;
+                       blade++;
+               }
+       }
+
+       node_id.v = uv_read_local_mmr(UVH_NODE_ID);
+       gnode_upper = (((unsigned long)node_id.s.node_id) &
+                      ~((1 << n_val) - 1)) << m_val;
+
+       uv_bios_init();
+       uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
+                           &sn_coherency_id, &sn_region_size);
+       uv_rtc_init();
+
+       for_each_present_cpu(cpu) {
+               nid = cpu_to_node(cpu);
+               pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+               blade = boot_pnode_to_blade(pnode);
+               lcpu = uv_blade_info[blade].nr_possible_cpus;
+               uv_blade_info[blade].nr_possible_cpus++;
+
+               uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
+               uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
+               uv_cpu_hub_info(cpu)->m_val = m_val;
+       uv_cpu_hub_info(cpu)->n_val = n_val;
+               uv_cpu_hub_info(cpu)->numa_blade_id = blade;
+               uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
+               uv_cpu_hub_info(cpu)->pnode = pnode;
+               uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
+               uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+               uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
+               uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+               uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+               uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
+               uv_node_to_blade[nid] = blade;
+               uv_cpu_to_blade[cpu] = blade;
+               max_pnode = max(pnode, max_pnode);
+
+               printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
+                       "lcpu %d, blade %d\n",
+                       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
+                       lcpu, blade);
+       }
+
+       map_gru_high(max_pnode);
+       map_mmr_high(max_pnode);
+       map_config_high(max_pnode);
+       map_mmioh_high(max_pnode);
+
+       uv_cpu_init();
+       uv_scir_register_cpu_notifier();
+       proc_mkdir("sgi_uv", NULL);
+}
index 266ec6c18b6c6b9aea5696d796c7dc565105286b..10033fe718e0ad574f57a4f54c9d67efb72cb23e 100644 (file)
@@ -301,7 +301,7 @@ extern int (*console_blank_hook)(int);
  */
 #define APM_ZERO_SEGS
 
-#include "apm.h"
+#include <asm/apm.h>
 
 /*
  * Define to re-initialize the interrupt 0 timer to 100 Hz after a suspend.
index ee4df08feee6b0a64b62ca639d39cffbdd0cc87a..fbf2f33e3080c24c3b17db173fd023fcc66527ae 100644 (file)
@@ -75,6 +75,7 @@ void foo(void)
        OFFSET(PT_DS,  pt_regs, ds);
        OFFSET(PT_ES,  pt_regs, es);
        OFFSET(PT_FS,  pt_regs, fs);
+       OFFSET(PT_GS,  pt_regs, gs);
        OFFSET(PT_ORIG_EAX, pt_regs, orig_ax);
        OFFSET(PT_EIP, pt_regs, ip);
        OFFSET(PT_CS,  pt_regs, cs);
index 1d41d3f1edbcaa3d39450561d25b8ef0fb801e38..8793ab33e2c16406632513ecdc2febefe26c66a3 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
 #include <linux/kbuild.h>
-#include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
 #include <asm/thread_info.h>
@@ -48,16 +47,6 @@ int main(void)
 #endif
        BLANK();
 #undef ENTRY
-#define ENTRY(entry) DEFINE(pda_ ## entry, offsetof(struct x8664_pda, entry))
-       ENTRY(kernelstack); 
-       ENTRY(oldrsp); 
-       ENTRY(pcurrent); 
-       ENTRY(irqcount);
-       ENTRY(cpunumber);
-       ENTRY(irqstackptr);
-       ENTRY(data_offset);
-       BLANK();
-#undef ENTRY
 #ifdef CONFIG_PARAVIRT
        BLANK();
        OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
index 2ac0ab71412a59f9c9e0b025d88f0acf9e7e39ff..b617b1164f1e08908b86d30284ceec0ff1b491f0 100644 (file)
@@ -83,7 +83,7 @@ void __init setup_bios_corruption_check(void)
                u64 size;
                addr = find_e820_area_size(addr, &size, PAGE_SIZE);
 
-               if (addr == 0)
+               if (!(addr + 1))
                        break;
 
                if ((addr + size) > corruption_check_size)
index 82db7f45e2de655430009b58fb776c11fc9b74de..d4356f8b7522a5ec747a8e913080610013f98ac0 100644 (file)
@@ -14,6 +14,8 @@ obj-y                 += vmware.o hypervisor.o
 obj-$(CONFIG_X86_32)   += bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)   += bugs_64.o
 
+obj-$(CONFIG_X86_CPU_DEBUG)            += cpu_debug.o
+
 obj-$(CONFIG_CPU_SUP_INTEL)            += intel.o
 obj-$(CONFIG_CPU_SUP_AMD)              += amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)         += cyrix.o
index 2cf23634b6d94d60e239d3f63cd9d4d21526b3fd..8220ae69849d4aa3e5405a412a6bca2b03c4b958 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/pat.h>
 #include <asm/processor.h>
 
-#include <mach_apic.h>
+#include <asm/apic.h>
 
 struct cpuid_bit {
        u16 feature;
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
        u32 regs[4];
        const struct cpuid_bit *cb;
 
-       static const struct cpuid_bit cpuid_bits[] = {
+       static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
                { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
                { 0, 0, 0, 0 }
        };
@@ -69,7 +69,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
  */
 void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        unsigned int eax, ebx, ecx, edx, sub_index;
        unsigned int ht_mask_width, core_plus_mask_width;
        unsigned int core_select_mask, core_level_siblings;
@@ -116,22 +116,14 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 
        core_select_mask = (~(-1 << core_plus_mask_width)) >> ht_mask_width;
 
-#ifdef CONFIG_X86_32
-       c->cpu_core_id = phys_pkg_id(c->initial_apicid, ht_mask_width)
+       c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, ht_mask_width)
                                                 & core_select_mask;
-       c->phys_proc_id = phys_pkg_id(c->initial_apicid, core_plus_mask_width);
+       c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, core_plus_mask_width);
        /*
         * Reinit the apicid, now that we have extended initial_apicid.
         */
-       c->apicid = phys_pkg_id(c->initial_apicid, 0);
-#else
-       c->cpu_core_id = phys_pkg_id(ht_mask_width) & core_select_mask;
-       c->phys_proc_id = phys_pkg_id(core_plus_mask_width);
-       /*
-        * Reinit the apicid, now that we have extended initial_apicid.
-        */
-       c->apicid = phys_pkg_id(0);
-#endif
+       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
+
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
 
@@ -143,37 +135,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
        return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-       if (!cpu_has_pat)
-               pat_disable("PAT not supported by CPU.");
-
-       switch (c->x86_vendor) {
-       case X86_VENDOR_INTEL:
-               /*
-                * There is a known erratum on Pentium III and Core Solo
-                * and Core Duo CPUs.
-                * " Page with PAT set to WC while associated MTRR is UC
-                *   may consolidate to UC "
-                * Because of this erratum, it is better to stick with
-                * setting WC in MTRR rather than using PAT on these CPUs.
-                *
-                * Enable PAT WC only on P4, Core 2 or later CPUs.
-                */
-               if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-                       return;
-
-               pat_disable("PAT WC disabled due to known CPU erratum.");
-               return;
-
-       case X86_VENDOR_AMD:
-       case X86_VENDOR_CENTAUR:
-       case X86_VENDOR_TRANSMETA:
-               return;
-       }
-
-       pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
index 7c878f6aa919182033001cdc6eaa4d1cbefc6543..7e4a459daa644f30309f50e9ca65ece4be85fad7 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/io.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 # include <asm/numa_64.h>
@@ -12,8 +13,6 @@
 # include <asm/cacheflush.h>
 #endif
 
-#include <mach_apic.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_32
@@ -143,6 +142,55 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
        }
 }
 
+static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       /* Called from identify_secondary_cpu()?  The boot cpu needs no MP check. */
+       if (c->cpu_index == boot_cpu_id)
+               return;
+
+       /*
+        * Certain Athlons might work (for various values of 'work') in SMP
+        * but they are not certified as MP capable.
+        */
+       /* Athlon 660/661 is valid. */
+       if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+           (c->x86_mask == 1)))
+               goto valid_k7;
+
+       /* Duron 670 is valid */
+       if ((c->x86_model == 7) && (c->x86_mask == 0))
+               goto valid_k7;
+
+       /*
+        * Athlon 662, Duron 671, and Athlon > model 7 have the MP
+        * capability bit. It's worth noting that the A5 stepping (662)
+        * of some Athlon XPs has the MP bit set.
+        * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+        * more.
+        */
+       if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+           ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+            (c->x86_model > 7))
+               if (cpu_has_mp)
+                       goto valid_k7;
+
+       /* If we get here, not a certified SMP capable AMD system. */
+
+       /*
+        * Don't taint if we are running SMP kernel on a single non-MP
+        * approved Athlon
+        */
+       WARN_ONCE(1, "WARNING: This combination of AMD"
+               " processors is not suitable for SMP.\n");
+       if (!test_taint(TAINT_UNSAFE_SMP))
+               add_taint(TAINT_UNSAFE_SMP);
+
+valid_k7:
+       ;
+#endif
+}
+
 static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 {
        u32 l, h;
@@ -177,6 +225,8 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
        }
 
        set_cpu_cap(c, X86_FEATURE_K7);
+
+       amd_k7_smp_check(c);
 }
 #endif
 
@@ -452,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 }
 #endif
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
        .c_vendor       = "AMD",
        .c_ident        = { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
index 89bfdd9cacc69a363fc0d9f024dc45b97310bf9e..983e0830f0dac85d08998a9bdd313f63f8232408 100644 (file)
@@ -468,7 +468,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
        return size;
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
        .c_vendor       = "Centaur",
        .c_ident        = { "CentaurHauls" },
        .c_early_init   = early_init_centaur,
index a1625f5a1e78d41f607ceae9652dc8711c491712..51b09c48c9c7bbcb31f9ca7e441940e209ab8381 100644 (file)
@@ -25,7 +25,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
        set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev __cpuinitconst = {
        .c_vendor       = "Centaur",
        .c_ident        = { "CentaurHauls" },
        .c_early_init   = early_init_centaur,
index 83492b1f93b11c5e0b851300ffdb3e314eaacc9e..e2962cc1e27b742965f6af45a8cfdcf9b4c1a0b8 100644 (file)
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
+
 #ifdef CONFIG_X86_LOCAL_APIC
-#include <asm/mpspec.h>
-#include <asm/apic.h>
-#include <mach_apic.h>
-#include <asm/genapic.h>
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_64
 
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+       alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+       alloc_bootmem_cpumask_var(&cpu_callin_mask);
+       alloc_bootmem_cpumask_var(&cpu_callout_mask);
+       alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
-cpumask_t cpu_callin_map;
+cpumask_t cpu_sibling_setup_map;
 cpumask_t cpu_callout_map;
 cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_callin_map;
 
 #endif /* CONFIG_X86_32 */
 
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types  kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
-       [GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
-       [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
-       [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
-       [GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
-       [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
-       [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
+       /*
+        * We need valid kernel segments for data and code in long mode too
+        * IRET will check the segment types  kkeil 2000/10/28
+        * Also sysret mandates a special GDT layout
+        *
+        * TLS descriptors are currently at a different place compared to i386.
+        * Hopefully nobody expects them at a fixed place (Wine?)
+        */
+       [GDT_ENTRY_KERNEL32_CS]         = { { { 0x0000ffff, 0x00cf9b00 } } },
+       [GDT_ENTRY_KERNEL_CS]           = { { { 0x0000ffff, 0x00af9b00 } } },
+       [GDT_ENTRY_KERNEL_DS]           = { { { 0x0000ffff, 0x00cf9300 } } },
+       [GDT_ENTRY_DEFAULT_USER32_CS]   = { { { 0x0000ffff, 0x00cffb00 } } },
+       [GDT_ENTRY_DEFAULT_USER_DS]     = { { { 0x0000ffff, 0x00cff300 } } },
+       [GDT_ENTRY_DEFAULT_USER_CS]     = { { { 0x0000ffff, 0x00affb00 } } },
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
-       [GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
-       [GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
-       [GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
-       [GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
+       [GDT_ENTRY_KERNEL_CS]           = { { { 0x0000ffff, 0x00cf9a00 } } },
+       [GDT_ENTRY_KERNEL_DS]           = { { { 0x0000ffff, 0x00cf9200 } } },
+       [GDT_ENTRY_DEFAULT_USER_CS]     = { { { 0x0000ffff, 0x00cffa00 } } },
+       [GDT_ENTRY_DEFAULT_USER_DS]     = { { { 0x0000ffff, 0x00cff200 } } },
        /*
         * Segments used for calling PnP BIOS have byte granularity.
        * Their code and data segments have fixed 64k limits,
         * the transfer segment sizes are set at run time.
         */
        /* 32-bit code */
-       [GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
+       [GDT_ENTRY_PNPBIOS_CS32]        = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
-       [GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
+       [GDT_ENTRY_PNPBIOS_CS16]        = { { { 0x0000ffff, 0x00009a00 } } },
        /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
+       [GDT_ENTRY_PNPBIOS_DS]          = { { { 0x0000ffff, 0x00009200 } } },
        /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
+       [GDT_ENTRY_PNPBIOS_TS1]         = { { { 0x00000000, 0x00009200 } } },
        /* 16-bit data */
-       [GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
+       [GDT_ENTRY_PNPBIOS_TS2]         = { { { 0x00000000, 0x00009200 } } },
        /*
         * The APM segments have byte granularity and their bases
         * are set at run time.  All have 64k limits.
         */
        /* 32-bit code */
-       [GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
+       [GDT_ENTRY_APMBIOS_BASE]        = { { { 0x0000ffff, 0x00409a00 } } },
        /* 16-bit code */
-       [GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
+       [GDT_ENTRY_APMBIOS_BASE+1]      = { { { 0x0000ffff, 0x00009a00 } } },
        /* data */
-       [GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
+       [GDT_ENTRY_APMBIOS_BASE+2]      = { { { 0x0000ffff, 0x00409200 } } },
 
-       [GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-       [GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+       [GDT_ENTRY_ESPFIX_SS]           = { { { 0x00000000, 0x00c09200 } } },
+       [GDT_ENTRY_PERCPU]              = { { { 0x0000ffff, 0x00cf9200 } } },
+       GDT_STACK_CANARY_INIT
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
@@ -153,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
         * the CPUID. Add "volatile" to not allow gcc to
         * optimize the subsequent calls to this function.
         */
-       asm volatile ("pushfl\n\t"
-                     "pushfl\n\t"
-                     "popl %0\n\t"
-                     "movl %0,%1\n\t"
-                     "xorl %2,%0\n\t"
-                     "pushl %0\n\t"
-                     "popfl\n\t"
-                     "pushfl\n\t"
-                     "popl %0\n\t"
-                     "popfl\n\t"
+       asm volatile ("pushfl           \n\t"
+                     "pushfl           \n\t"
+                     "popl %0          \n\t"
+                     "movl %0, %1      \n\t"
+                     "xorl %2, %0      \n\t"
+                     "pushl %0         \n\t"
+                     "popfl            \n\t"
+                     "pushfl           \n\t"
+                     "popl %0          \n\t"
+                     "popfl            \n\t"
+
                      : "=&r" (f1), "=&r" (f2)
                      : "ir" (flag));
 
@@ -177,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-       if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-               /* Disable processor serial number */
-               unsigned long lo, hi;
-               rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-               lo |= 0x200000;
-               wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-               printk(KERN_NOTICE "CPU serial number disabled.\n");
-               clear_cpu_cap(c, X86_FEATURE_PN);
-
-               /* Disabling the serial number may affect the cpuid level */
-               c->cpuid_level = cpuid_eax(0);
-       }
+       unsigned long lo, hi;
+
+       if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+               return;
+
+       /* Disable processor serial number: */
+
+       rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+       lo |= 0x200000;
+       wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+       printk(KERN_NOTICE "CPU serial number disabled.\n");
+       clear_cpu_cap(c, X86_FEATURE_PN);
+
+       /* Disabling the serial number may affect the cpuid level */
+       c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -212,17 +228,65 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 }
 #endif
 
+/*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+       u32 feature;
+       u32 level;
+};
+
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+       { X86_FEATURE_MWAIT,            0x00000005 },
+       { X86_FEATURE_DCA,              0x00000009 },
+       { X86_FEATURE_XSAVE,            0x0000000d },
+       { 0, 0 }
+};
+
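+/*
+ * Clear each listed feature whose required CPUID level is above what
+ * the CPU actually reports, optionally warning.  Adding a dependent
+ * feature is a one-line entry in the table above, e.g. (hypothetical):
+ *
+ *	{ X86_FEATURE_FOO,	0x00000006 },
+ *
+ * which would drop X86_FEATURE_FOO whenever the maximum basic CPUID
+ * leaf is below 0x00000006.
+ */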
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+       const struct cpuid_dependent_feature *df;
+
+       for (df = cpuid_dependent_features; df->feature; df++) {
+
+               if (!cpu_has(c, df->feature))
+                       continue;
+               /*
+                * Note: cpuid_level is set to -1 if unavailable, but
+                * extended_cpuid_level is set to 0 if unavailable
+                * and the legitimate extended levels are all negative
+                * when signed; hence the weird messing around with
+                * signs here...
+                */
+               if (!((s32)df->level < 0 ?
+                    (u32)df->level > (u32)c->extended_cpuid_level :
+                    (s32)df->level > (s32)c->cpuid_level))
+                       continue;
+
+               clear_cpu_cap(c, df->feature);
+               if (!warn)
+                       continue;
+
+               printk(KERN_WARNING
+                      "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+                               x86_cap_flags[df->feature], df->level);
+       }
+}
+
 /*
  * Naming convention should be: <Name> [(<Codename>)]
 * This table is only used if init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-       struct cpu_model_info *info;
+       const struct cpu_model_info *info;
 
        if (c->x86_model >= 16)
                return NULL;    /* Range check */
@@ -242,21 +306,34 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
-void switch_to_new_gdt(void)
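+/*
+ * Point the per-cpu segment base (%fs on 32-bit, the GS base MSR on
+ * 64-bit) at this cpu's per-cpu area, then reload the stack canary
+ * segment.
+ */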
+void load_percpu_segment(int cpu)
+{
+#ifdef CONFIG_X86_32
+       loadsegment(fs, __KERNEL_PERCPU);
+#else
+       loadsegment(gs, 0);
+       wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
+#endif
+       load_stack_canary_segment();
+}
+
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
+void switch_to_new_gdt(int cpu)
 {
        struct desc_ptr gdt_descr;
 
-       gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+       gdt_descr.address = (long)get_cpu_gdt_table(cpu);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
-#ifdef CONFIG_X86_32
-       asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
-#endif
+       /* Reload the per-cpu base */
+
+       load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -275,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
        .c_init = default_init,
        .c_vendor = "Unknown",
        .c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -289,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
        if (c->extended_cpuid_level < 0x80000004)
                return;
 
-       v = (unsigned int *) c->x86_model_id;
+       v = (unsigned int *)c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
 
-       /* Intel chips right-justify this string for some dumb reason;
-          undo that brain damage */
+       /*
+        * Intel chips right-justify this string for some dumb reason;
+        * undo that brain damage:
+        */
        p = q = &c->x86_model_id[0];
        while (*p == ' ')
-            p++;
+               p++;
        if (p != q) {
-            while (*p)
-                 *q++ = *p++;
-            while (q <= &c->x86_model_id[48])
-                 *q++ = '\0';  /* Zero-pad the rest */
+               while (*p)
+                       *q++ = *p++;
+               while (q <= &c->x86_model_id[48])
+                       *q++ = '\0';    /* Zero-pad the rest */
        }
 }
 
@@ -373,36 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
        if (smp_num_siblings == 1) {
                printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-       } else if (smp_num_siblings > 1) {
+               goto out;
+       }
 
-               if (smp_num_siblings > nr_cpu_ids) {
-                       printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-                                       smp_num_siblings);
-                       smp_num_siblings = 1;
-                       return;
-               }
+       if (smp_num_siblings <= 1)
+               goto out;
 
-               index_msb = get_count_order(smp_num_siblings);
-#ifdef CONFIG_X86_64
-               c->phys_proc_id = phys_pkg_id(index_msb);
-#else
-               c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
-#endif
+       if (smp_num_siblings > nr_cpu_ids) {
+               pr_warning("CPU: Unsupported number of siblings %d",
+                          smp_num_siblings);
+               smp_num_siblings = 1;
+               return;
+       }
 
-               smp_num_siblings = smp_num_siblings / c->x86_max_cores;
+       index_msb = get_count_order(smp_num_siblings);
+       c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
-               index_msb = get_count_order(smp_num_siblings);
+       smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-               core_bits = get_count_order(c->x86_max_cores);
+       index_msb = get_count_order(smp_num_siblings);
 
-#ifdef CONFIG_X86_64
-               c->cpu_core_id = phys_pkg_id(index_msb) &
-                                              ((1 << core_bits) - 1);
-#else
-               c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
-                                              ((1 << core_bits) - 1);
-#endif
-       }
+       core_bits = get_count_order(c->x86_max_cores);
+
+       c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
+                                      ((1 << core_bits) - 1);
 
 out:
        if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -417,8 +490,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
        char *v = c->x86_vendor_id;
-       int i;
        static int printed;
+       int i;
 
        for (i = 0; i < X86_VENDOR_NUM; i++) {
                if (!cpu_devs[i])
@@ -427,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
                if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
                    (cpu_devs[i]->c_ident[1] &&
                     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
                        this_cpu = cpu_devs[i];
                        c->x86_vendor = this_cpu->c_x86_vendor;
                        return;
@@ -435,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
        if (!printed) {
                printed++;
-               printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+               printk(KERN_ERR
+                   "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
                printk(KERN_ERR "CPU: Your system may be unstable.\n");
        }
 
@@ -455,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 junk, tfms, cap0, misc;
+
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
+
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
                if (cap0 & (1<<19)) {
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
                        c->x86_cache_alignment = c->x86_clflush_size;
@@ -478,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                u32 capability, excap;
+
                cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
                c->x86_capability[0] = capability;
                c->x86_capability[4] = excap;
@@ -486,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
+
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -493,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
                }
        }
 
-#ifdef CONFIG_X86_64
        if (c->extended_cpuid_level >= 0x80000008) {
                u32 eax = cpuid_eax(0x80000008);
 
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
+#ifdef CONFIG_X86_32
+       else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+               c->x86_phys_bits = 36;
 #endif
 
        if (c->extended_cpuid_level >= 0x80000007)
@@ -546,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
+       c->x86_phys_bits = 36;
+       c->x86_virt_bits = 48;
 #else
        c->x86_clflush_size = 32;
+       c->x86_phys_bits = 32;
+       c->x86_virt_bits = 32;
 #endif
        c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -570,21 +657,20 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
 
-       validate_pat_support(c);
-
 #ifdef CONFIG_SMP
        c->cpu_index = boot_cpu_id;
 #endif
+       filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
 {
-       struct cpu_dev **cdev;
+       const struct cpu_dev *const *cdev;
        int count = 0;
 
-       printk("KERNEL supported cpus:\n");
+       printk(KERN_INFO "KERNEL supported cpus:\n");
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-               struct cpu_dev *cpudev = *cdev;
+               const struct cpu_dev *cpudev = *cdev;
                unsigned int j;
 
                if (count >= X86_VENDOR_NUM)
@@ -595,7 +681,7 @@ void __init early_cpu_init(void)
                for (j = 0; j < 2; j++) {
                        if (!cpudev->c_ident[j])
                                continue;
-                       printk("  %s %s\n", cpudev->c_vendor,
+                       printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
                                cpudev->c_ident[j]);
                }
        }
@@ -637,7 +723,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
 # ifdef CONFIG_X86_HT
-               c->apicid = phys_pkg_id(c->initial_apicid, 0);
+               c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
                c->apicid = c->initial_apicid;
 # endif
@@ -671,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
        c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
        c->x86_clflush_size = 64;
+       c->x86_phys_bits = 36;
+       c->x86_virt_bits = 48;
 #else
        c->cpuid_level = -1;    /* CPUID not detected */
        c->x86_clflush_size = 32;
+       c->x86_phys_bits = 32;
+       c->x86_virt_bits = 32;
 #endif
        c->x86_cache_alignment = c->x86_clflush_size;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -684,7 +774,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
                this_cpu->c_identify(c);
 
 #ifdef CONFIG_X86_64
-       c->apicid = phys_pkg_id(0);
+       c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 #endif
 
        /*
@@ -704,13 +794,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
        squash_the_stupid_serial_number(c);
 
        /*
-        * The vendor-specific functions might have changed features.  Now
-        * we do "generic changes."
+        * The vendor-specific functions might have changed features.
+        * Now we do "generic changes."
         */
 
+       /* Filter out anything that depends on CPUID levels we don't have */
+       filter_cpuid_features(c, true);
+
        /* If the model name is still unset, do table lookup. */
        if (!c->x86_model_id[0]) {
-               char *p;
+               const char *p;
                p = table_lookup_model(c);
                if (p)
                        strcpy(c->x86_model_id, p);
@@ -785,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
-       unsigned min;
-       unsigned max;
+       unsigned        min;
+       unsigned        max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
        { 0x00000000, 0x00000418},
        { 0xc0000000, 0xc000040b},
        { 0xc0010000, 0xc0010142},
@@ -798,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+       unsigned index_min, index_max;
        unsigned index;
        u64 val;
        int i;
-       unsigned index_min, index_max;
 
        for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
                index_min = msr_range_array[i].min;
                index_max = msr_range_array[i].max;
+
                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_amd_safe(index, &val))
                                continue;
@@ -815,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
        int num;
@@ -836,12 +931,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-       char *vendor = NULL;
+       const char *vendor = NULL;
 
-       if (c->x86_vendor < X86_VENDOR_NUM)
+       if (c->x86_vendor < X86_VENDOR_NUM) {
                vendor = this_cpu->c_vendor;
-       else if (c->cpuid_level >= 0)
-               vendor = c->x86_vendor_id;
+       } else {
+               if (c->cpuid_level >= 0)
+                       vendor = c->x86_vendor_id;
+       }
 
        if (vendor && !strstr(c->x86_model_id, vendor))
                printk(KERN_CONT "%s ", vendor);
@@ -868,65 +965,45 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
        int bit;
+
        if (get_option(&arg, &bit) && bit < NCAPINTS*32)
                setup_clear_cpu_cap(bit);
        else
                return 0;
+
        return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+                    irq_stack_union) __aligned(PAGE_SIZE);
 
-void __cpuinit pda_init(int cpu)
-{
-       struct x8664_pda *pda = cpu_pda(cpu);
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+       init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
-       /* Setup up data that may be needed in __get_free_pages early */
-       loadsegment(fs, 0);
-       loadsegment(gs, 0);
-       /* Memory clobbers used to order PDA accessed */
-       mb();
-       wrmsrl(MSR_GS_BASE, pda);
-       mb();
-
-       pda->cpunumber = cpu;
-       pda->irqcount = -1;
-       pda->kernelstack = (unsigned long)stack_thread_info() -
-                                PDA_STACKOFFSET + THREAD_SIZE;
-       pda->active_mm = &init_mm;
-       pda->mmu_state = 0;
-
-       if (cpu == 0) {
-               /* others are initialized in smpboot.c */
-               pda->pcurrent = &init_task;
-               pda->irqstackptr = boot_cpu_stack;
-               pda->irqstackptr += IRQSTACKSIZE - 64;
-       } else {
-               if (!pda->irqstackptr) {
-                       pda->irqstackptr = (char *)
-                               __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-                       if (!pda->irqstackptr)
-                               panic("cannot allocate irqstack for cpu %d",
-                                     cpu);
-                       pda->irqstackptr += IRQSTACKSIZE - 64;
-               }
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+       (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
-               if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-                       pda->nodenumber = cpu_to_node(cpu);
-       }
-}
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-                                 DEBUG_STKSZ] __page_aligned_bss;
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+         [0 ... N_EXCEPTION_STACKS - 1]        = EXCEPTION_STKSZ,
+         [DEBUG_STACK - 1]                     = DEBUG_STKSZ
+};
 
-extern asmlinkage void ignore_sysret(void);
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+       [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+       __aligned(PAGE_SIZE);
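+
+/*
+ * cpu_init() below carves this blob up by walking exception_stack_sizes[]:
+ * estacks is advanced past each stack before it is recorded, so the IST
+ * entries end up holding the top of each stack (x86 stacks grow down).
+ */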
 
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
@@ -957,16 +1034,38 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else
+#else  /* CONFIG_X86_64 */
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+DEFINE_PER_CPU(unsigned long, stack_canary);
+#endif
 
-/* Make sure %fs is initialized properly in idle threads */
+/* Make sure %fs and %gs are initialized properly in idle threads */
 struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 {
        memset(regs, 0, sizeof(struct pt_regs));
        regs->fs = __KERNEL_PERCPU;
+       regs->gs = __KERNEL_STACK_CANARY;
+
        return regs;
 }
-#endif
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+       int i;
+
+       for (i = 0; i < 8; i++) {
+               /* Ignore db4, db5 */
+               if ((i == 4) || (i == 5))
+                       continue;
+
+               set_debugreg(0, i);
+       }
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -976,21 +1075,25 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-       int cpu = stack_smp_processor_id();
-       struct tss_struct *t = &per_cpu(init_tss, cpu);
-       struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-       unsigned long v;
-       char *estacks = NULL;
+       struct orig_ist *orig_ist;
        struct task_struct *me;
+       struct tss_struct *t;
+       unsigned long v;
+       int cpu;
        int i;
 
-       /* CPU 0 is initialised in head64.c */
-       if (cpu != 0)
-               pda_init(cpu);
-       else
-               estacks = boot_exception_stacks;
+       cpu = stack_smp_processor_id();
+       t = &per_cpu(init_tss, cpu);
+       orig_ist = &per_cpu(orig_ist, cpu);
+
+#ifdef CONFIG_NUMA
+       if (cpu != 0 && percpu_read(node_number) == 0 &&
+           cpu_to_node(cpu) != NUMA_NO_NODE)
+               percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
        me = current;
 
@@ -1006,7 +1109,9 @@ void __cpuinit cpu_init(void)
         * and set up the GDT descriptor:
         */
 
-       switch_to_new_gdt();
+       switch_to_new_gdt(cpu);
+       loadsegment(fs, 0);
+
        load_idt((const struct desc_ptr *)&idt_descr);
 
        memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1017,31 +1122,24 @@ void __cpuinit cpu_init(void)
        barrier();
 
        check_efer();
-       if (cpu != 0 && x2apic)
+       if (cpu != 0)
                enable_x2apic();
 
        /*
         * set up and load the per-CPU TSS
         */
        if (!orig_ist->ist[0]) {
-               static const unsigned int order[N_EXCEPTION_STACKS] = {
-                 [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-                 [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
-               };
+               char *estacks = per_cpu(exception_stacks, cpu);
+
                for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-                       if (cpu) {
-                               estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-                               if (!estacks)
-                                       panic("Cannot allocate exception "
-                                             "stack %ld %d\n", v, cpu);
-                       }
-                       estacks += PAGE_SIZE << order[v];
+                       estacks += exception_stack_sizes[v];
                        orig_ist->ist[v] = t->x86_tss.ist[v] =
                                        (unsigned long)estacks;
                }
        }
 
        t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
        /*
         * <= is required because the CPU will access up to
         * 8 bits beyond the end of the IO permission bitmap.
@@ -1051,8 +1149,7 @@ void __cpuinit cpu_init(void)
 
        atomic_inc(&init_mm.mm_count);
        me->active_mm = &init_mm;
-       if (me->mm)
-               BUG();
+       BUG_ON(me->mm);
        enter_lazy_tlb(&init_mm, me);
 
        load_sp0(t, &current->thread);
@@ -1069,22 +1166,9 @@ void __cpuinit cpu_init(void)
         */
        if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
                arch_kgdb_ops.correct_hw_break();
-       else {
-#endif
-       /*
-        * Clear all 6 debug registers:
-        */
-
-       set_debugreg(0UL, 0);
-       set_debugreg(0UL, 1);
-       set_debugreg(0UL, 2);
-       set_debugreg(0UL, 3);
-       set_debugreg(0UL, 6);
-       set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-       /* If the kgdb is connected no debug regs should be altered. */
-       }
+       else
 #endif
+               clear_all_debug_regs();
 
        fpu_init();
 
@@ -1105,7 +1189,8 @@ void __cpuinit cpu_init(void)
 
        if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
                printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-               for (;;) local_irq_enable();
+               for (;;)
+                       local_irq_enable();
        }
 
        printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1114,15 +1199,14 @@ void __cpuinit cpu_init(void)
                clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
 
        load_idt(&idt_descr);
-       switch_to_new_gdt();
+       switch_to_new_gdt(cpu);
 
        /*
         * Set up and load the per-CPU TSS and LDT
         */
        atomic_inc(&init_mm.mm_count);
        curr->active_mm = &init_mm;
-       if (curr->mm)
-               BUG();
+       BUG_ON(curr->mm);
        enter_lazy_tlb(&init_mm, curr);
 
        load_sp0(t, thread);
@@ -1135,16 +1219,7 @@ void __cpuinit cpu_init(void)
        __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-       /* Clear %gs. */
-       asm volatile ("mov %0, %%gs" : : "r" (0));
-
-       /* Clear all 6 debug registers: */
-       set_debugreg(0, 0);
-       set_debugreg(0, 1);
-       set_debugreg(0, 2);
-       set_debugreg(0, 3);
-       set_debugreg(0, 6);
-       set_debugreg(0, 7);
+       clear_all_debug_regs();
 
        /*
         * Force FPU initialization:
@@ -1164,6 +1239,4 @@ void __cpuinit cpu_init(void)
 
        xsave_init();
 }
-
-
 #endif
index de4094a3921071b94b9b71a1bcd2d85158c6c957..9469ecb5aeb8ddcd69dd2a5ebc26ef0c1e6ddf41 100644 (file)
@@ -5,15 +5,15 @@
 struct cpu_model_info {
        int vendor;
        int family;
-       char *model_names[16];
+       const char *model_names[16];
 };
 
 /* attempt to consolidate cpu attributes */
 struct cpu_dev {
-       char    * c_vendor;
+       const char      * c_vendor;
 
        /* some have two possibilities for cpuid string */
-       char    * c_ident[2];   
+       const char      * c_ident[2];
 
        struct          cpu_model_info c_models[4];
 
@@ -25,11 +25,12 @@ struct cpu_dev {
 };
 
 #define cpu_dev_register(cpu_devX) \
-       static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+       static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
        __attribute__((__section__(".x86_cpu_dev.init"))) = \
        &cpu_devX;
 
-extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
+extern const struct cpu_dev *const __x86_cpu_dev_start[],
+                           *const __x86_cpu_dev_end[];
 
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
new file mode 100755 (executable)
index 0000000..21c0cf8
--- /dev/null
@@ -0,0 +1,839 @@
+/*
+ * CPU x86 architecture debug code
+ *
+ * Copyright(C) 2009 Jaswinder Singh Rajput
+ *
+ * For licensing details see kernel-base/COPYING
+ */
+
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+
+#include <asm/cpu_debug.h>
+#include <asm/paravirt.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+
+static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
+static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
+static DEFINE_PER_CPU(unsigned, cpu_modelflag);
+static DEFINE_PER_CPU(int, cpu_priv_count);
+static DEFINE_PER_CPU(unsigned, cpu_model);
+
+static DEFINE_MUTEX(cpu_debug_lock);
+
+static struct dentry *cpu_debugfs_dir;
+
+static struct cpu_debug_base cpu_base[] = {
+       { "mc",         CPU_MC,         0       },
+       { "monitor",    CPU_MONITOR,    0       },
+       { "time",       CPU_TIME,       0       },
+       { "pmc",        CPU_PMC,        1       },
+       { "platform",   CPU_PLATFORM,   0       },
+       { "apic",       CPU_APIC,       0       },
+       { "poweron",    CPU_POWERON,    0       },
+       { "control",    CPU_CONTROL,    0       },
+       { "features",   CPU_FEATURES,   0       },
+       { "lastbranch", CPU_LBRANCH,    0       },
+       { "bios",       CPU_BIOS,       0       },
+       { "freq",       CPU_FREQ,       0       },
+       { "mtrr",       CPU_MTRR,       0       },
+       { "perf",       CPU_PERF,       0       },
+       { "cache",      CPU_CACHE,      0       },
+       { "sysenter",   CPU_SYSENTER,   0       },
+       { "therm",      CPU_THERM,      0       },
+       { "misc",       CPU_MISC,       0       },
+       { "debug",      CPU_DEBUG,      0       },
+       { "pat",        CPU_PAT,        0       },
+       { "vmx",        CPU_VMX,        0       },
+       { "call",       CPU_CALL,       0       },
+       { "base",       CPU_BASE,       0       },
+       { "smm",        CPU_SMM,        0       },
+       { "svm",        CPU_SVM,        0       },
+       { "osvm",       CPU_OSVM,       0       },
+       { "tss",        CPU_TSS,        0       },
+       { "cr",         CPU_CR,         0       },
+       { "dt",         CPU_DT,         0       },
+       { "registers",  CPU_REG_ALL,    0       },
+};
+
+static struct cpu_file_base cpu_file[] = {
+       { "index",      CPU_REG_ALL,    0       },
+       { "value",      CPU_REG_ALL,    1       },
+};
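+
+/*
+ * Rough debugfs layout produced by the tables above (assuming debugfs
+ * is mounted at /sys/kernel/debug and arch_debugfs_dir is the "x86" dir):
+ *
+ *   x86/cpu/cpuN/<type>/<type>           one dump file per register type
+ *   x86/cpu/cpuN/<type>/0x<reg>/value    per-MSR value file (MSR types only)
+ *
+ * "value" is writable only where both tables set their write flag.
+ */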
+
+/* Intel register ranges */
+static struct cpu_debug_range cpu_intel_range[] = {
+       { 0x00000000, 0x00000001, CPU_MC,       CPU_INTEL_ALL           },
+       { 0x00000006, 0x00000007, CPU_MONITOR,  CPU_CX_AT_XE            },
+       { 0x00000010, 0x00000010, CPU_TIME,     CPU_INTEL_ALL           },
+       { 0x00000011, 0x00000013, CPU_PMC,      CPU_INTEL_PENTIUM       },
+       { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE         },
+       { 0x0000001B, 0x0000001B, CPU_APIC,     CPU_P6_CX_AT_XE         },
+
+       { 0x0000002A, 0x0000002A, CPU_POWERON,  CPU_PX_CX_AT_XE         },
+       { 0x0000002B, 0x0000002B, CPU_POWERON,  CPU_INTEL_XEON          },
+       { 0x0000002C, 0x0000002C, CPU_FREQ,     CPU_INTEL_XEON          },
+       { 0x0000003A, 0x0000003A, CPU_CONTROL,  CPU_CX_AT_XE            },
+
+       { 0x00000040, 0x00000043, CPU_LBRANCH,  CPU_PM_CX_AT_XE         },
+       { 0x00000044, 0x00000047, CPU_LBRANCH,  CPU_PM_CO_AT            },
+       { 0x00000060, 0x00000063, CPU_LBRANCH,  CPU_C2_AT               },
+       { 0x00000064, 0x00000067, CPU_LBRANCH,  CPU_INTEL_ATOM          },
+
+       { 0x00000079, 0x00000079, CPU_BIOS,     CPU_P6_CX_AT_XE         },
+       { 0x00000088, 0x0000008A, CPU_CACHE,    CPU_INTEL_P6            },
+       { 0x0000008B, 0x0000008B, CPU_BIOS,     CPU_P6_CX_AT_XE         },
+       { 0x0000009B, 0x0000009B, CPU_MONITOR,  CPU_INTEL_XEON          },
+
+       { 0x000000C1, 0x000000C2, CPU_PMC,      CPU_P6_CX_AT            },
+       { 0x000000CD, 0x000000CD, CPU_FREQ,     CPU_CX_AT               },
+       { 0x000000E7, 0x000000E8, CPU_PERF,     CPU_CX_AT               },
+       { 0x000000FE, 0x000000FE, CPU_MTRR,     CPU_P6_CX_XE            },
+
+       { 0x00000116, 0x00000116, CPU_CACHE,    CPU_INTEL_P6            },
+       { 0x00000118, 0x00000118, CPU_CACHE,    CPU_INTEL_P6            },
+       { 0x00000119, 0x00000119, CPU_CACHE,    CPU_INTEL_PX            },
+       { 0x0000011A, 0x0000011B, CPU_CACHE,    CPU_INTEL_P6            },
+       { 0x0000011E, 0x0000011E, CPU_CACHE,    CPU_PX_CX_AT            },
+
+       { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE         },
+       { 0x00000179, 0x0000017A, CPU_MC,       CPU_PX_CX_AT_XE         },
+       { 0x0000017B, 0x0000017B, CPU_MC,       CPU_P6_XE               },
+       { 0x00000186, 0x00000187, CPU_PMC,      CPU_P6_CX_AT            },
+       { 0x00000198, 0x00000199, CPU_PERF,     CPU_PM_CX_AT_XE         },
+       { 0x0000019A, 0x0000019A, CPU_TIME,     CPU_PM_CX_AT_XE         },
+       { 0x0000019B, 0x0000019D, CPU_THERM,    CPU_PM_CX_AT_XE         },
+       { 0x000001A0, 0x000001A0, CPU_MISC,     CPU_PM_CX_AT_XE         },
+
+       { 0x000001C9, 0x000001C9, CPU_LBRANCH,  CPU_PM_CX_AT            },
+       { 0x000001D7, 0x000001D8, CPU_LBRANCH,  CPU_INTEL_XEON          },
+       { 0x000001D9, 0x000001D9, CPU_DEBUG,    CPU_CX_AT_XE            },
+       { 0x000001DA, 0x000001DA, CPU_LBRANCH,  CPU_INTEL_XEON          },
+       { 0x000001DB, 0x000001DB, CPU_LBRANCH,  CPU_P6_XE               },
+       { 0x000001DC, 0x000001DC, CPU_LBRANCH,  CPU_INTEL_P6            },
+       { 0x000001DD, 0x000001DE, CPU_LBRANCH,  CPU_PX_CX_AT_XE         },
+       { 0x000001E0, 0x000001E0, CPU_LBRANCH,  CPU_INTEL_P6            },
+
+       { 0x00000200, 0x0000020F, CPU_MTRR,     CPU_P6_CX_XE            },
+       { 0x00000250, 0x00000250, CPU_MTRR,     CPU_P6_CX_XE            },
+       { 0x00000258, 0x00000259, CPU_MTRR,     CPU_P6_CX_XE            },
+       { 0x00000268, 0x0000026F, CPU_MTRR,     CPU_P6_CX_XE            },
+       { 0x00000277, 0x00000277, CPU_PAT,      CPU_C2_AT_XE            },
+       { 0x000002FF, 0x000002FF, CPU_MTRR,     CPU_P6_CX_XE            },
+
+       { 0x00000300, 0x00000308, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x00000309, 0x0000030B, CPU_PMC,      CPU_C2_AT_XE            },
+       { 0x0000030C, 0x00000311, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x00000345, 0x00000345, CPU_PMC,      CPU_C2_AT               },
+       { 0x00000360, 0x00000371, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x0000038D, 0x00000390, CPU_PMC,      CPU_C2_AT               },
+       { 0x000003A0, 0x000003BE, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x000003C0, 0x000003CD, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x000003E0, 0x000003E1, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x000003F0, 0x000003F0, CPU_PMC,      CPU_INTEL_XEON          },
+       { 0x000003F1, 0x000003F1, CPU_PMC,      CPU_C2_AT_XE            },
+       { 0x000003F2, 0x000003F2, CPU_PMC,      CPU_INTEL_XEON          },
+
+       { 0x00000400, 0x00000402, CPU_MC,       CPU_PM_CX_AT_XE         },
+       { 0x00000403, 0x00000403, CPU_MC,       CPU_INTEL_XEON          },
+       { 0x00000404, 0x00000406, CPU_MC,       CPU_PM_CX_AT_XE         },
+       { 0x00000407, 0x00000407, CPU_MC,       CPU_INTEL_XEON          },
+       { 0x00000408, 0x0000040A, CPU_MC,       CPU_PM_CX_AT_XE         },
+       { 0x0000040B, 0x0000040B, CPU_MC,       CPU_INTEL_XEON          },
+       { 0x0000040C, 0x0000040E, CPU_MC,       CPU_PM_CX_XE            },
+       { 0x0000040F, 0x0000040F, CPU_MC,       CPU_INTEL_XEON          },
+       { 0x00000410, 0x00000412, CPU_MC,       CPU_PM_CX_AT_XE         },
+       { 0x00000413, 0x00000417, CPU_MC,       CPU_CX_AT_XE            },
+       { 0x00000480, 0x0000048B, CPU_VMX,      CPU_CX_AT_XE            },
+
+       { 0x00000600, 0x00000600, CPU_DEBUG,    CPU_PM_CX_AT_XE         },
+       { 0x00000680, 0x0000068F, CPU_LBRANCH,  CPU_INTEL_XEON          },
+       { 0x000006C0, 0x000006CF, CPU_LBRANCH,  CPU_INTEL_XEON          },
+
+       { 0x000107CC, 0x000107D3, CPU_PMC,      CPU_INTEL_XEON_MP       },
+
+       { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON          },
+       { 0xC0000081, 0xC0000082, CPU_CALL,     CPU_INTEL_XEON          },
+       { 0xC0000084, 0xC0000084, CPU_CALL,     CPU_INTEL_XEON          },
+       { 0xC0000100, 0xC0000102, CPU_BASE,     CPU_INTEL_XEON          },
+};
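+
+/* a range may include MSRs a given part lacks; rdmsr_safe_on_cpu() skips those */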
+
+/* AMD register ranges */
+static struct cpu_debug_range cpu_amd_range[] = {
+       { 0x00000010, 0x00000010, CPU_TIME,     CPU_ALL,                },
+       { 0x0000001B, 0x0000001B, CPU_APIC,     CPU_ALL,                },
+       { 0x000000FE, 0x000000FE, CPU_MTRR,     CPU_ALL,                },
+
+       { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL,                },
+       { 0x00000179, 0x0000017A, CPU_MC,       CPU_ALL,                },
+       { 0x0000017B, 0x0000017B, CPU_MC,       CPU_ALL,                },
+       { 0x000001D9, 0x000001D9, CPU_DEBUG,    CPU_ALL,                },
+       { 0x000001DB, 0x000001DE, CPU_LBRANCH,  CPU_ALL,                },
+
+       { 0x00000200, 0x0000020F, CPU_MTRR,     CPU_ALL,                },
+       { 0x00000250, 0x00000250, CPU_MTRR,     CPU_ALL,                },
+       { 0x00000258, 0x00000259, CPU_MTRR,     CPU_ALL,                },
+       { 0x00000268, 0x0000026F, CPU_MTRR,     CPU_ALL,                },
+       { 0x00000277, 0x00000277, CPU_PAT,      CPU_ALL,                },
+       { 0x000002FF, 0x000002FF, CPU_MTRR,     CPU_ALL,                },
+
+       { 0x00000400, 0x00000417, CPU_MC,       CPU_ALL,                },
+
+       { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL,                },
+       { 0xC0000081, 0xC0000084, CPU_CALL,     CPU_ALL,                },
+       { 0xC0000100, 0xC0000102, CPU_BASE,     CPU_ALL,                },
+       { 0xC0000103, 0xC0000103, CPU_TIME,     CPU_ALL,                },
+
+       { 0xC0000408, 0xC000040A, CPU_MC,       CPU_ALL,                },
+
+       { 0xc0010000, 0xc0010007, CPU_PMC,      CPU_ALL,                },
+       { 0xc0010010, 0xc0010010, CPU_MTRR,     CPU_ALL,                },
+       { 0xc0010016, 0xc001001A, CPU_MTRR,     CPU_ALL,                },
+       { 0xc001001D, 0xc001001D, CPU_MTRR,     CPU_ALL,                },
+       { 0xc0010030, 0xc0010035, CPU_BIOS,     CPU_ALL,                },
+       { 0xc0010056, 0xc0010056, CPU_SMM,      CPU_ALL,                },
+       { 0xc0010061, 0xc0010063, CPU_SMM,      CPU_ALL,                },
+       { 0xc0010074, 0xc0010074, CPU_MC,       CPU_ALL,                },
+       { 0xc0010111, 0xc0010113, CPU_SMM,      CPU_ALL,                },
+       { 0xc0010114, 0xc0010118, CPU_SVM,      CPU_ALL,                },
+       { 0xc0010119, 0xc001011A, CPU_SMM,      CPU_ALL,                },
+       { 0xc0010140, 0xc0010141, CPU_OSVM,     CPU_ALL,                },
+       { 0xc0010156, 0xc0010156, CPU_SMM,      CPU_ALL,                },
+};
+
+
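+/*
+ * per_cpu(cpu_model) packs (vendor << 16) | (family << 8) | model, as
+ * set up in cpu_init_cpu(); e.g. 0x060F below is family 6, model 15.
+ */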
+static int get_cpu_modelflag(unsigned cpu)
+{
+       int flag;
+
+       switch (per_cpu(cpu_model, cpu)) {
+       /* Intel */
+       case 0x0501:
+       case 0x0502:
+       case 0x0504:
+               flag = CPU_INTEL_PENTIUM;
+               break;
+       case 0x0601:
+       case 0x0603:
+       case 0x0605:
+       case 0x0607:
+       case 0x0608:
+       case 0x060A:
+       case 0x060B:
+               flag = CPU_INTEL_P6;
+               break;
+       case 0x0609:
+       case 0x060D:
+               flag = CPU_INTEL_PENTIUM_M;
+               break;
+       case 0x060E:
+               flag = CPU_INTEL_CORE;
+               break;
+       case 0x060F:
+       case 0x0617:
+               flag = CPU_INTEL_CORE2;
+               break;
+       case 0x061C:
+               flag = CPU_INTEL_ATOM;
+               break;
+       case 0x0F00:
+       case 0x0F01:
+       case 0x0F02:
+       case 0x0F03:
+       case 0x0F04:
+               flag = CPU_INTEL_XEON_P4;
+               break;
+       case 0x0F06:
+               flag = CPU_INTEL_XEON_MP;
+               break;
+       default:
+               flag = CPU_NONE;
+               break;
+       }
+
+       return flag;
+}
+
+static int get_cpu_range_count(unsigned cpu)
+{
+       int index;
+
+       switch (per_cpu(cpu_model, cpu) >> 16) {
+       case X86_VENDOR_INTEL:
+               index = ARRAY_SIZE(cpu_intel_range);
+               break;
+       case X86_VENDOR_AMD:
+               index = ARRAY_SIZE(cpu_amd_range);
+               break;
+       default:
+               index = 0;
+               break;
+       }
+
+       return index;
+}
+
+static int is_typeflag_valid(unsigned cpu, unsigned flag)
+{
+       unsigned vendor, modelflag;
+       int i, index;
+
+       /* Standard registers (CPU_TSS and above) are always valid */
+       if (flag >= CPU_TSS)
+               return 1;
+
+       modelflag = per_cpu(cpu_modelflag, cpu);
+       vendor = per_cpu(cpu_model, cpu) >> 16;
+       index = get_cpu_range_count(cpu);
+
+       for (i = 0; i < index; i++) {
+               switch (vendor) {
+               case X86_VENDOR_INTEL:
+                       if ((cpu_intel_range[i].model & modelflag) &&
+                           (cpu_intel_range[i].flag & flag))
+                               return 1;
+                       break;
+               case X86_VENDOR_AMD:
+                       if (cpu_amd_range[i].flag & flag)
+                               return 1;
+                       break;
+               }
+       }
+
+       /* Invalid */
+       return 0;
+}
+
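+/*
+ * Fill *min/*max from range entry 'index' if it applies to this CPU;
+ * returns 0 (an empty range) otherwise, so callers just skip the entry.
+ */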
+static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
+                             int index, unsigned flag)
+{
+       unsigned modelflag;
+
+       modelflag = per_cpu(cpu_modelflag, cpu);
+       *max = 0;
+       switch (per_cpu(cpu_model, cpu) >> 16) {
+       case X86_VENDOR_INTEL:
+               if ((cpu_intel_range[index].model & modelflag) &&
+                   (cpu_intel_range[index].flag & flag)) {
+                       *min = cpu_intel_range[index].min;
+                       *max = cpu_intel_range[index].max;
+               }
+               break;
+       case X86_VENDOR_AMD:
+               if (cpu_amd_range[index].flag & flag) {
+                       *min = cpu_amd_range[index].min;
+                       *max = cpu_amd_range[index].max;
+               }
+               break;
+       }
+
+       return *max;
+}
+
+/* This function can also be called with seq = NULL for printk */
+static void print_cpu_data(struct seq_file *seq, unsigned type,
+                          u32 low, u32 high)
+{
+       struct cpu_private *priv;
+       u64 val = high;
+
+       if (seq) {
+               priv = seq->private;
+               if (priv->file) {
+                       val = (val << 32) | low;
+                       seq_printf(seq, "0x%llx\n", val);
+               } else
+                       seq_printf(seq, " %08x: %08x_%08x\n",
+                                  type, high, low);
+       } else
+               printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
+}
+
+/* This function can also be called with seq = NULL for printk */
+static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
+{
+       unsigned msr, msr_min, msr_max;
+       struct cpu_private *priv;
+       u32 low, high;
+       int i, range;
+
+       if (seq) {
+               priv = seq->private;
+               if (priv->file) {
+                       if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
+                                              &low, &high))
+                               print_cpu_data(seq, priv->reg, low, high);
+                       return;
+               }
+       }
+
+       range = get_cpu_range_count(cpu);
+
+       for (i = 0; i < range; i++) {
+               if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
+                       continue;
+
+               for (msr = msr_min; msr <= msr_max; msr++) {
+                       if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
+                               continue;
+                       print_cpu_data(seq, msr, low, high);
+               }
+       }
+}
+
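+/*
+ * Despite its name this dumps the current task's saved pt_regs and the
+ * live segment registers on the target CPU (it runs there via
+ * smp_call_function_single()), not the hardware TSS contents.
+ */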
+static void print_tss(void *arg)
+{
+       struct pt_regs *regs = task_pt_regs(current);
+       struct seq_file *seq = arg;
+       unsigned int seg;
+
+       seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
+       seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
+       seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
+       seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
+
+       seq_printf(seq, " RSI\t: %016lx\n", regs->si);
+       seq_printf(seq, " RDI\t: %016lx\n", regs->di);
+       seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
+       seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
+
+#ifdef CONFIG_X86_64
+       seq_printf(seq, " R08\t: %016lx\n", regs->r8);
+       seq_printf(seq, " R09\t: %016lx\n", regs->r9);
+       seq_printf(seq, " R10\t: %016lx\n", regs->r10);
+       seq_printf(seq, " R11\t: %016lx\n", regs->r11);
+       seq_printf(seq, " R12\t: %016lx\n", regs->r12);
+       seq_printf(seq, " R13\t: %016lx\n", regs->r13);
+       seq_printf(seq, " R14\t: %016lx\n", regs->r14);
+       seq_printf(seq, " R15\t: %016lx\n", regs->r15);
+#endif
+
+       asm("movl %%cs,%0" : "=r" (seg));
+       seq_printf(seq, " CS\t:             %04x\n", seg);
+       asm("movl %%ds,%0" : "=r" (seg));
+       seq_printf(seq, " DS\t:             %04x\n", seg);
+       seq_printf(seq, " SS\t:             %04lx\n", regs->ss & 0xffff);
+       asm("movl %%es,%0" : "=r" (seg));
+       seq_printf(seq, " ES\t:             %04x\n", seg);
+       asm("movl %%fs,%0" : "=r" (seg));
+       seq_printf(seq, " FS\t:             %04x\n", seg);
+       asm("movl %%gs,%0" : "=r" (seg));
+       seq_printf(seq, " GS\t:             %04x\n", seg);
+
+       seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
+
+       seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
+}
+
+static void print_cr(void *arg)
+{
+       struct seq_file *seq = arg;
+
+       seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
+       seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
+       seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
+       seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
+#ifdef CONFIG_X86_64
+       seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
+#endif
+}
+
+static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
+{
+       seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
+}
+
+static void print_dt(void *seq)
+{
+       struct desc_ptr dt;
+       unsigned long ldt;
+
+       /* IDT */
+       store_idt((struct desc_ptr *)&dt);
+       print_desc_ptr("IDT", seq, dt);
+
+       /* GDT */
+       store_gdt((struct desc_ptr *)&dt);
+       print_desc_ptr("GDT", seq, dt);
+
+       /* LDT */
+       store_ldt(ldt);
+       seq_printf(seq, " LDT\t: %016lx\n", ldt);
+
+       /* TR */
+       store_tr(ldt);
+       seq_printf(seq, " TR\t: %016lx\n", ldt);
+}
+
+static void print_dr(void *arg)
+{
+       struct seq_file *seq = arg;
+       unsigned long dr;
+       int i;
+
+       for (i = 0; i < 8; i++) {
+               /* Ignore db4, db5 */
+               if ((i == 4) || (i == 5))
+                       continue;
+               get_debugreg(dr, i);
+               seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
+       }
+
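+       /* header for the MSR dump that print_msr() appends */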
+       seq_printf(seq, "\n MSR\t:\n");
+}
+
+static void print_apic(void *arg)
+{
+       struct seq_file *seq = arg;
+
+#ifdef CONFIG_X86_LOCAL_APIC
+       seq_printf(seq, " LAPIC\t:\n");
+       seq_printf(seq, " ID\t\t: %08x\n",  apic_read(APIC_ID) >> 24);
+       seq_printf(seq, " LVR\t\t: %08x\n",  apic_read(APIC_LVR));
+       seq_printf(seq, " TASKPRI\t: %08x\n",  apic_read(APIC_TASKPRI));
+       seq_printf(seq, " ARBPRI\t\t: %08x\n",  apic_read(APIC_ARBPRI));
+       seq_printf(seq, " PROCPRI\t: %08x\n",  apic_read(APIC_PROCPRI));
+       seq_printf(seq, " LDR\t\t: %08x\n",  apic_read(APIC_LDR));
+       seq_printf(seq, " DFR\t\t: %08x\n",  apic_read(APIC_DFR));
+       seq_printf(seq, " SPIV\t\t: %08x\n",  apic_read(APIC_SPIV));
+       seq_printf(seq, " ISR\t\t: %08x\n",  apic_read(APIC_ISR));
+       seq_printf(seq, " ESR\t\t: %08x\n",  apic_read(APIC_ESR));
+       seq_printf(seq, " ICR\t\t: %08x\n",  apic_read(APIC_ICR));
+       seq_printf(seq, " ICR2\t\t: %08x\n",  apic_read(APIC_ICR2));
+       seq_printf(seq, " LVTT\t\t: %08x\n",  apic_read(APIC_LVTT));
+       seq_printf(seq, " LVTTHMR\t: %08x\n",  apic_read(APIC_LVTTHMR));
+       seq_printf(seq, " LVTPC\t\t: %08x\n",  apic_read(APIC_LVTPC));
+       seq_printf(seq, " LVT0\t\t: %08x\n",  apic_read(APIC_LVT0));
+       seq_printf(seq, " LVT1\t\t: %08x\n",  apic_read(APIC_LVT1));
+       seq_printf(seq, " LVTERR\t\t: %08x\n",  apic_read(APIC_LVTERR));
+       seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
+       seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
+       seq_printf(seq, " TDCR\t\t: %08x\n",  apic_read(APIC_TDCR));
+#endif /* CONFIG_X86_LOCAL_APIC */
+
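+       /* header for the MSR dump that print_msr() appends */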
+       seq_printf(seq, "\n MSR\t:\n");
+}
+
+static int cpu_seq_show(struct seq_file *seq, void *v)
+{
+       struct cpu_private *priv = seq->private;
+
+       if (priv == NULL)
+               return -EINVAL;
+
+       switch (cpu_base[priv->type].flag) {
+       case CPU_TSS:
+               smp_call_function_single(priv->cpu, print_tss, seq, 1);
+               break;
+       case CPU_CR:
+               smp_call_function_single(priv->cpu, print_cr, seq, 1);
+               break;
+       case CPU_DT:
+               smp_call_function_single(priv->cpu, print_dt, seq, 1);
+               break;
+       case CPU_DEBUG:
+               if (priv->file == CPU_INDEX_BIT)
+                       smp_call_function_single(priv->cpu, print_dr, seq, 1);
+               print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+               break;
+       case CPU_APIC:
+               if (priv->file == CPU_INDEX_BIT)
+                       smp_call_function_single(priv->cpu, print_apic, seq, 1);
+               print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+               break;
+
+       default:
+               print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+               break;
+       }
+       seq_printf(seq, "\n");
+
+       return 0;
+}
+
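+/*
+ * Single-record seq_file: ->start() hands out a token only for *pos == 0,
+ * so ->show() runs exactly once per read of the file.
+ */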
+static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       if (*pos == 0) /* One time is enough ;-) */
+               return seq;
+
+       return NULL;
+}
+
+static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       (*pos)++;
+
+       return cpu_seq_start(seq, pos);
+}
+
+static void cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations cpu_seq_ops = {
+       .start          = cpu_seq_start,
+       .next           = cpu_seq_next,
+       .stop           = cpu_seq_stop,
+       .show           = cpu_seq_show,
+};
+
+static int cpu_seq_open(struct inode *inode, struct file *file)
+{
+       struct cpu_private *priv = inode->i_private;
+       struct seq_file *seq;
+       int err;
+
+       err = seq_open(file, &cpu_seq_ops);
+       if (!err) {
+               seq = file->private_data;
+               seq->private = priv;
+       }
+
+       return err;
+}
+
+static int write_msr(struct cpu_private *priv, u64 val)
+{
+       u32 low, high;
+
+       high = (val >> 32) & 0xffffffff;
+       low = val & 0xffffffff;
+
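+       /* wrmsr_safe_on_cpu() returns 0 on success */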
+       if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
+               return 0;
+
+       return -EPERM;
+}
+
+static int write_cpu_register(struct cpu_private *priv, const char *buf)
+{
+       int ret;
+       u64 val;
+
+       ret = strict_strtoull(buf, 0, &val);
+       if (ret < 0)
+               return ret;
+
+       /* Supporting only MSRs */
+       if (priv->type < CPU_TSS_BIT)
+               return write_msr(priv, val);
+
+       return -EPERM;
+}
+
+static ssize_t cpu_write(struct file *file, const char __user *ubuf,
+                            size_t count, loff_t *off)
+{
+       struct seq_file *seq = file->private_data;
+       struct cpu_private *priv = seq->private;
+       char buf[19];
+
+       if ((priv == NULL) || (count >= sizeof(buf)))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, count))
+               return -EFAULT;
+
+       buf[count] = 0;
+
+       if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
+               if (!write_cpu_register(priv, buf))
+                       return count;
+
+       return -EACCES;
+}
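+
+/*
+ * Usage sketch (paths assume debugfs mounted at /sys/kernel/debug):
+ *
+ *   cat /sys/kernel/debug/x86/cpu/cpu0/pmc/pmc          # dump cpu0's PMC MSRs
+ *   echo 0 > /sys/kernel/debug/x86/cpu/cpu0/pmc/0xc1/value
+ *
+ * Only "pmc" and "value" carry write flags in the tables above, so only
+ * per-PMC value files accept writes; everything else is read-only.
+ */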
+
+static const struct file_operations cpu_fops = {
+       .owner          = THIS_MODULE,
+       .open           = cpu_seq_open,
+       .read           = seq_read,
+       .write          = cpu_write,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
+static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
+                          unsigned file, struct dentry *dentry)
+{
+       struct cpu_private *priv = NULL;
+
+       /* Already initialized */
+       if (file == CPU_INDEX_BIT)
+               if (per_cpu(cpu_arr[type].init, cpu))
+                       return 0;
+
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (priv == NULL)
+               return -ENOMEM;
+
+       priv->cpu = cpu;
+       priv->type = type;
+       priv->reg = reg;
+       priv->file = file;
+       mutex_lock(&cpu_debug_lock);
+       per_cpu(priv_arr[type], cpu) = priv;
+       per_cpu(cpu_priv_count, cpu)++;
+       mutex_unlock(&cpu_debug_lock);
+
+       if (file)
+               debugfs_create_file(cpu_file[file].name, S_IRUGO,
+                                   dentry, (void *)priv, &cpu_fops);
+       else {
+               debugfs_create_file(cpu_base[type].name, S_IRUGO,
+                                   per_cpu(cpu_arr[type].dentry, cpu),
+                                   (void *)priv, &cpu_fops);
+               mutex_lock(&cpu_debug_lock);
+               per_cpu(cpu_arr[type].init, cpu) = 1;
+               mutex_unlock(&cpu_debug_lock);
+       }
+
+       return 0;
+}
+
+static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
+                            struct dentry *dentry)
+{
+       unsigned file;
+       int err = 0;
+
+       for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
+               err = cpu_create_file(cpu, type, reg, file, dentry);
+               if (err)
+                       return err;
+       }
+
+       return err;
+}
+
+static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
+{
+       struct dentry *cpu_dentry = NULL;
+       unsigned reg, reg_min, reg_max;
+       int i, range, err = 0;
+       char reg_dir[12];
+       u32 low, high;
+
+       range = get_cpu_range_count(cpu);
+
+       for (i = 0; i < range; i++) {
+               if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
+                                  cpu_base[type].flag))
+                       continue;
+
+               for (reg = reg_min; reg <= reg_max; reg++) {
+                       if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
+                               continue;
+
+                       sprintf(reg_dir, "0x%x", reg);
+                       cpu_dentry = debugfs_create_dir(reg_dir, dentry);
+                       err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
+                       if (err)
+                               return err;
+               }
+       }
+
+       return err;
+}
+
+static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
+{
+       struct dentry *cpu_dentry = NULL;
+       unsigned type;
+       int err = 0;
+
+       for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
+               if (!is_typeflag_valid(cpu, cpu_base[type].flag))
+                       continue;
+               cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
+               per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+
+               if (type < CPU_TSS_BIT)
+                       err = cpu_init_msr(cpu, type, cpu_dentry);
+               else
+                       err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
+                                             cpu_dentry);
+               if (err)
+                       return err;
+       }
+
+       return err;
+}
+
+static int cpu_init_cpu(void)
+{
+       struct dentry *cpu_dentry = NULL;
+       struct cpuinfo_x86 *cpui;
+       char cpu_dir[12];
+       unsigned cpu;
+       int err = 0;
+
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+               cpui = &cpu_data(cpu);
+               if (!cpu_has(cpui, X86_FEATURE_MSR))
+                       continue;
+               per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
+                                          (cpui->x86 << 8) |
+                                          (cpui->x86_model));
+               per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
+
+               sprintf(cpu_dir, "cpu%d", cpu);
+               cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
+               err = cpu_init_allreg(cpu, cpu_dentry);
+
+               pr_info("cpu%d(%d) debug files %d\n",
+                       cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
+               if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+                       pr_err("Register files count %d exceeds limit %d\n",
+                               per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
+                       per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+                       err = -ENFILE;
+               }
+               if (err)
+                       return err;
+       }
+
+       return err;
+}
+
+static int __init cpu_debug_init(void)
+{
+       cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
+
+       return cpu_init_cpu();
+}
+
+static void __exit cpu_debug_exit(void)
+{
+       int i, cpu;
+
+       if (cpu_debugfs_dir)
+               debugfs_remove_recursive(cpu_debugfs_dir);
+
+       for (cpu = 0; cpu < nr_cpu_ids; cpu++)
+               for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
+                       kfree(per_cpu(priv_arr[i], cpu));
+}
+
+module_init(cpu_debug_init);
+module_exit(cpu_debug_exit);
+
+MODULE_AUTHOR("Jaswinder Singh Rajput");
+MODULE_DESCRIPTION("CPU Debug module");
+MODULE_LICENSE("GPL");
index 4b1c319d30c368592e990663fb1c8efd5abea963..22590cf688aedd45f9165c5ee6d9ea571f20a61c 100644 (file)
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        if (!data)
                return -ENOMEM;
 
-       data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+       data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
        per_cpu(drv_data, cpu) = data;
 
        if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
index c2f930d8664091932c1a7b614d860f8f365b7f10..41ab3f064cb14fc26bca09b8fc3f55fd98824a87 100644 (file)
@@ -204,12 +204,12 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
        }
        /* Enable Enhanced PowerSaver */
        rdmsrl(MSR_IA32_MISC_ENABLE, val);
-       if (!(val & 1 << 16)) {
-               val |= 1 << 16;
+       if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+               val |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
                wrmsrl(MSR_IA32_MISC_ENABLE, val);
                /* Can be locked at 0 */
                rdmsrl(MSR_IA32_MISC_ENABLE, val);
-               if (!(val & 1 << 16)) {
+               if (!(val & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
                        printk(KERN_INFO "eps: Can't enable Enhanced PowerSaver\n");
                        return -ENODEV;
                }
index f08998278a3a7eb359d9cc979a04464d21a5aeb9..c9f1fdc02830f1e6fd1748648bda58c5dd2cb51d 100644 (file)
@@ -390,14 +390,14 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
           enable it if not. */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 
-       if (!(l & (1<<16))) {
-               l |= (1<<16);
+       if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
+               l |= MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP;
                dprintk("trying to enable Enhanced SpeedStep (%x)\n", l);
                wrmsr(MSR_IA32_MISC_ENABLE, l, h);
 
                /* check to see if it stuck */
                rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-               if (!(l & (1<<16))) {
+               if (!(l & MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP)) {
                        printk(KERN_INFO PFX
                                "couldn't enable Enhanced SpeedStep\n");
                        return -ENODEV;
index ffd0f5ed071a705e5f55e279ac12f0c2d0cf9419..593171e967ef2009a8f109e8a1f9f81f63c469c5 100644 (file)
@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
  */
 static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
 
-static char Cx86_model[][9] __cpuinitdata = {
+static const char __cpuinitconst Cx86_model[][9] = {
        "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
        "M II ", "Unknown"
 };
-static char Cx486_name[][5] __cpuinitdata = {
+static const char __cpuinitconst Cx486_name[][5] = {
        "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
        "SRx2", "DRx2"
 };
-static char Cx486S_name[][4] __cpuinitdata = {
+static const char __cpuinitconst Cx486S_name[][4] = {
        "S", "S2", "Se", "S2e"
 };
-static char Cx486D_name[][4] __cpuinitdata = {
+static const char __cpuinitconst Cx486D_name[][4] = {
        "DX", "DX2", "?", "?", "?", "DX4"
 };
 static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static char cyrix_model_mult1[] __cpuinitdata = "12??43";
-static char cyrix_model_mult2[] __cpuinitdata = "12233445";
+static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
+static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
 
 /*
  * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
        }
 }
 
-static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
        .c_vendor       = "Cyrix",
        .c_ident        = { "CyrixInstead" },
        .c_early_init   = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 
 cpu_dev_register(cyrix_cpu_dev);
 
-static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
        .c_vendor       = "NSC",
        .c_ident        = { "Geode by NSC" },
        .c_init         = init_nsc,
index 24ff26a38adecff5ef27d4e76fb74caebd1fbb88..b09d4eb52bb95f89d8ba0f65968006fa5564ee67 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/uaccess.h>
 #include <asm/ds.h>
 #include <asm/bugs.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/topology.h>
@@ -24,7 +25,6 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
-#include <mach_apic.h>
 #endif
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
@@ -54,6 +54,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                c->x86_cache_alignment = 128;
 #endif
 
+       /* CPUID workaround for 0F33/0F34 CPU */
+       if (c->x86 == 0xF && c->x86_model == 0x3
+           && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+               c->x86_phys_bits = 36;
+
        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states
@@ -63,6 +68,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }
 
+       /*
+        * There is a known erratum on Pentium III and Core Solo
+        * and Core Duo CPUs.
+        * " Page with PAT set to WC while associated MTRR is UC
+        *   may consolidate to UC "
+        * Because of this erratum, it is better to stick with
+        * setting WC in MTRR rather than using PAT on these CPUs.
+        *
+        * Enable PAT WC only on P4, Core 2 or later CPUs.
+        */
+       if (c->x86 == 6 && c->x86_model < 15)
+               clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
@@ -99,6 +116,28 @@ static void __cpuinit trap_init_f00f_bug(void)
 }
 #endif
 
+static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_SMP
+       /* are we being called from identify_secondary_cpu()? */
+       if (c->cpu_index == boot_cpu_id)
+               return;
+
+       /*
+        * Mask B, Pentium, but not Pentium MMX
+        */
+       if (c->x86 == 5 &&
+           c->x86_mask >= 1 && c->x86_mask <= 4 &&
+           c->x86_model <= 3) {
+               /*
+                * Remember we have B step Pentia with bugs
+                */
+               WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
+                                   "with B stepping processors.\n");
+       }
+#endif
+}
+
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 {
        unsigned long lo, hi;
@@ -135,10 +174,10 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
-               if ((lo & (1<<9)) == 0) {
+               if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
                        printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
                        printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
-                       lo |= (1<<9);   /* Disable hw prefetching */
+                       lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
                        wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
                }
        }
@@ -175,6 +214,8 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_NUMAQ
        numaq_tsc_disable();
 #endif
+
+       intel_smp_check(c);
 }
 #else
 static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
@@ -374,7 +415,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
 }
 #endif
 
-static struct cpu_dev intel_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
 #ifdef CONFIG_X86_32
index da299eb85fc09b8086e0dc9e883fcdd1a59db12a..c471eb1a389cc02c4f788d5a828f3f4bfe56c93d 100644 (file)
@@ -32,7 +32,7 @@ struct _cache_table
 };
 
 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __cpuinitdata =
+static const struct _cache_table __cpuinitconst cache_table[] =
 {
        { 0x06, LVL_1_INST, 8 },        /* 4-way set assoc, 32 byte line size */
        { 0x08, LVL_1_INST, 16 },       /* 4-way set assoc, 32 byte line size */
@@ -147,7 +147,16 @@ struct _cpuid4_info {
        union _cpuid4_leaf_ecx ecx;
        unsigned long size;
        unsigned long can_disable;
-       cpumask_t shared_cpu_map;       /* future?: only cpus/node is needed */
+       DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+       union _cpuid4_leaf_eax eax;
+       union _cpuid4_leaf_ebx ebx;
+       union _cpuid4_leaf_ecx ecx;
+       unsigned long size;
+       unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -197,15 +206,15 @@ union l3_cache {
        unsigned val;
 };
 
-static unsigned short assocs[] __cpuinitdata = {
+static const unsigned short __cpuinitconst assocs[] = {
        [1] = 1, [2] = 2, [4] = 4, [6] = 8,
        [8] = 16, [0xa] = 32, [0xb] = 48,
        [0xc] = 64,
        [0xf] = 0xffff // ??
 };
 
-static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
-static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
+static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
+static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
 
 static void __cpuinit
 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
@@ -278,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
        if (index < 3)
                return;
@@ -286,7 +295,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+                                  struct _cpuid4_info_regs *this_leaf)
 {
        union _cpuid4_leaf_eax  eax;
        union _cpuid4_leaf_ebx  ebx;
@@ -314,6 +324,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
        return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+       struct _cpuid4_info_regs *leaf_regs =
+               (struct _cpuid4_info_regs *)this_leaf;
+
+       return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
        unsigned int            eax, ebx, ecx, edx;
@@ -353,11 +372,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
                 * parameters cpuid leaf to find the cache details
                 */
                for (i = 0; i < num_cache_leaves; i++) {
-                       struct _cpuid4_info this_leaf;
-
+                       struct _cpuid4_info_regs this_leaf;
                        int retval;
 
-                       retval = cpuid4_cache_lookup(i, &this_leaf);
+                       retval = cpuid4_cache_lookup_regs(i, &this_leaf);
                        if (retval >= 0) {
                                switch(this_leaf.eax.split.level) {
                                    case 1:
@@ -506,17 +524,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
        if (num_threads_sharing == 1)
-               cpu_set(cpu, this_leaf->shared_cpu_map);
+               cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
        else {
                index_msb = get_count_order(num_threads_sharing);
 
                for_each_online_cpu(i) {
                        if (cpu_data(i).apicid >> index_msb ==
                            c->apicid >> index_msb) {
-                               cpu_set(i, this_leaf->shared_cpu_map);
+                               cpumask_set_cpu(i,
+                                       to_cpumask(this_leaf->shared_cpu_map));
                                if (i != cpu && per_cpu(cpuid4_info, i))  {
-                                       sibling_leaf = CPUID4_INFO_IDX(i, index);
-                                       cpu_set(cpu, sibling_leaf->shared_cpu_map);
+                                       sibling_leaf =
+                                               CPUID4_INFO_IDX(i, index);
+                                       cpumask_set_cpu(cpu, to_cpumask(
+                                               sibling_leaf->shared_cpu_map));
                                }
                        }
                }
@@ -528,9 +549,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
        int sibling;
 
        this_leaf = CPUID4_INFO_IDX(cpu, index);
-       for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+       for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
                sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-               cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+               cpumask_clear_cpu(cpu,
+                                 to_cpumask(sibling_leaf->shared_cpu_map));
        }
 }
 #else
@@ -635,8 +657,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
        int n = 0;
 
        if (len > 1) {
-               cpumask_t *mask = &this_leaf->shared_cpu_map;
+               const struct cpumask *mask;
 
+               mask = to_cpumask(this_leaf->shared_cpu_map);
                n = type?
                        cpulist_scnprintf(buf, len-2, mask) :
                        cpumask_scnprintf(buf, len-2, mask);
@@ -699,7 +722,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-       int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+       const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+       int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        ssize_t ret = 0;
        int i;
@@ -733,7 +757,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
                    size_t count)
 {
-       int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+       const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+       int node = cpu_to_node(cpumask_first(mask));
        struct pci_dev *dev = NULL;
        unsigned int ret, index, val;
 
@@ -878,7 +903,7 @@ err_out:
        return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -918,7 +943,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
                }
                kobject_uevent(&(this_object->kobj), KOBJ_ADD);
        }
-       cpu_set(cpu, cache_dev_map);
+       cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
        kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
        return 0;
@@ -931,9 +956,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
        if (per_cpu(cpuid4_info, cpu) == NULL)
                return;
-       if (!cpu_isset(cpu, cache_dev_map))
+       if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
                return;
-       cpu_clear(cpu, cache_dev_map);
+       cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
        for (i = 0; i < num_cache_leaves; i++)
                kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
index d7d2323bbb6976ffa603246bd06845d280150371..b2f89829bbe824d2cecfd74d8c09b2923406493b 100644 (file)
@@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32)            += k7.o p4.o p5.o p6.o winchip.o
 obj-$(CONFIG_X86_MCE_INTEL)    += mce_intel_64.o
 obj-$(CONFIG_X86_MCE_AMD)      += mce_amd_64.o
 obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
+obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
index dfaebce3633e3f9b8415e79f29ad9f012276825a..3552119b091da51e65ce933c9852eee694920b9b 100644 (file)
@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
        }
 }
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-       old_cr4 = read_cr4();
-       clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-       if (old_cr4 & X86_CR4_MCE)
-               set_in_cr4(X86_CR4_MCE);
-}
-
 static int __init mcheck_disable(char *str)
 {
        mce_disabled = 1;
index fe79985ce0f2f6fb4ae23f3bc92a791994ed9f62..ca14604611ec7ccc6217afc008e50d88af6ad88f 100644 (file)
@@ -3,6 +3,8 @@
  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Rest from unknown author(s).
  * 2004 Andi Kleen. Rewrote most of it.
+ * Copyright 2008 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
@@ -24,6 +26,9 @@
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/kdebug.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -32,7 +37,6 @@
 #include <asm/idle.h>
 
 #define MISC_MCELOG_MINOR 227
-#define NR_SYSFS_BANKS 6
 
 atomic_t mce_entry;
 
@@ -47,7 +51,7 @@ static int mce_dont_init;
  */
 static int tolerant = 1;
 static int banks;
-static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
+static u64 *bank;
 static unsigned long notify_user;
 static int rip_msr;
 static int mce_bootlog = -1;
@@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL };
 
 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 
+/* MCA banks polled by the periodic polling timer for corrected events */
+DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
+       [0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
+};
+
+/* Do the initial setup of a struct mce */
+void mce_setup(struct mce *m)
+{
+       memset(m, 0, sizeof(struct mce));
+       m->cpu = smp_processor_id();
+       rdtscll(m->tsc);
+}
+
 /*
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -119,11 +136,11 @@ static void print_mce(struct mce *m)
                        print_symbol("{%s}", m->ip);
                printk("\n");
        }
-       printk(KERN_EMERG "TSC %Lx ", m->tsc);
+       printk(KERN_EMERG "TSC %llx ", m->tsc);
        if (m->addr)
-               printk("ADDR %Lx ", m->addr);
+               printk("ADDR %llx ", m->addr);
        if (m->misc)
-               printk("MISC %Lx ", m->misc);
+               printk("MISC %llx ", m->misc);
        printk("\n");
        printk(KERN_EMERG "This is not a software problem!\n");
        printk(KERN_EMERG "Run through mcelog --ascii to decode "
@@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
        panic(msg);
 }
 
-static int mce_available(struct cpuinfo_x86 *c)
+int mce_available(struct cpuinfo_x86 *c)
 {
+       if (mce_dont_init)
+               return 0;
        return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
@@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 }
 
 /*
- * The actual machine check handler
+ * Poll for corrected events or events that happened before reset.
+ * Those are just logged through /dev/mcelog.
+ *
+ * This is executed in standard interrupt context.
+ */
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+{
+       struct mce m;
+       int i;
+
+       mce_setup(&m);
+
+       rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+       for (i = 0; i < banks; i++) {
+               if (!bank[i] || !test_bit(i, *b))
+                       continue;
+
+               m.misc = 0;
+               m.addr = 0;
+               m.bank = i;
+               m.tsc = 0;
+
+               barrier();
+               rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
+               if (!(m.status & MCI_STATUS_VAL))
+                       continue;
+
+               /*
+                * Uncorrected events are handled by the exception handler
+                * when it is enabled. But when the exception handler is
+                * disabled, log everything.
+                *
+                * TBD do the same check for MCI_STATUS_EN here?
+                */
+               if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
+                       continue;
+
+               if (m.status & MCI_STATUS_MISCV)
+                       rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
+               if (m.status & MCI_STATUS_ADDRV)
+                       rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
+
+               if (!(flags & MCP_TIMESTAMP))
+                       m.tsc = 0;
+               /*
+                * Don't get the IP here because it's unlikely to
+                * have anything to do with the actual error location.
+                */
+
+               mce_log(&m);
+               add_taint(TAINT_MACHINE_CHECK);
+
+               /*
+                * Clear state for this bank.
+                */
+               wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+       }
+
+       /*
+        * Don't clear MCG_STATUS here because it's only defined for
+        * exceptions.
+        */
+}
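The flow of machine_check_poll() — scan each enabled bank, skip banks with no valid status, defer uncorrected errors to the exception handler unless MCP_UC is set, log and clear the rest — can be sketched in isolation. A hedged userspace stand-in (sim_status and the bit values below are made up; the real code reads MSR_IA32_MC0_STATUS + i*4):

/* sketch: the bank-scan shape of machine_check_poll() */
#include <stdint.h>
#include <stdio.h>

#define STATUS_VAL (1ULL << 63)
#define STATUS_UC  (1ULL << 61)

static uint64_t sim_status[4] = { 0, STATUS_VAL, STATUS_VAL | STATUS_UC, 0 };

static void poll_banks(int log_uncorrected)
{
	for (int i = 0; i < 4; i++) {
		uint64_t s = sim_status[i];

		if (!(s & STATUS_VAL))
			continue;	/* nothing latched in this bank */
		if ((s & STATUS_UC) && !log_uncorrected)
			continue;	/* left for the exception handler */
		printf("bank %d: logging status %#llx\n", i,
		       (unsigned long long)s);
		sim_status[i] = 0;	/* clear state for this bank */
	}
}

int main(void)
{
	poll_banks(0);	/* periodic timer: corrected events only */
	poll_banks(1);	/* boot: also log leftover UC events (MCP_UC) */
	return 0;
}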
+
+/*
+ * The actual machine check handler. This only handles real
+ * exceptions when something got corrupted coming in through int 18.
+ *
+ * This is executed in NMI context, not subject to normal locking rules. This
+ * implies that most kernel services cannot be safely used. Don't even
+ * think about putting a printk in there!
  */
 void do_machine_check(struct pt_regs * regs, long error_code)
 {
@@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
         * error.
         */
        int kill_it = 0;
+       DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 
        atomic_inc(&mce_entry);
 
-       if ((regs
-            && notify_die(DIE_NMI, "machine check", regs, error_code,
+       if (notify_die(DIE_NMI, "machine check", regs, error_code,
                           18, SIGKILL) == NOTIFY_STOP)
-           || !banks)
+               goto out2;
+       if (!banks)
                goto out2;
 
-       memset(&m, 0, sizeof(struct mce));
-       m.cpu = smp_processor_id();
+       mce_setup(&m);
+
        rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
        /* if the restart IP is not valid, we're done for */
        if (!(m.mcgstatus & MCG_STATUS_RIPV))
@@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code)
        barrier();
 
        for (i = 0; i < banks; i++) {
-               if (i < NR_SYSFS_BANKS && !bank[i])
+               __clear_bit(i, toclear);
+               if (!bank[i])
                        continue;
 
                m.misc = 0;
                m.addr = 0;
                m.bank = i;
-               m.tsc = 0;
 
                rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
                if ((m.status & MCI_STATUS_VAL) == 0)
                        continue;
 
+               /*
+                * Non-uncorrected errors are handled by machine_check_poll();
+                * leave them alone.
+                */
+               if ((m.status & MCI_STATUS_UC) == 0)
+                       continue;
+
+               /*
+                * Set taint even when machine check was not enabled.
+                */
+               add_taint(TAINT_MACHINE_CHECK);
+
+               __set_bit(i, toclear);
+
                if (m.status & MCI_STATUS_EN) {
                        /* if PCC was set, there's no way out */
                        no_way_out |= !!(m.status & MCI_STATUS_PCC);
@@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                                        no_way_out = 1;
                                kill_it = 1;
                        }
+               } else {
+                       /*
+                        * The machine check event was not enabled. Clear it,
+                        * but otherwise ignore it.
+                        */
+                       continue;
                }
 
                if (m.status & MCI_STATUS_MISCV)
@@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                        rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
 
                mce_get_rip(&m, regs);
-               if (error_code >= 0)
-                       rdtscll(m.tsc);
-               if (error_code != -2)
-                       mce_log(&m);
+               mce_log(&m);
 
                /* Did this bank cause the exception? */
                /* Assume that the bank with uncorrectable errors did it,
@@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
                        panicm = m;
                        panicm_found = 1;
                }
-
-               add_taint(TAINT_MACHINE_CHECK);
        }
 
-       /* Never do anything final in the polling timer */
-       if (!regs)
-               goto out;
-
        /* If we didn't find an uncorrectable error, pick
           the last one (shouldn't happen, just being safe). */
        if (!panicm_found)
@@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
        /* notify userspace ASAP */
        set_thread_flag(TIF_MCE_NOTIFY);
 
- out:
        /* the last thing we do is clear state */
-       for (i = 0; i < banks; i++)
-               wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+       for (i = 0; i < banks; i++) {
+               if (test_bit(i, toclear))
+                       wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+       }
        wrmsrl(MSR_IA32_MCG_STATUS, 0);
  out2:
        atomic_dec(&mce_entry);
@@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  * and historically has been the register value of the
  * MSR_IA32_THERMAL_STATUS (Intel) msr.
  */
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
+void mce_log_therm_throt_event(__u64 status)
 {
        struct mce m;
 
-       memset(&m, 0, sizeof(m));
-       m.cpu = cpu;
+       mce_setup(&m);
        m.bank = MCE_THERMAL_BANK;
        m.status = status;
-       rdtscll(m.tsc);
        mce_log(&m);
 }
 #endif /* CONFIG_X86_MCE_INTEL */
@@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static int next_interval; /* in jiffies */
-static void mcheck_timer(struct work_struct *work);
-static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
+static void mcheck_timer(unsigned long);
+static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mcheck_check_cpu(void *info)
+static void mcheck_timer(unsigned long data)
 {
-       if (mce_available(&current_cpu_data))
-               do_machine_check(NULL, 0);
-}
+       struct timer_list *t = &per_cpu(mce_timer, data);
 
-static void mcheck_timer(struct work_struct *work)
-{
-       on_each_cpu(mcheck_check_cpu, NULL, 1);
+       WARN_ON(smp_processor_id() != data);
+
+       if (mce_available(&current_cpu_data))
+               machine_check_poll(MCP_TIMESTAMP,
+                               &__get_cpu_var(mce_poll_banks));
 
        /*
         * Alert userspace if needed.  If we logged an MCE, reduce the
@@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work)
                                (int)round_jiffies_relative(check_interval*HZ));
        }
 
-       schedule_delayed_work(&mcheck_work, next_interval);
+       t->expires = jiffies + next_interval;
+       add_timer(t);
+}
+
+static void mce_do_trigger(struct work_struct *work)
+{
+       call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
 }
 
+static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
 /*
- * This is only called from process context.  This is where we do
- * anything we need to alert userspace about new MCEs.  This is called
- * directly from the poller and also from entry.S and idle, thanks to
- * TIF_MCE_NOTIFY.
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+ * context.
  */
 int mce_notify_user(void)
 {
+       /* Not more than two messages every minute */
+       static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
        clear_thread_flag(TIF_MCE_NOTIFY);
        if (test_and_clear_bit(0, &notify_user)) {
-               static unsigned long last_print;
-               unsigned long now = jiffies;
-
                wake_up_interruptible(&mce_wait);
-               if (trigger[0])
-                       call_usermodehelper(trigger, trigger_argv, NULL,
-                                               UMH_NO_WAIT);
 
-               if (time_after_eq(now, last_print + (check_interval*HZ))) {
-                       last_print = now;
+               /*
+                * There is no risk of missing notifications because the
+                * work's pending flag is always cleared before the work
+                * function is executed.
+                */
+               if (trigger[0] && !work_pending(&mce_trigger_work))
+                       schedule_work(&mce_trigger_work);
+
+               if (__ratelimit(&ratelimit))
                        printk(KERN_INFO "Machine check events logged\n");
-               }
 
                return 1;
        }
@@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = {
 
 static __init int periodic_mcheck_init(void)
 {
-       next_interval = check_interval * HZ;
-       if (next_interval)
-               schedule_delayed_work(&mcheck_work,
-                                     round_jiffies_relative(next_interval));
-       idle_notifier_register(&mce_idle_notifier);
-       return 0;
+       idle_notifier_register(&mce_idle_notifier);
+       return 0;
 }
 __initcall(periodic_mcheck_init);
 
-
 /*
  * Initialize Machine Checks for a CPU.
  */
-static void mce_init(void *dummy)
+static int mce_cap_init(void)
 {
        u64 cap;
-       int i;
+       unsigned b;
 
        rdmsrl(MSR_IA32_MCG_CAP, cap);
-       banks = cap & 0xff;
-       if (banks > MCE_EXTENDED_BANK) {
-               banks = MCE_EXTENDED_BANK;
-               printk(KERN_INFO "MCE: warning: using only %d banks\n",
-                      MCE_EXTENDED_BANK);
+       b = cap & 0xff;
+       if (b > MAX_NR_BANKS) {
+               printk(KERN_WARNING
+                      "MCE: Using only %u machine check banks out of %u\n",
+                       MAX_NR_BANKS, b);
+               b = MAX_NR_BANKS;
        }
+
+       /* Don't support asymmetric configurations today */
+       WARN_ON(banks != 0 && b != banks);
+       banks = b;
+       if (!bank) {
+               bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
+               if (!bank)
+                       return -ENOMEM;
+               memset(bank, 0xff, banks * sizeof(u64));
+       }
+
        /* Use accurate RIP reporting if available. */
        if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
                rip_msr = MSR_IA32_MCG_EIP;
 
-       /* Log the machine checks left over from the previous reset.
-          This also clears all registers */
-       do_machine_check(NULL, mce_bootlog ? -1 : -2);
+       return 0;
+}
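mce_cap_init() reads MCG_CAP once: the low 8 bits give the bank count, which is clamped before the per-bank control array is lazily allocated. A sketch of that field extraction and clamping, with a made-up cap value:

/* sketch: MCG_CAP-style field extraction and bank allocation */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

#define MAX_BANKS 32

int main(void)
{
	uint64_t cap = 0x0000000000000106ULL;	/* hypothetical: 6 banks */
	unsigned nbanks = cap & 0xff;

	if (nbanks > MAX_BANKS) {
		printf("using only %u of %u banks\n", MAX_BANKS, nbanks);
		nbanks = MAX_BANKS;
	}

	/* one 64-bit control word per bank, all bits enabled (~0) */
	uint64_t *bank = malloc(nbanks * sizeof(*bank));
	if (!bank)
		return 1;	/* -ENOMEM in the kernel version */
	for (unsigned i = 0; i < nbanks; i++)
		bank[i] = ~0ULL;

	printf("%u banks, bank[0]=%#llx\n", nbanks,
	       (unsigned long long)bank[0]);
	free(bank);
	return 0;
}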
+
+static void mce_init(void *dummy)
+{
+       u64 cap;
+       int i;
+       mce_banks_t all_banks;
+
+       /*
+        * Log the machine checks left over from the previous reset.
+        */
+       bitmap_fill(all_banks, MAX_NR_BANKS);
+       machine_check_poll(MCP_UC, &all_banks);
 
        set_in_cr4(X86_CR4_MCE);
 
+       rdmsrl(MSR_IA32_MCG_CAP, cap);
        if (cap & MCG_CTL_P)
                wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
        for (i = 0; i < banks; i++) {
-               if (i < NR_SYSFS_BANKS)
-                       wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
-               else
-                       wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
-
+               wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
                wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
        }
 }
 
 /* Add per CPU specific workarounds here */
-static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
+static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
        /* This should be disabled by the BIOS, but isn't always */
        if (c->x86_vendor == X86_VENDOR_AMD) {
-               if(c->x86 == 15)
+               if (c->x86 == 15 && banks > 4)
                        /* disable GART TBL walk error reporting, which trips off
                           incorrectly with the IOMMU & 3ware & Cerberus. */
-                       clear_bit(10, &bank[4]);
+                       clear_bit(10, (unsigned long *)&bank[4]);
                if(c->x86 <= 17 && mce_bootlog < 0)
                        /* Lots of broken BIOS around that don't clear them
                           by default and leave crap in there. Don't log. */
@@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
        }
 }
 
+static void mce_init_timer(void)
+{
+       struct timer_list *t = &__get_cpu_var(mce_timer);
+
+       /* data race harmless because everyone sets it to the same value */
+       if (!next_interval)
+               next_interval = check_interval * HZ;
+       if (!next_interval)
+               return;
+       setup_timer(t, mcheck_timer, smp_processor_id());
+       t->expires = round_jiffies(jiffies + next_interval);
+       add_timer(t);
+}
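round_jiffies() aligns the expiry so that per-CPU timers across the machine tend to fire in the same tick batch. A sketch of the rounding idea (rounding up to a whole second; the real helper also skews the result per CPU, which is omitted here):

/* sketch: align a tick count to a whole-second boundary */
#include <stdio.h>

#define HZ 250	/* illustrative tick rate */

static unsigned long round_to_second(unsigned long ticks)
{
	return ((ticks + HZ - 1) / HZ) * HZ;	/* next multiple of HZ */
}

int main(void)
{
	unsigned long jiffies = 100123;
	unsigned long interval = 5 * 60 * HZ;	/* check_interval */

	printf("expires=%lu\n", round_to_second(jiffies + interval));
	return 0;
}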
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
  */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-       mce_cpu_quirks(c);
+       if (!mce_available(c))
+               return;
 
-       if (mce_dont_init ||
-           !mce_available(c))
+       if (mce_cap_init() < 0) {
+               mce_dont_init = 1;
                return;
+       }
+       mce_cpu_quirks(c);
 
        mce_init(NULL);
        mce_cpu_features(c);
+       mce_init_timer();
 }
 
 /*
@@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 {
        unsigned long *cpu_tsc;
        static DEFINE_MUTEX(mce_read_mutex);
-       unsigned next;
+       unsigned prev, next;
        char __user *buf = ubuf;
        int i, err;
 
@@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
        }
 
        err = 0;
-       for (i = 0; i < next; i++) {
-               unsigned long start = jiffies;
-
-               while (!mcelog.entry[i].finished) {
-                       if (time_after_eq(jiffies, start + 2)) {
-                               memset(mcelog.entry + i,0, sizeof(struct mce));
-                               goto timeout;
+       prev = 0;
+       do {
+               for (i = prev; i < next; i++) {
+                       unsigned long start = jiffies;
+
+                       while (!mcelog.entry[i].finished) {
+                               if (time_after_eq(jiffies, start + 2)) {
+                                       memset(mcelog.entry + i, 0,
+                                              sizeof(struct mce));
+                                       goto timeout;
+                               }
+                               cpu_relax();
                        }
-                       cpu_relax();
+                       smp_rmb();
+                       err |= copy_to_user(buf, mcelog.entry + i,
+                                           sizeof(struct mce));
+                       buf += sizeof(struct mce);
+timeout:
+                       ;
                }
-               smp_rmb();
-               err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
-               buf += sizeof(struct mce);
- timeout:
-               ;
-       }
 
-       memset(mcelog.entry, 0, next * sizeof(struct mce));
-       mcelog.next = 0;
+               memset(mcelog.entry + prev, 0,
+                      (next - prev) * sizeof(struct mce));
+               prev = next;
+               next = cmpxchg(&mcelog.next, prev, 0);
+       } while (next != prev);
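The new drain loop snapshots mcelog.next, copies entries up to the snapshot, then tries to cmpxchg the index back to 0; if a producer logged another entry in the meantime, the cmpxchg returns the newer index and the loop resumes from where it stopped. A userspace sketch of the same pattern, assuming C11 atomics:

/* sketch: drain a log with a compare-exchange reset of the index */
#include <stdatomic.h>
#include <stdio.h>

#define LOG_LEN 32

static int entries[LOG_LEN];
static atomic_uint log_next;	/* producer index, like mcelog.next */

static void drain(void)
{
	unsigned prev = 0;
	unsigned next = atomic_load(&log_next);

	do {
		for (unsigned i = prev; i < next; i++)
			printf("entry %u = %d\n", i, entries[i]);
		prev = next;
		/* reset to 0 only if nobody appended since the snapshot */
		if (atomic_compare_exchange_strong(&log_next, &next, 0))
			break;
		/* CAS failed: `next` now holds the newer producer index */
	} while (next != prev);
}

int main(void)
{
	entries[0] = 42;
	entries[1] = 43;
	atomic_store(&log_next, 2);
	drain();
	return 0;
}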
 
        synchronize_sched();
 
@@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = {
        &mce_chrdev_ops,
 };
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-       old_cr4 = read_cr4();
-       clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-       if (old_cr4 & X86_CR4_MCE)
-               set_in_cr4(X86_CR4_MCE);
-}
-
 /*
  * Old style boot options parsing. Only for compatibility.
  */
@@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str)
        return 1;
 }
 
-/* mce=off disables machine check. Note you can re-enable it later
-   using sysfs.
+/* mce=off disables machine check.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
    mce=nobootlog Don't log MCEs from before booting. */
@@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable);
  * Sysfs support
  */
 
+/*
+ * Disable machine checks on suspend and shutdown. We can't really handle
+ * them later.
+ */
+static int mce_disable(void)
+{
+       int i;
+
+       for (i = 0; i < banks; i++)
+               wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+       return 0;
+}
+
+static int mce_suspend(struct sys_device *dev, pm_message_t state)
+{
+       return mce_disable();
+}
+
+static int mce_shutdown(struct sys_device *dev)
+{
+       return mce_disable();
+}
+
 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
    Only one CPU is active at this time, the others get readded later using
    CPU hotplug. */
@@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev)
        return 0;
 }
 
+static void mce_cpu_restart(void *data)
+{
+       del_timer_sync(&__get_cpu_var(mce_timer));
+       if (mce_available(&current_cpu_data))
+               mce_init(NULL);
+       mce_init_timer();
+}
+
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-       if (next_interval)
-               cancel_delayed_work(&mcheck_work);
-       /* Timer race is harmless here */
-       on_each_cpu(mce_init, NULL, 1);
        next_interval = check_interval * HZ;
-       if (next_interval)
-               schedule_delayed_work(&mcheck_work,
-                                     round_jiffies_relative(next_interval));
+       on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 static struct sysdev_class mce_sysclass = {
+       .suspend = mce_suspend,
+       .shutdown = mce_shutdown,
        .resume = mce_resume,
        .name = "machinecheck",
 };
@@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit
        }                                                               \
        static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
-/*
- * TBD should generate these dynamically based on number of available banks.
- * Have only 6 contol banks in /sysfs until then.
- */
-ACCESSOR(bank0ctl,bank[0],mce_restart())
-ACCESSOR(bank1ctl,bank[1],mce_restart())
-ACCESSOR(bank2ctl,bank[2],mce_restart())
-ACCESSOR(bank3ctl,bank[3],mce_restart())
-ACCESSOR(bank4ctl,bank[4],mce_restart())
-ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute *bank_attrs;
+
+static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+                        char *buf)
+{
+       u64 b = bank[attr - bank_attrs];
+       return sprintf(buf, "%llx\n", b);
+}
+
+static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+                       const char *buf, size_t siz)
+{
+       char *end;
+       u64 new = simple_strtoull(buf, &end, 0);
+       if (end == buf)
+               return -EINVAL;
+       bank[attr - bank_attrs] = new;
+       mce_restart();
+       return end-buf;
+}
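show_bank()/set_bank() recover the bank number from the attribute pointer itself: attr - bank_attrs is the element index into the contiguous array allocated in mce_init_banks(). A tiny sketch of that pointer-difference trick:

/* sketch: element index from a pointer into a contiguous array */
#include <stdio.h>

struct attr { const char *name; };

static struct attr bank_attrs[4] = {
	{ "bank0" }, { "bank1" }, { "bank2" }, { "bank3" },
};

static long bank_index(const struct attr *attr)
{
	return attr - bank_attrs;	/* element index, not byte offset */
}

int main(void)
{
	printf("%s -> index %ld\n", bank_attrs[2].name,
	       bank_index(&bank_attrs[2]));
	return 0;
}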
 
 static ssize_t show_trigger(struct sys_device *s, struct sysdev_attribute *attr,
                                char *buf)
@@ -814,8 +986,6 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
 static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
 ACCESSOR(check_interval,check_interval,mce_restart())
 static struct sysdev_attribute *mce_attributes[] = {
-       &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
-       &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
        &attr_tolerant.attr, &attr_check_interval, &attr_trigger,
        NULL
 };
@@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu)
                if (err)
                        goto error;
        }
+       for (i = 0; i < banks; i++) {
+               err = sysdev_create_file(&per_cpu(device_mce, cpu),
+                                       &bank_attrs[i]);
+               if (err)
+                       goto error2;
+       }
        cpu_set(cpu, mce_device_initialized);
 
        return 0;
+error2:
+       while (--i >= 0) {
+               sysdev_remove_file(&per_cpu(device_mce, cpu),
+                                       &bank_attrs[i]);
+       }
 error:
-       while (i--) {
+       while (--i >= 0) {
                sysdev_remove_file(&per_cpu(device_mce,cpu),
                                   mce_attributes[i]);
        }
@@ -868,15 +1049,46 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
        for (i = 0; mce_attributes[i]; i++)
                sysdev_remove_file(&per_cpu(device_mce,cpu),
                        mce_attributes[i]);
+       for (i = 0; i < banks; i++)
+               sysdev_remove_file(&per_cpu(device_mce, cpu),
+                       &bank_attrs[i]);
        sysdev_unregister(&per_cpu(device_mce,cpu));
        cpu_clear(cpu, mce_device_initialized);
 }
 
+/* Make sure there are no machine checks on offlined CPUs. */
+static void mce_disable_cpu(void *h)
+{
+       int i;
+       unsigned long action = *(unsigned long *)h;
+
+       if (!mce_available(&current_cpu_data))
+               return;
+       if (!(action & CPU_TASKS_FROZEN))
+               cmci_clear();
+       for (i = 0; i < banks; i++)
+               wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+}
+
+static void mce_reenable_cpu(void *h)
+{
+       int i;
+       unsigned long action = *(unsigned long *)h;
+
+       if (!mce_available(&current_cpu_data))
+               return;
+       if (!(action & CPU_TASKS_FROZEN))
+               cmci_reenable();
+       for (i = 0; i < banks; i++)
+               wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]);
+}
+
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
 static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
                                      unsigned long action, void *hcpu)
 {
        unsigned int cpu = (unsigned long)hcpu;
+       struct timer_list *t = &per_cpu(mce_timer, cpu);
 
        switch (action) {
        case CPU_ONLINE:
@@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb,
                        threshold_cpu_callback(action, cpu);
                mce_remove_device(cpu);
                break;
+       case CPU_DOWN_PREPARE:
+       case CPU_DOWN_PREPARE_FROZEN:
+               del_timer_sync(t);
+               smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
+               break;
+       case CPU_DOWN_FAILED:
+       case CPU_DOWN_FAILED_FROZEN:
+               t->expires = round_jiffies(jiffies + next_interval);
+               add_timer_on(t, cpu);
+               smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
+               break;
+       case CPU_POST_DEAD:
+               /* intentionally ignoring frozen here */
+               cmci_rediscover(cpu);
+               break;
        }
        return NOTIFY_OK;
 }
@@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = {
        .notifier_call = mce_cpu_callback,
 };
 
+static __init int mce_init_banks(void)
+{
+       int i;
+
+       bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks,
+                               GFP_KERNEL);
+       if (!bank_attrs)
+               return -ENOMEM;
+
+       for (i = 0; i < banks; i++) {
+               struct sysdev_attribute *a = &bank_attrs[i];
+               a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i);
+               if (!a->attr.name)
+                       goto nomem;
+               a->attr.mode = 0644;
+               a->show = show_bank;
+               a->store = set_bank;
+       }
+       return 0;
+
+nomem:
+       while (--i >= 0)
+               kfree(bank_attrs[i].attr.name);
+       kfree(bank_attrs);
+       bank_attrs = NULL;
+       return -ENOMEM;
+}
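mce_init_banks() allocates one attribute per bank and names each one, unwinding every successful allocation if a kasprintf() fails partway through. A userspace sketch of that allocate-then-unwind shape (malloc/snprintf stand in for the kernel helpers):

/* sketch: roll back partial allocations on mid-loop failure */
#include <stdio.h>
#include <stdlib.h>

struct attr { char *name; };

static struct attr *init_banks(int banks)
{
	struct attr *a = calloc(banks, sizeof(*a));
	int i;

	if (!a)
		return NULL;
	for (i = 0; i < banks; i++) {
		a[i].name = malloc(16);
		if (!a[i].name)
			goto nomem;
		snprintf(a[i].name, 16, "bank%d", i);
	}
	return a;

nomem:
	while (--i >= 0)	/* unwind the ones that succeeded */
		free(a[i].name);
	free(a);
	return NULL;
}

int main(void)
{
	struct attr *a = init_banks(4);

	if (a)
		printf("%s\n", a[3].name);	/* "bank3" */
	return 0;
}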
+
 static __init int mce_init_device(void)
 {
        int err;
@@ -906,6 +1161,11 @@ static __init int mce_init_device(void)
 
        if (!mce_available(&boot_cpu_data))
                return -EIO;
+
+       err = mce_init_banks();
+       if (err)
+               return err;
+
        err = sysdev_class_register(&mce_sysclass);
        if (err)
                return err;
index f2ee0ae29bd6e00b7755ab0573deae02e17ae592..c5a32f92d07ecc41b55b290f16fb7a0889c07c41 100644 (file)
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
        struct kobject *kobj;
        struct threshold_block *blocks;
-       cpumask_t cpus;
+       cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = {
 
 static DEFINE_PER_CPU(unsigned char, bank_map);        /* see which banks are on */
 
+static void amd_threshold_interrupt(void);
+
 /*
  * CPU Initialization
  */
@@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                        tr.reset = 0;
                        tr.old_limit = 0;
                        threshold_restart_bank(&tr);
+
+                       mce_threshold_vector = amd_threshold_interrupt;
                }
        }
 }
@@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
  * the interrupt goes off when error_count reaches threshold_limit.
  * the handler will simply log mcelog w/ software defined bank number.
  */
-asmlinkage void mce_threshold_interrupt(void)
+static void amd_threshold_interrupt(void)
 {
        unsigned int bank, block;
        struct mce m;
        u32 low = 0, high = 0, address = 0;
 
-       ack_APIC_irq();
-       exit_idle();
-       irq_enter();
-
-       memset(&m, 0, sizeof(m));
-       rdtscll(m.tsc);
-       m.cpu = smp_processor_id();
+       mce_setup(&m);
 
        /* assume first bank caused it */
        for (bank = 0; bank < NR_BANKS; ++bank) {
@@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void)
 
                        /* Log the machine check that caused the threshold
                           event. */
-                       do_machine_check(NULL, 0);
+                       machine_check_poll(MCP_TIMESTAMP,
+                                       &__get_cpu_var(mce_poll_banks));
 
                        if (high & MASK_OVERFLOW_HI) {
                                rdmsrl(address, m.misc);
@@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void)
                                       + bank * NR_BLOCKS
                                       + block;
                                mce_log(&m);
-                               goto out;
+                               return;
                        }
                }
        }
-out:
-       inc_irq_stat(irq_threshold_count);
-       irq_exit();
 }
 
 /*
@@ -481,7 +477,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
        if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {   /* symlink */
-               i = first_cpu(per_cpu(cpu_core_map, cpu));
+               i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
                /* first core not up yet */
                if (cpu_data(i).cpu_core_id)
@@ -501,7 +497,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (err)
                        goto out;
 
-               b->cpus = per_cpu(cpu_core_map, cpu);
+               cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
                per_cpu(threshold_banks, cpu)[bank] = b;
                goto out;
        }
@@ -512,15 +508,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                err = -ENOMEM;
                goto out;
        }
+       if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+               kfree(b);
+               err = -ENOMEM;
+               goto out;
+       }
 
        b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
        if (!b->kobj)
                goto out_free;
 
 #ifndef CONFIG_SMP
-       b->cpus = CPU_MASK_ALL;
+       cpumask_setall(b->cpus);
 #else
-       b->cpus = per_cpu(cpu_core_map, cpu);
+       cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
        per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
        if (err)
                goto out_free;
 
-       for_each_cpu_mask_nr(i, b->cpus) {
+       for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -545,6 +546,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
        per_cpu(threshold_banks, cpu)[bank] = NULL;
+       free_cpumask_var(b->cpus);
        kfree(b);
 out:
        return err;
@@ -619,7 +621,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
        /* remove all sibling symlinks before unregistering */
-       for_each_cpu_mask_nr(i, b->cpus) {
+       for_each_cpu(i, b->cpus) {
                if (i == cpu)
                        continue;
 
@@ -632,6 +634,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
        kobject_del(b->kobj);
        kobject_put(b->kobj);
+       free_cpumask_var(b->cpus);
        kfree(b);
        per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
index f44c366243602b594566147a8447bfaa3d641013..aaa7d97309387d5e99aa63cfa5e7b9e6ea3131e1 100644 (file)
@@ -1,17 +1,21 @@
 /*
  * Intel specific MCE features.
  * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Author: Andi Kleen
  */
 
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
 #include <asm/idle.h>
 #include <asm/therm_throt.h>
+#include <asm/apic.h>
 
 asmlinkage void smp_thermal_interrupt(void)
 {
@@ -24,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void)
 
        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
        if (therm_throt_process(msr_val & 1))
-               mce_log_therm_throt_event(smp_processor_id(), msr_val);
+               mce_log_therm_throt_event(msr_val);
 
        inc_irq_stat(irq_thermal_count);
        irq_exit();
@@ -48,13 +52,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        h = apic_read(APIC_LVTTHMR);
-       if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
+       if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG
                       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }
 
-       if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
+       if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
                tm2 = 1;
 
        if (h & APIC_VECTOR_MASK) {
@@ -72,7 +76,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
        wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);
 
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-       wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
+       wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
@@ -84,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
        return;
 }
 
+/*
+ * Support for Intel Corrected Machine Check Interrupts. This allows
+ * the CPU to raise an interrupt when a corrected machine check happened.
+ * Normally we pick those up using a regular polling timer.
+ * Also supports reliable discovery of shared banks.
+ */
+
+static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+
+/*
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+static DEFINE_SPINLOCK(cmci_discover_lock);
+
+#define CMCI_THRESHOLD 1
+
+static int cmci_supported(int *banks)
+{
+       u64 cap;
+
+       /*
+        * The vendor check is not strictly needed here, but the CPU
+        * initialization is keyed by vendor anyway; this makes sure
+        * no vendor-specific backdoor path is entered otherwise.
+        */
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return 0;
+       if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+               return 0;
+       rdmsrl(MSR_IA32_MCG_CAP, cap);
+       *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
+       return !!(cap & MCG_CMCI_P);
+}
+
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       mce_notify_user();
+}
+
+static void print_update(char *type, int *hdr, int num)
+{
+       if (*hdr == 0)
+               printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
+       *hdr = 1;
+       printk(KERN_CONT " %s:%d", type, num);
+}
+
+/*
+ * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
+ * on this CPU. Use the algorithm recommended in the SDM to discover shared
+ * banks.
+ */
+static void cmci_discover(int banks, int boot)
+{
+       unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+       int hdr = 0;
+       int i;
+
+       spin_lock(&cmci_discover_lock);
+       for (i = 0; i < banks; i++) {
+               u64 val;
+
+               if (test_bit(i, owned))
+                       continue;
+
+               rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+               /* Already owned by someone else? */
+               if (val & CMCI_EN) {
+                       if (test_and_clear_bit(i, owned) || boot)
+                               print_update("SHD", &hdr, i);
+                       __clear_bit(i, __get_cpu_var(mce_poll_banks));
+                       continue;
+               }
+
+               val |= CMCI_EN | CMCI_THRESHOLD;
+               wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+               rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+               /* Did the enable bit stick? -- the bank supports CMCI */
+               if (val & CMCI_EN) {
+                       if (!test_and_set_bit(i, owned) || boot)
+                               print_update("CMCI", &hdr, i);
+                       __clear_bit(i, __get_cpu_var(mce_poll_banks));
+               } else {
+                       WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+               }
+       }
+       spin_unlock(&cmci_discover_lock);
+       if (hdr)
+               printk(KERN_CONT "\n");
+}
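cmci_discover() probes ownership by writing CMCI_EN and reading it back: if the bit was already set, another CPU owns the (shared) bank; if it sticks after the write, this CPU now owns it; if it does not stick, the bank has no CMCI support and stays with the poller. A sketch against a simulated register file (the EN bit value and the latching behaviour below are made up):

/* sketch: "did the enable bit stick?" ownership probe */
#include <stdint.h>
#include <stdio.h>

#define EN (1ULL << 30)

/* bank 1 already claimed elsewhere; bank 2 doesn't latch EN */
static uint64_t ctl2[3] = { 0, EN, 0 };
static const int latches_en[3] = { 1, 1, 0 };

static void wr(int i, uint64_t v) { ctl2[i] = latches_en[i] ? v : (v & ~EN); }
static uint64_t rd(int i) { return ctl2[i]; }

int main(void)
{
	for (int i = 0; i < 3; i++) {
		uint64_t val = rd(i);

		if (val & EN) {		/* already owned by someone else */
			printf("bank %d: shared\n", i);
			continue;
		}
		wr(i, val | EN);
		if (rd(i) & EN)		/* the enable bit stuck */
			printf("bank %d: CMCI, owned here\n", i);
		else			/* no CMCI on this bank: keep polling */
			printf("bank %d: poll\n", i);
	}
	return 0;
}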
+
+/*
+ * Just in case we missed an event during initialization check
+ * all the CMCI owned banks.
+ */
+void cmci_recheck(void)
+{
+       unsigned long flags;
+       int banks;
+
+       if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+               return;
+       local_irq_save(flags);
+       machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+       local_irq_restore(flags);
+}
+
+/*
+ * Disable CMCI on this CPU for all banks it owns when it goes down.
+ * This allows other CPUs to claim the banks on rediscovery.
+ */
+void cmci_clear(void)
+{
+       int i;
+       int banks;
+       u64 val;
+
+       if (!cmci_supported(&banks))
+               return;
+       spin_lock(&cmci_discover_lock);
+       for (i = 0; i < banks; i++) {
+               if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+                       continue;
+               /* Disable CMCI */
+               rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+               val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
+               wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+               __clear_bit(i, __get_cpu_var(mce_banks_owned));
+       }
+       spin_unlock(&cmci_discover_lock);
+}
+
+/*
+ * After a CPU went down, cycle through all the others and rediscover
+ * the banks it owned. Must run in process context.
+ */
+void cmci_rediscover(int dying)
+{
+       int banks;
+       int cpu;
+       cpumask_var_t old;
+
+       if (!cmci_supported(&banks))
+               return;
+       if (!alloc_cpumask_var(&old, GFP_KERNEL))
+               return;
+       cpumask_copy(old, &current->cpus_allowed);
+
+       for_each_online_cpu (cpu) {
+               if (cpu == dying)
+                       continue;
+               if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+                       continue;
+               /* Recheck banks in case CPUs don't all have the same number */
+               if (cmci_supported(&banks))
+                       cmci_discover(banks, 0);
+       }
+
+       set_cpus_allowed_ptr(current, old);
+       free_cpumask_var(old);
+}
+
+/*
+ * Reenable CMCI on this CPU in case a CPU down failed.
+ */
+void cmci_reenable(void)
+{
+       int banks;
+       if (cmci_supported(&banks))
+               cmci_discover(banks, 0);
+}
+
+static __cpuinit void intel_init_cmci(void)
+{
+       int banks;
+
+       if (!cmci_supported(&banks))
+               return;
+
+       mce_threshold_vector = intel_threshold_interrupt;
+       cmci_discover(banks, 1);
+       /*
+        * For CPU #0 this runs with the APIC still disabled, but that's
+        * OK because only the vector is set up here. The banks are
+        * checked again later for CPU #0, just to make sure no events
+        * are missed.
+        */
+       apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
+       cmci_recheck();
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
        intel_init_thermal(c);
+       intel_init_cmci();
 }
index 9b60fce09f758d5af0520bccb61445a99aaad693..f53bdcbaf38255ae95eb936a5127548b09c8dbf2 100644 (file)
@@ -85,7 +85,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
        h = apic_read(APIC_LVTTHMR);
-       if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
+       if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
                                cpu);
                return; /* -EBUSY */
@@ -111,7 +111,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
        vendor_thermal_interrupt = intel_thermal_interrupt;
 
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-       wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
+       wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
new file mode 100644 (file)
index 0000000..23ee9e7
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Common corrected MCE threshold handler code:
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+
+#include <asm/irq_vectors.h>
+#include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/mce.h>
+
+static void default_threshold_interrupt(void)
+{
+       printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
+                        THRESHOLD_APIC_VECTOR);
+}
+
+void (*mce_threshold_vector)(void) = default_threshold_interrupt;
+
+asmlinkage void mce_threshold_interrupt(void)
+{
+       exit_idle();
+       irq_enter();
+       inc_irq_stat(irq_threshold_count);
+       mce_threshold_vector();
+       irq_exit();
+       /* Ack only at the end to avoid potential reentry */
+       ack_APIC_irq();
+}
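The new threshold.c funnels the APIC vector through a function pointer that vendor code overrides; until then a default handler flags the interrupt as unexpected. A minimal sketch of that dispatch pattern:

/* sketch: default handler behind an overridable function pointer */
#include <stdio.h>

static void default_threshold(void)
{
	printf("Unexpected threshold interrupt\n");
}

static void (*threshold_vector)(void) = default_threshold;

static void intel_cmci(void)
{
	printf("polling CMCI-owned banks\n");
}

static void threshold_interrupt(void)	/* the common entry point */
{
	threshold_vector();
}

int main(void)
{
	threshold_interrupt();		/* default: unexpected */
	threshold_vector = intel_cmci;	/* vendor init claims the vector */
	threshold_interrupt();		/* now dispatches to the CMCI poller */
	return 0;
}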
index 9abd48b2267413a4212b18d0cb627325703b90de..f6c70a164e320eeb58ab097ce7ee2d9079e29ce4 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/nmi.h>
 #include <linux/kprobes.h>
 
-#include <asm/apic.h>
+#include <asm/genapic.h>
 #include <asm/intel_arch_perfmon.h>
 
 struct nmi_watchdog_ctlblk {
index 01b1244ef1c0f47456a246f89324973b4f74269e..d67e0e48bc2dfa1115bfc427323bd78e878d51fc 100644 (file)
@@ -7,11 +7,10 @@
 /*
  *     Get CPU information for use by the procfs.
  */
-#ifdef CONFIG_X86_32
 static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
                              unsigned int cpu)
 {
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        if (c->x86_max_cores * smp_num_siblings > 1) {
                seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
                seq_printf(m, "siblings\t: %d\n",
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 #endif
 }
 
+#ifdef CONFIG_X86_32
 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
        /*
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
                   c->wp_works_ok ? "yes" : "no");
 }
 #else
-static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
-                             unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-       if (c->x86_max_cores * smp_num_siblings > 1) {
-               seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-               seq_printf(m, "siblings\t: %d\n",
-                          cpus_weight(per_cpu(cpu_core_map, cpu)));
-               seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
-               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
-               seq_printf(m, "apicid\t\t: %d\n", c->apicid);
-               seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
-       }
-#endif
-}
-
 static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
 {
        seq_printf(m,
index 52b3fefbd5af13c78b037b70f0e6bbeb6bbc780a..bb62b3e5caadca0faba5e489fb874bf563e0073c 100644 (file)
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
        .c_vendor       = "Transmeta",
        .c_ident        = { "GenuineTMx86", "TransmetaCPU" },
        .c_early_init   = early_init_transmeta,
index e777f79e0960d29fdaae20d800098569746450fe..fd2c37bf7acb6c6cc9a322600dc2c9dade028dbb 100644 (file)
@@ -8,7 +8,7 @@
  * so no special init takes place.
  */
 
-static struct cpu_dev umc_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
        .c_vendor       = "UMC",
        .c_ident        = { "UMC UMC UMC" },
        .c_models = {
index c689d19e35abb786657926265c03ab63e74a7538..ff958248e61d7d48965c7b61c3cfdf32bc551f23 100644 (file)
 #include <asm/apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/reboot.h>
 #include <asm/virtext.h>
 
-#include <mach_ipi.h>
-
 
 #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
 
index 6b1f6f6f866172b41b4b64e1590bb40f2de19315..87d103ded1c325a71d0ba65126bfd81996f78915 100644 (file)
@@ -99,7 +99,7 @@ print_context_stack(struct thread_info *tinfo,
                                frame = frame->next_frame;
                                bp = (unsigned long) frame;
                        } else {
-                               ops->address(data, addr, bp == 0);
+                               ops->address(data, addr, 0);
                        }
                        print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
                }
index c302d070704826c44f0f4f8434383594e578cbe8..d35db5993fd68e3a740ee36ade60edf78d14fae8 100644 (file)
@@ -106,7 +106,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
+       unsigned long *irq_stack_end =
+               (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned used = 0;
        struct thread_info *tinfo;
        int graph = 0;
@@ -160,23 +161,23 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        stack = (unsigned long *) estack_end[-2];
                        continue;
                }
-               if (irqstack_end) {
-                       unsigned long *irqstack;
-                       irqstack = irqstack_end -
-                               (IRQSTACKSIZE - 64) / sizeof(*irqstack);
+               if (irq_stack_end) {
+                       unsigned long *irq_stack;
+                       irq_stack = irq_stack_end -
+                               (IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);
 
-                       if (stack >= irqstack && stack < irqstack_end) {
+                       if (stack >= irq_stack && stack < irq_stack_end) {
                                if (ops->stack(data, "IRQ") < 0)
                                        break;
                                bp = print_context_stack(tinfo, stack, bp,
-                                       ops, data, irqstack_end, &graph);
+                                       ops, data, irq_stack_end, &graph);
                                /*
                                 * We link to the next stack (which would be
                                 * the process stack normally) the last
                                 * pointer (index -1 to end) in the IRQ stack:
                                 */
-                               stack = (unsigned long *) (irqstack_end[-1]);
-                               irqstack_end = NULL;
+                               stack = (unsigned long *) (irq_stack_end[-1]);
+                               irq_stack_end = NULL;
                                ops->stack(data, "EOI");
                                continue;
                        }
@@ -199,10 +200,10 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
        unsigned long *stack;
        int i;
        const int cpu = smp_processor_id();
-       unsigned long *irqstack_end =
-               (unsigned long *) (cpu_pda(cpu)->irqstackptr);
-       unsigned long *irqstack =
-               (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
+       unsigned long *irq_stack_end =
+               (unsigned long *)(per_cpu(irq_stack_ptr, cpu));
+       unsigned long *irq_stack =
+               (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE);
 
        /*
         * debugging aid: "show_stack(NULL, NULL);" prints the
@@ -218,9 +219,9 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 
        stack = sp;
        for (i = 0; i < kstack_depth_to_print; i++) {
-               if (stack >= irqstack && stack <= irqstack_end) {
-                       if (stack == irqstack_end) {
-                               stack = (unsigned long *) (irqstack_end[-1]);
+               if (stack >= irq_stack && stack <= irq_stack_end) {
+                       if (stack == irq_stack_end) {
+                               stack = (unsigned long *) (irq_stack_end[-1]);
                                printk(" <EOI> ");
                        }
                } else {
@@ -241,7 +242,7 @@ void show_registers(struct pt_regs *regs)
        int i;
        unsigned long sp;
        const int cpu = smp_processor_id();
-       struct task_struct *cur = cpu_pda(cpu)->pcurrent;
+       struct task_struct *cur = current;
 
        sp = regs->sp;
        printk("CPU %d ", cpu);
index e85826829cf25c1671b55b306976a917a40b9c43..95b81c18b6bce844a194b6239a7548a7f7f71d8b 100644 (file)
@@ -110,19 +110,25 @@ int __init e820_all_mapped(u64 start, u64 end, unsigned type)
 /*
  * Add a memory region to the kernel e820 map.
  */
-void __init e820_add_region(u64 start, u64 size, int type)
+static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
+                                        int type)
 {
-       int x = e820.nr_map;
+       int x = e820x->nr_map;
 
-       if (x == ARRAY_SIZE(e820.map)) {
+       if (x == ARRAY_SIZE(e820x->map)) {
                printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
                return;
        }
 
-       e820.map[x].addr = start;
-       e820.map[x].size = size;
-       e820.map[x].type = type;
-       e820.nr_map++;
+       e820x->map[x].addr = start;
+       e820x->map[x].size = size;
+       e820x->map[x].type = type;
+       e820x->nr_map++;
+}
+
+void __init e820_add_region(u64 start, u64 size, int type)
+{
+       __e820_add_region(&e820, start, size, type);
 }
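The e820 change is the classic parameterize-and-wrap refactor: the worker gains an explicit struct e820map * so that e820_saved can be updated through the same path, while the old entry point becomes a one-line wrapper over the global table. A compact sketch of the shape (types and sizes are illustrative):

/* sketch: worker takes an explicit map, old API wraps the global */
#include <stdio.h>

#define MAX_ENTRIES 8

struct region { unsigned long long addr, size; int type; };
struct map { struct region map[MAX_ENTRIES]; int nr_map; };

static struct map live, saved;

static void __add_region(struct map *m, unsigned long long start,
			 unsigned long long size, int type)
{
	if (m->nr_map == MAX_ENTRIES)
		return;		/* table full, drop the entry */
	m->map[m->nr_map++] = (struct region){ start, size, type };
}

static void add_region(unsigned long long start, unsigned long long size,
		       int type)
{
	__add_region(&live, start, size, type);	/* old API, new worker */
}

int main(void)
{
	add_region(0x100000, 0x1000, 1);
	__add_region(&saved, 0x100000, 0x1000, 1);
	printf("live=%d saved=%d\n", live.nr_map, saved.nr_map);
	return 0;
}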
 
 void __init e820_print_map(char *who)
@@ -417,11 +423,11 @@ static int __init append_e820_map(struct e820entry *biosmap, int nr_map)
        return __append_e820_map(biosmap, nr_map);
 }
 
-static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
+static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
                                        u64 size, unsigned old_type,
                                        unsigned new_type)
 {
-       int i;
+       unsigned int i;
        u64 real_updated_size = 0;
 
        BUG_ON(old_type == new_type);
@@ -429,7 +435,7 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
        if (size > (ULLONG_MAX - start))
                size = ULLONG_MAX - start;
 
-       for (i = 0; i < e820.nr_map; i++) {
+       for (i = 0; i < e820x->nr_map; i++) {
                struct e820entry *ei = &e820x->map[i];
                u64 final_start, final_end;
                if (ei->type != old_type)
@@ -446,10 +452,16 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
                final_end = min(start + size, ei->addr + ei->size);
                if (final_start >= final_end)
                        continue;
-               e820_add_region(final_start, final_end - final_start,
-                                        new_type);
+
+               __e820_add_region(e820x, final_start, final_end - final_start,
+                                 new_type);
+
                real_updated_size += final_end - final_start;
 
+               /*
+                * The leftover range could be the head or the tail of the
+                * entry, so the size needs to be updated first.
+                */
                ei->size -= final_end - final_start;
                if (ei->addr < final_start)
                        continue;
@@ -461,13 +473,13 @@ static u64 __init e820_update_range_map(struct e820map *e820x, u64 start,
 u64 __init e820_update_range(u64 start, u64 size, unsigned old_type,
                             unsigned new_type)
 {
-       return e820_update_range_map(&e820, start, size, old_type, new_type);
+       return __e820_update_range(&e820, start, size, old_type, new_type);
 }
 
 static u64 __init e820_update_range_saved(u64 start, u64 size,
                                          unsigned old_type, unsigned new_type)
 {
-       return e820_update_range_map(&e820_saved, start, size, old_type,
+       return __e820_update_range(&e820_saved, start, size, old_type,
                                     new_type);
 }
 
@@ -858,6 +870,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
  */
 void __init reserve_early(u64 start, u64 end, char *name)
 {
+       if (start >= end)
+               return;
+
        drop_overlaps_that_are_ok(start, end);
        __reserve_early(start, end, name, 0);
 }
@@ -1017,8 +1032,8 @@ u64 __init find_e820_area_size(u64 start, u64 *sizep, u64 align)
                        continue;
                return addr;
        }
-       return -1UL;
 
+       return -1ULL;
 }
 
 /*
@@ -1031,13 +1046,22 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
        u64 start;
 
        start = startt;
-       while (size < sizet)
+       while (size < sizet && (start + 1))
                start = find_e820_area_size(start, &size, align);
 
        if (size < sizet)
                return 0;
 
+#ifdef CONFIG_X86_32
+       if (start >= MAXMEM)
+               return 0;
+       if (start + size > MAXMEM)
+               size = MAXMEM - start;
+#endif
+
        addr = round_down(start + size - sizet, align);
+       if (addr < start)
+               return 0;
        e820_update_range(addr, sizet, E820_RAM, E820_RESERVED);
        e820_update_range_saved(addr, sizet, E820_RAM, E820_RESERVED);
        printk(KERN_INFO "update e820 for early_reserve_e820\n");
index 504ad198e4ad0f999b43dfbb16f1957305e71ffe..335f049d110f5b095793cb6832a3571ee1d2586c 100644 (file)
@@ -13,8 +13,8 @@
 #include <asm/setup.h>
 #include <xen/hvc-console.h>
 #include <asm/pci-direct.h>
-#include <asm/pgtable.h>
 #include <asm/fixmap.h>
+#include <asm/pgtable.h>
 #include <linux/usb/ehci_def.h>
 
 /* Simple VGA output */
@@ -250,7 +250,7 @@ static int dbgp_wait_until_complete(void)
        return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
 }
 
-static void dbgp_mdelay(int ms)
+static void __init dbgp_mdelay(int ms)
 {
        int i;
 
@@ -311,7 +311,7 @@ static void dbgp_set_data(const void *buf, int size)
        writel(hi, &ehci_debug->data47);
 }
 
-static void dbgp_get_data(void *buf, int size)
+static void __init dbgp_get_data(void *buf, int size)
 {
        unsigned char *bytes = buf;
        u32 lo, hi;
@@ -355,7 +355,7 @@ static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
        return ret;
 }
 
-static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
+static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
                                 int size)
 {
        u32 pids, addr, ctrl;
@@ -386,8 +386,8 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
        return ret;
 }
 
-static int dbgp_control_msg(unsigned devnum, int requesttype, int request,
-       int value, int index, void *data, int size)
+static int __init dbgp_control_msg(unsigned devnum, int requesttype,
+       int request, int value, int index, void *data, int size)
 {
        u32 pids, addr, ctrl;
        struct usb_ctrlrequest req;
@@ -489,7 +489,7 @@ static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
        return 0;
 }
 
-static int ehci_reset_port(int port)
+static int __init ehci_reset_port(int port)
 {
        u32 portsc;
        u32 delay_time, delay;
@@ -532,7 +532,7 @@ static int ehci_reset_port(int port)
        return -EBUSY;
 }
 
-static int ehci_wait_for_port(int port)
+static int __init ehci_wait_for_port(int port)
 {
        u32 status;
        int ret, reps;
@@ -557,13 +557,13 @@ static inline void dbgp_printk(const char *fmt, ...) { }
 
 typedef void (*set_debug_port_t)(int port);
 
-static void default_set_debug_port(int port)
+static void __init default_set_debug_port(int port)
 {
 }
 
-static set_debug_port_t set_debug_port = default_set_debug_port;
+static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
 
-static void nvidia_set_debug_port(int port)
+static void __init nvidia_set_debug_port(int port)
 {
        u32 dword;
        dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
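
These dbgp_*/ehci_* helpers are only reachable from the early-boot setup path, so tagging them __init lets their code be discarded with the rest of .init.text once boot finishes; set_debug_port becomes __initdata because a pointer whose only possible targets are __init functions must not outlive them. A rough user-space analogue of the section placement; my_init and my_initdata are stand-ins, not the kernel macros:

	#include <stdio.h>

	/* stand-ins for the kernel's __init/__initdata section markers */
	#define my_init     __attribute__((section(".init.text")))
	#define my_initdata __attribute__((section(".init.data")))

	typedef void (*set_debug_port_t)(int);

	static void my_init default_set_debug_port(int port)
	{
		(void)port;	/* nothing to do on generic chipsets */
	}

	/* the pointer lives in .init.data: after boot, the function it
	 * points at would be gone, so the pointer must go with it */
	static set_debug_port_t my_initdata set_debug_port =
		default_set_debug_port;

	int main(void)
	{
		set_debug_port(1);
		printf("debug port configured\n");
		return 0;
	}
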
index eb1ef3b67dd50278c7ce88bd92f5895d7f06ede6..1736acc4d7aa6cfc13bc8ebd0db0e2727a8ebf5e 100644 (file)
@@ -366,10 +366,12 @@ void __init efi_init(void)
                                        SMBIOS_TABLE_GUID)) {
                        efi.smbios = config_tables[i].table;
                        printk(" SMBIOS=0x%lx ", config_tables[i].table);
+#ifdef CONFIG_X86_UV
                } else if (!efi_guidcmp(config_tables[i].guid,
                                        UV_SYSTEM_TABLE_GUID)) {
                        efi.uv_systab = config_tables[i].table;
                        printk(" UVsystab=0x%lx ", config_tables[i].table);
+#endif
                } else if (!efi_guidcmp(config_tables[i].guid,
                                        HCDP_TABLE_GUID)) {
                        efi.hcdp = config_tables[i].table;
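
efi.uv_systab only exists when CONFIG_X86_UV is enabled, so the matching else-if arm has to be compiled out along with it; the surrounding chain still reads correctly either way. A condensed sketch of the config-guarded table walk, with purely illustrative GUID values and names:

	#include <stdio.h>
	#include <string.h>

	struct guid { unsigned char b[16]; };
	struct cfg_table { struct guid guid; unsigned long table; };

	static const struct guid SMBIOS_GUID    = { { 0x01 } };
	#ifdef CONFIG_X86_UV
	static const struct guid UV_SYSTAB_GUID = { { 0x02 } };
	static unsigned long uv_systab;
	#endif

	static int guidcmp(const struct guid *a, const struct guid *b)
	{
		return memcmp(a, b, sizeof(*a));
	}

	static void parse_table(const struct cfg_table *t)
	{
		if (!guidcmp(&t->guid, &SMBIOS_GUID)) {
			printf(" SMBIOS=0x%lx ", t->table);
	#ifdef CONFIG_X86_UV
		} else if (!guidcmp(&t->guid, &UV_SYSTAB_GUID)) {
			uv_systab = t->table;
			printf(" UVsystab=0x%lx ", t->table);
	#endif
		}
	}

	int main(void)
	{
		struct cfg_table t = { SMBIOS_GUID, 0x1234 };

		parse_table(&t);
		printf("\n");
		return 0;
	}
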
index cb783b92c50cce5b9d123c8bff127ba967a9b230..22c3b7828c50fa1f0c61e17d6680cbf19d0b6a17 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/proto.h>
 #include <asm/efi.h>
 #include <asm/cacheflush.h>
+#include <asm/fixmap.h>
 
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
index ef00bb77d7e480d058ce21e2b66a20be730c7896..fbe66e626c09f437da6110bec5c080ef39e6bf95 100644 (file)
@@ -6,7 +6,7 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 
 /*
  * efi_call_phys(void *, ...) is a function with variable parameters.
@@ -113,6 +113,7 @@ ENTRY(efi_call_phys)
        movl    (%edx), %ecx
        pushl   %ecx
        ret
+ENDPROC(efi_call_phys)
 .previous
 
 .data
index 99b47d48c9f4ba23c71d5e5a5acebf55b62272ee..4c07ccab8146f0bbed2850f586a68f126b2bc7eb 100644 (file)
@@ -41,6 +41,7 @@ ENTRY(efi_call0)
        addq $32, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call0)
 
 ENTRY(efi_call1)
        SAVE_XMM
@@ -50,6 +51,7 @@ ENTRY(efi_call1)
        addq $32, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call1)
 
 ENTRY(efi_call2)
        SAVE_XMM
@@ -59,6 +61,7 @@ ENTRY(efi_call2)
        addq $32, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call2)
 
 ENTRY(efi_call3)
        SAVE_XMM
@@ -69,6 +72,7 @@ ENTRY(efi_call3)
        addq $32, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call3)
 
 ENTRY(efi_call4)
        SAVE_XMM
@@ -80,6 +84,7 @@ ENTRY(efi_call4)
        addq $32, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call4)
 
 ENTRY(efi_call5)
        SAVE_XMM
@@ -92,6 +97,7 @@ ENTRY(efi_call5)
        addq $48, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call5)
 
 ENTRY(efi_call6)
        SAVE_XMM
@@ -107,3 +113,4 @@ ENTRY(efi_call6)
        addq $48, %rsp
        RESTORE_XMM
        ret
+ENDPROC(efi_call6)
index 46469029e9d3d6a70b0092230271a4dbf2fcfebc..c929add475c9caf073f97c11b15ff6d645226126 100644 (file)
  *     1C(%esp) - %ds
  *     20(%esp) - %es
  *     24(%esp) - %fs
- *     28(%esp) - orig_eax
- *     2C(%esp) - %eip
- *     30(%esp) - %cs
- *     34(%esp) - %eflags
- *     38(%esp) - %oldesp
- *     3C(%esp) - %oldss
+ *     28(%esp) - %gs          saved iff !CONFIG_X86_32_LAZY_GS
+ *     2C(%esp) - orig_eax
+ *     30(%esp) - %eip
+ *     34(%esp) - %cs
+ *     38(%esp) - %eflags
+ *     3C(%esp) - %oldesp
+ *     40(%esp) - %oldss
  *
  * "current" is in register %ebx during any slow entries.
  */
@@ -46,7 +47,7 @@
 #include <asm/errno.h>
 #include <asm/segment.h>
 #include <asm/smp.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/desc.h>
 #include <asm/percpu.h>
 #include <asm/dwarf2.h>
 #define resume_userspace_sig   resume_userspace
 #endif
 
-#define SAVE_ALL \
-       cld; \
-       pushl %fs; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       /*CFI_REL_OFFSET fs, 0;*/\
-       pushl %es; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       /*CFI_REL_OFFSET es, 0;*/\
-       pushl %ds; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       /*CFI_REL_OFFSET ds, 0;*/\
-       pushl %eax; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET eax, 0;\
-       pushl %ebp; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET ebp, 0;\
-       pushl %edi; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET edi, 0;\
-       pushl %esi; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET esi, 0;\
-       pushl %edx; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET edx, 0;\
-       pushl %ecx; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET ecx, 0;\
-       pushl %ebx; \
-       CFI_ADJUST_CFA_OFFSET 4;\
-       CFI_REL_OFFSET ebx, 0;\
-       movl $(__USER_DS), %edx; \
-       movl %edx, %ds; \
-       movl %edx, %es; \
-       movl $(__KERNEL_PERCPU), %edx; \
+/*
+ * User gs save/restore
+ *
+ * %gs is used for userland TLS, and the kernel only uses it for the
+ * stack canary, which gcc requires to be at %gs:20.  Read the comment
+ * at the top of stackprotector.h for more info.
+ *
+ * Local labels 98 and 99 are used.
+ */
+#ifdef CONFIG_X86_32_LAZY_GS
+
+ /* unfortunately, push/pop can't be no-ops */
+.macro PUSH_GS
+       pushl $0
+       CFI_ADJUST_CFA_OFFSET 4
+.endm
+.macro POP_GS pop=0
+       addl $(4 + \pop), %esp
+       CFI_ADJUST_CFA_OFFSET -(4 + \pop)
+.endm
+.macro POP_GS_EX
+.endm
+
+ /* all the rest are no-ops */
+.macro PTGS_TO_GS
+.endm
+.macro PTGS_TO_GS_EX
+.endm
+.macro GS_TO_REG reg
+.endm
+.macro REG_TO_PTGS reg
+.endm
+.macro SET_KERNEL_GS reg
+.endm
+
+#else  /* CONFIG_X86_32_LAZY_GS */
+
+.macro PUSH_GS
+       pushl %gs
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET gs, 0*/
+.endm
+
+.macro POP_GS pop=0
+98:    popl %gs
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE gs*/
+  .if \pop <> 0
+       add $\pop, %esp
+       CFI_ADJUST_CFA_OFFSET -\pop
+  .endif
+.endm
+.macro POP_GS_EX
+.pushsection .fixup, "ax"
+99:    movl $0, (%esp)
+       jmp 98b
+.section __ex_table, "a"
+       .align 4
+       .long 98b, 99b
+.popsection
+.endm
+
+.macro PTGS_TO_GS
+98:    mov PT_GS(%esp), %gs
+.endm
+.macro PTGS_TO_GS_EX
+.pushsection .fixup, "ax"
+99:    movl $0, PT_GS(%esp)
+       jmp 98b
+.section __ex_table, "a"
+       .align 4
+       .long 98b, 99b
+.popsection
+.endm
+
+.macro GS_TO_REG reg
+       movl %gs, \reg
+       /*CFI_REGISTER gs, \reg*/
+.endm
+.macro REG_TO_PTGS reg
+       movl \reg, PT_GS(%esp)
+       /*CFI_REL_OFFSET gs, PT_GS*/
+.endm
+.macro SET_KERNEL_GS reg
+       movl $(__KERNEL_STACK_CANARY), \reg
+       movl \reg, %gs
+.endm
+
+#endif /* CONFIG_X86_32_LAZY_GS */
+
+.macro SAVE_ALL
+       cld
+       PUSH_GS
+       pushl %fs
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET fs, 0;*/
+       pushl %es
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET es, 0;*/
+       pushl %ds
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET ds, 0;*/
+       pushl %eax
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET eax, 0
+       pushl %ebp
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET ebp, 0
+       pushl %edi
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET edi, 0
+       pushl %esi
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET esi, 0
+       pushl %edx
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET edx, 0
+       pushl %ecx
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET ecx, 0
+       pushl %ebx
+       CFI_ADJUST_CFA_OFFSET 4
+       CFI_REL_OFFSET ebx, 0
+       movl $(__USER_DS), %edx
+       movl %edx, %ds
+       movl %edx, %es
+       movl $(__KERNEL_PERCPU), %edx
        movl %edx, %fs
+       SET_KERNEL_GS %edx
+.endm
 
-#define RESTORE_INT_REGS \
-       popl %ebx;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE ebx;\
-       popl %ecx;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE ecx;\
-       popl %edx;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE edx;\
-       popl %esi;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE esi;\
-       popl %edi;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE edi;\
-       popl %ebp;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       CFI_RESTORE ebp;\
-       popl %eax;      \
-       CFI_ADJUST_CFA_OFFSET -4;\
+.macro RESTORE_INT_REGS
+       popl %ebx
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE ebx
+       popl %ecx
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE ecx
+       popl %edx
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE edx
+       popl %esi
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE esi
+       popl %edi
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE edi
+       popl %ebp
+       CFI_ADJUST_CFA_OFFSET -4
+       CFI_RESTORE ebp
+       popl %eax
+       CFI_ADJUST_CFA_OFFSET -4
        CFI_RESTORE eax
+.endm
 
-#define RESTORE_REGS   \
-       RESTORE_INT_REGS; \
-1:     popl %ds;       \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       /*CFI_RESTORE ds;*/\
-2:     popl %es;       \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       /*CFI_RESTORE es;*/\
-3:     popl %fs;       \
-       CFI_ADJUST_CFA_OFFSET -4;\
-       /*CFI_RESTORE fs;*/\
-.pushsection .fixup,"ax";      \
-4:     movl $0,(%esp); \
-       jmp 1b;         \
-5:     movl $0,(%esp); \
-       jmp 2b;         \
-6:     movl $0,(%esp); \
-       jmp 3b;         \
-.section __ex_table,"a";\
-       .align 4;       \
-       .long 1b,4b;    \
-       .long 2b,5b;    \
-       .long 3b,6b;    \
+.macro RESTORE_REGS pop=0
+       RESTORE_INT_REGS
+1:     popl %ds
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE ds;*/
+2:     popl %es
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE es;*/
+3:     popl %fs
+       CFI_ADJUST_CFA_OFFSET -4
+       /*CFI_RESTORE fs;*/
+       POP_GS \pop
+.pushsection .fixup, "ax"
+4:     movl $0, (%esp)
+       jmp 1b
+5:     movl $0, (%esp)
+       jmp 2b
+6:     movl $0, (%esp)
+       jmp 3b
+.section __ex_table, "a"
+       .align 4
+       .long 1b, 4b
+       .long 2b, 5b
+       .long 3b, 6b
 .popsection
+       POP_GS_EX
+.endm
 
-#define RING0_INT_FRAME \
-       CFI_STARTPROC simple;\
-       CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, 3*4;\
-       /*CFI_OFFSET cs, -2*4;*/\
+.macro RING0_INT_FRAME
+       CFI_STARTPROC simple
+       CFI_SIGNAL_FRAME
+       CFI_DEF_CFA esp, 3*4
+       /*CFI_OFFSET cs, -2*4;*/
        CFI_OFFSET eip, -3*4
+.endm
 
-#define RING0_EC_FRAME \
-       CFI_STARTPROC simple;\
-       CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, 4*4;\
-       /*CFI_OFFSET cs, -2*4;*/\
+.macro RING0_EC_FRAME
+       CFI_STARTPROC simple
+       CFI_SIGNAL_FRAME
+       CFI_DEF_CFA esp, 4*4
+       /*CFI_OFFSET cs, -2*4;*/
        CFI_OFFSET eip, -3*4
+.endm
 
-#define RING0_PTREGS_FRAME \
-       CFI_STARTPROC simple;\
-       CFI_SIGNAL_FRAME;\
-       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
-       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
-       CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
-       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
-       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
-       CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
-       CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
-       CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
-       CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
-       CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
-       CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
+.macro RING0_PTREGS_FRAME
+       CFI_STARTPROC simple
+       CFI_SIGNAL_FRAME
+       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
+       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
+       CFI_OFFSET eip, PT_EIP-PT_OLDESP
+       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
+       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
+       CFI_OFFSET eax, PT_EAX-PT_OLDESP
+       CFI_OFFSET ebp, PT_EBP-PT_OLDESP
+       CFI_OFFSET edi, PT_EDI-PT_OLDESP
+       CFI_OFFSET esi, PT_ESI-PT_OLDESP
+       CFI_OFFSET edx, PT_EDX-PT_OLDESP
+       CFI_OFFSET ecx, PT_ECX-PT_OLDESP
        CFI_OFFSET ebx, PT_EBX-PT_OLDESP
+.endm
 
 ENTRY(ret_from_fork)
        CFI_STARTPROC
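
For reference, the reason offset 20 is magic in the SET_KERNEL_GS path above: gcc's 32-bit stack-protector code hard-codes the canary load as a read of %gs:20, so whatever segment %gs selects must keep the canary exactly 20 bytes in. A user-space sketch of that layout constraint (mirroring, not quoting, the kernel's canary structure; the 32-bit-wide field keeps the offset stable on a 64-bit host too):

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	/* stand-in for the per-cpu area gcc expects behind %gs on 32-bit:
	 * the compiler hard-codes offset 20, so this layout is fixed */
	struct stack_canary_area {
		char pad[20];		/* offsets 0..19: reserved   */
		unsigned int canary;	/* offset 20: what gcc reads */
	};

	int main(void)
	{
		assert(offsetof(struct stack_canary_area, canary) == 20);
		printf("canary lives at %%gs:%zu\n",
		       offsetof(struct stack_canary_area, canary));
		return 0;
	}
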
@@ -341,8 +442,7 @@ sysenter_past_esp:
 
        GET_THREAD_INFO(%ebp)
 
-       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-       testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz sysenter_audit
 sysenter_do_call:
        cmpl $(nr_syscalls), %eax
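
The testw -> testl conversions here and below are not cosmetic: the thread-info work masks grew flags above bit 15, and a 16-bit test on %cx silently ignores them, so pending work could be missed on the syscall path. A small demonstration with illustrative bit positions:

	#include <stdint.h>
	#include <stdio.h>

	#define TIF_LOW_FLAG	(1u << 8)	/* fits in 16 bits */
	#define TIF_HIGH_FLAG	(1u << 27)	/* does not        */
	#define WORK_MASK	(TIF_LOW_FLAG | TIF_HIGH_FLAG)

	int main(void)
	{
		uint32_t flags = TIF_HIGH_FLAG;	/* only the high flag set */

		/* testw ..., %cx: only the low 16 bits take part */
		printf("16-bit test sees work: %d\n",
		       !!((uint16_t)flags & (uint16_t)WORK_MASK));
		/* testl ..., %ecx: the full mask is honoured */
		printf("32-bit test sees work: %d\n", !!(flags & WORK_MASK));
		return 0;
	}
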
@@ -353,7 +453,7 @@ sysenter_do_call:
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
-       testw $_TIF_ALLWORK_MASK, %cx
+       testl $_TIF_ALLWORK_MASK, %ecx
        jne sysexit_audit
 sysenter_exit:
 /* if something modifies registers it must also disable sysexit */
@@ -362,11 +462,12 @@ sysenter_exit:
        xorl %ebp,%ebp
        TRACE_IRQS_ON
 1:     mov  PT_FS(%esp), %fs
+       PTGS_TO_GS
        ENABLE_INTERRUPTS_SYSEXIT
 
 #ifdef CONFIG_AUDITSYSCALL
 sysenter_audit:
-       testw $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
+       testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
        jnz syscall_trace_entry
        addl $4,%esp
        CFI_ADJUST_CFA_OFFSET -4
@@ -383,7 +484,7 @@ sysenter_audit:
        jmp sysenter_do_call
 
 sysexit_audit:
-       testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
+       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jne syscall_exit_work
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
@@ -396,7 +497,7 @@ sysexit_audit:
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
-       testw $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %cx
+       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
        jne syscall_exit_work
        movl PT_EAX(%esp),%eax  /* reload syscall return value */
        jmp sysenter_exit
@@ -410,6 +511,7 @@ sysexit_audit:
        .align 4
        .long 1b,2b
 .popsection
+       PTGS_TO_GS_EX
 ENDPROC(ia32_sysenter_target)
 
        # system call handler stub
@@ -420,8 +522,7 @@ ENTRY(system_call)
        SAVE_ALL
        GET_THREAD_INFO(%ebp)
                                        # system call tracing in operation / emulation
-       /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
-       testw $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
+       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz syscall_trace_entry
        cmpl $(nr_syscalls), %eax
        jae syscall_badsys
@@ -435,7 +536,7 @@ syscall_exit:
                                        # between sampling and the iret
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
-       testw $_TIF_ALLWORK_MASK, %cx   # current->work
+       testl $_TIF_ALLWORK_MASK, %ecx  # current->work
        jne syscall_exit_work
 
 restore_all:
@@ -452,8 +553,7 @@ restore_all:
 restore_nocheck:
        TRACE_IRQS_IRET
 restore_nocheck_notrace:
-       RESTORE_REGS
-       addl $4, %esp                   # skip orig_eax/error_code
+       RESTORE_REGS 4                  # skip orig_eax/error_code
        CFI_ADJUST_CFA_OFFSET -4
 irq_return:
        INTERRUPT_RETURN
@@ -571,7 +671,7 @@ END(syscall_trace_entry)
        # perform syscall exit tracing
        ALIGN
 syscall_exit_work:
-       testb $_TIF_WORK_SYSCALL_EXIT, %cl
+       testl $_TIF_WORK_SYSCALL_EXIT, %ecx
        jz work_pending
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)     # could let syscall_trace_leave() call
@@ -595,28 +695,50 @@ syscall_badsys:
 END(syscall_badsys)
        CFI_ENDPROC
 
-#define FIXUP_ESPFIX_STACK \
-       /* since we are on a wrong stack, we cant make it a C code :( */ \
-       PER_CPU(gdt_page, %ebx); \
-       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
-       addl %esp, %eax; \
-       pushl $__KERNEL_DS; \
-       CFI_ADJUST_CFA_OFFSET 4; \
-       pushl %eax; \
-       CFI_ADJUST_CFA_OFFSET 4; \
-       lss (%esp), %esp; \
-       CFI_ADJUST_CFA_OFFSET -8;
-#define UNWIND_ESPFIX_STACK \
-       movl %ss, %eax; \
-       /* see if on espfix stack */ \
-       cmpw $__ESPFIX_SS, %ax; \
-       jne 27f; \
-       movl $__KERNEL_DS, %eax; \
-       movl %eax, %ds; \
-       movl %eax, %es; \
-       /* switch to normal stack */ \
-       FIXUP_ESPFIX_STACK; \
-27:;
+/*
+ * System calls that need a pt_regs pointer.
+ */
+#define PTREGSCALL(name) \
+       ALIGN; \
+ptregs_##name: \
+       leal 4(%esp),%eax; \
+       jmp sys_##name;
+
+PTREGSCALL(iopl)
+PTREGSCALL(fork)
+PTREGSCALL(clone)
+PTREGSCALL(vfork)
+PTREGSCALL(execve)
+PTREGSCALL(sigaltstack)
+PTREGSCALL(sigreturn)
+PTREGSCALL(rt_sigreturn)
+PTREGSCALL(vm86)
+PTREGSCALL(vm86old)
+
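
Each PTREGSCALL stub loads a pointer to the saved register frame, which sits 4 bytes above %esp (just past the stub's own return address), into %eax and tail-jumps to the real handler, whose C signature takes a struct pt_regs pointer. A toy version of the C side; the struct layout is hypothetical and heavily simplified:

	#include <stdio.h>

	/* hypothetical, heavily simplified register frame */
	struct pt_regs {
		unsigned long bx, cx, dx, si, di, bp, ax;
	};

	/* stand-in for the handler a ptregs_fork stub tail-jumps to */
	static long my_sys_fork(struct pt_regs *regs)
	{
		/* the real sys_fork clones the task using the saved frame */
		return (long)regs->ax;
	}

	int main(void)
	{
		struct pt_regs regs = { .ax = 42 };

		/* the stub effectively does: leal 4(%esp),%eax; jmp sys_fork */
		printf("my_sys_fork(&regs) = %ld\n", my_sys_fork(&regs));
		return 0;
	}
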
+.macro FIXUP_ESPFIX_STACK
+	/* since we are on the wrong stack, we can't make this C code :( */
+       PER_CPU(gdt_page, %ebx)
+       GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah)
+       addl %esp, %eax
+       pushl $__KERNEL_DS
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl %eax
+       CFI_ADJUST_CFA_OFFSET 4
+       lss (%esp), %esp
+       CFI_ADJUST_CFA_OFFSET -8
+.endm
+.macro UNWIND_ESPFIX_STACK
+       movl %ss, %eax
+       /* see if on espfix stack */
+       cmpw $__ESPFIX_SS, %ax
+       jne 27f
+       movl $__KERNEL_DS, %eax
+       movl %eax, %ds
+       movl %eax, %es
+       /* switch to normal stack */
+       FIXUP_ESPFIX_STACK
+27:
+.endm
 
 /*
  * Build the entry stubs and pointer table with some assembler magic.
@@ -672,7 +794,7 @@ common_interrupt:
 ENDPROC(common_interrupt)
        CFI_ENDPROC
 
-#define BUILD_INTERRUPT(name, nr)      \
+#define BUILD_INTERRUPT3(name, nr, fn) \
 ENTRY(name)                            \
        RING0_INT_FRAME;                \
        pushl $~(nr);                   \
@@ -680,13 +802,15 @@ ENTRY(name)                               \
        SAVE_ALL;                       \
        TRACE_IRQS_OFF                  \
        movl %esp,%eax;                 \
-       call smp_##name;                \
+       call fn;                        \
        jmp ret_from_intr;              \
        CFI_ENDPROC;                    \
 ENDPROC(name)
 
+#define BUILD_INTERRUPT(name, nr)      BUILD_INTERRUPT3(name, nr, smp_##name)
+
 /* The include is where all of the SMP etc. interrupts come from */
-#include "entry_arch.h"
+#include <asm/entry_arch.h>
 
 ENTRY(coprocessor_error)
        RING0_INT_FRAME
@@ -1068,7 +1192,10 @@ ENTRY(page_fault)
        CFI_ADJUST_CFA_OFFSET 4
        ALIGN
 error_code:
-       /* the function address is in %fs's slot on the stack */
+       /* the function address is in %gs's slot on the stack */
+       pushl %fs
+       CFI_ADJUST_CFA_OFFSET 4
+       /*CFI_REL_OFFSET fs, 0*/
        pushl %es
        CFI_ADJUST_CFA_OFFSET 4
        /*CFI_REL_OFFSET es, 0*/
@@ -1097,20 +1224,15 @@ error_code:
        CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET ebx, 0
        cld
-       pushl %fs
-       CFI_ADJUST_CFA_OFFSET 4
-       /*CFI_REL_OFFSET fs, 0*/
        movl $(__KERNEL_PERCPU), %ecx
        movl %ecx, %fs
        UNWIND_ESPFIX_STACK
-       popl %ecx
-       CFI_ADJUST_CFA_OFFSET -4
-       /*CFI_REGISTER es, ecx*/
-       movl PT_FS(%esp), %edi          # get the function address
+       GS_TO_REG %ecx
+       movl PT_GS(%esp), %edi          # get the function address
        movl PT_ORIG_EAX(%esp), %edx    # get the error code
        movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
-       mov  %ecx, PT_FS(%esp)
-       /*CFI_REL_OFFSET fs, ES*/
+       REG_TO_PTGS %ecx
+       SET_KERNEL_GS %ecx
        movl $(__USER_DS), %ecx
        movl %ecx, %ds
        movl %ecx, %es
@@ -1134,26 +1256,27 @@ END(page_fault)
  * by hand onto the new stack - while updating the return eip past
  * the instruction that would have done it for sysenter.
  */
-#define FIX_STACK(offset, ok, label)           \
-       cmpw $__KERNEL_CS,4(%esp);              \
-       jne ok;                                 \
-label:                                         \
-       movl TSS_sysenter_sp0+offset(%esp),%esp;        \
-       CFI_DEF_CFA esp, 0;                     \
-       CFI_UNDEFINED eip;                      \
-       pushfl;                                 \
-       CFI_ADJUST_CFA_OFFSET 4;                \
-       pushl $__KERNEL_CS;                     \
-       CFI_ADJUST_CFA_OFFSET 4;                \
-       pushl $sysenter_past_esp;               \
-       CFI_ADJUST_CFA_OFFSET 4;                \
+.macro FIX_STACK offset ok label
+       cmpw $__KERNEL_CS, 4(%esp)
+       jne \ok
+\label:
+       movl TSS_sysenter_sp0 + \offset(%esp), %esp
+       CFI_DEF_CFA esp, 0
+       CFI_UNDEFINED eip
+       pushfl
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl $__KERNEL_CS
+       CFI_ADJUST_CFA_OFFSET 4
+       pushl $sysenter_past_esp
+       CFI_ADJUST_CFA_OFFSET 4
        CFI_REL_OFFSET eip, 0
+.endm
 
 ENTRY(debug)
        RING0_INT_FRAME
        cmpl $ia32_sysenter_target,(%esp)
        jne debug_stack_correct
-       FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
+       FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
 debug_stack_correct:
        pushl $-1                       # mark this as an int
        CFI_ADJUST_CFA_OFFSET 4
@@ -1211,7 +1334,7 @@ nmi_stack_correct:
 
 nmi_stack_fixup:
        RING0_INT_FRAME
-       FIX_STACK(12,nmi_stack_correct, 1)
+       FIX_STACK 12, nmi_stack_correct, 1
        jmp nmi_stack_correct
 
 nmi_debug_stack_check:
@@ -1222,7 +1345,7 @@ nmi_debug_stack_check:
        jb nmi_stack_correct
        cmpl $debug_esp_fix_insn,(%esp)
        ja nmi_stack_correct
-       FIX_STACK(24,nmi_stack_correct, 1)
+       FIX_STACK 24, nmi_stack_correct, 1
        jmp nmi_stack_correct
 
 nmi_espfix_stack:
@@ -1234,7 +1357,7 @@ nmi_espfix_stack:
        CFI_ADJUST_CFA_OFFSET 4
        pushl %esp
        CFI_ADJUST_CFA_OFFSET 4
-       addw $4, (%esp)
+       addl $4, (%esp)
        /* copy the iret frame of 12 bytes */
        .rept 3
        pushl 16(%esp)
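
The addw -> addl change in nmi_espfix_stack is another operand-width fix: the value being adjusted is a copy of a full 32-bit stack pointer, and a 16-bit add wraps within the low 64 KiB instead of carrying into the high half. A demonstration of the wrap:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t esp = 0x0012fffe; /* 2 bytes below a 64 KiB boundary */

		/* addw $4, (%esp): only the low 16 bits participate */
		uint32_t addw = (esp & 0xffff0000u) | (uint16_t)(esp + 4);
		/* addl $4, (%esp): full 32-bit add */
		uint32_t addl = esp + 4;

		printf("addw result: %#010x (wrapped)\n", addw);
		printf("addl result: %#010x\n", addl);
		return 0;
	}
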
index a1346217e43c4d6cc636fa80ca70853970e64417..a331ec38af9ebb5d186685623448ac2e5263fd31 100644 (file)
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 #include <asm/hw_irq.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/irqflags.h>
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
+#include <asm/percpu.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -76,20 +77,17 @@ ENTRY(ftrace_caller)
        movq 8(%rbp), %rsi
        subq $MCOUNT_INSN_SIZE, %rdi
 
-.globl ftrace_call
-ftrace_call:
+GLOBAL(ftrace_call)
        call ftrace_stub
 
        MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
+GLOBAL(ftrace_graph_call)
        jmp ftrace_stub
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
        retq
 END(ftrace_caller)
 
@@ -109,8 +107,7 @@ ENTRY(mcount)
        jnz ftrace_graph_caller
 #endif
 
-.globl ftrace_stub
-ftrace_stub:
+GLOBAL(ftrace_stub)
        retq
 
 trace:
@@ -147,9 +144,7 @@ ENTRY(ftrace_graph_caller)
        retq
 END(ftrace_graph_caller)
 
-
-.globl return_to_handler
-return_to_handler:
+GLOBAL(return_to_handler)
        subq  $80, %rsp
 
        movq %rax, (%rsp)
@@ -187,6 +182,7 @@ return_to_handler:
 ENTRY(native_usergs_sysret64)
        swapgs
        sysretq
+ENDPROC(native_usergs_sysret64)
 #endif /* CONFIG_PARAVIRT */
 
 
@@ -209,7 +205,7 @@ ENTRY(native_usergs_sysret64)
 
        /* %rsp:at FRAMEEND */
        .macro FIXUP_TOP_OF_STACK tmp offset=0
-       movq %gs:pda_oldrsp,\tmp
+       movq PER_CPU_VAR(old_rsp),\tmp
        movq \tmp,RSP+\offset(%rsp)
        movq $__USER_DS,SS+\offset(%rsp)
        movq $__USER_CS,CS+\offset(%rsp)
@@ -220,7 +216,7 @@ ENTRY(native_usergs_sysret64)
 
        .macro RESTORE_TOP_OF_STACK tmp offset=0
        movq RSP+\offset(%rsp),\tmp
-       movq \tmp,%gs:pda_oldrsp
+       movq \tmp,PER_CPU_VAR(old_rsp)
        movq EFLAGS+\offset(%rsp),\tmp
        movq \tmp,R11+\offset(%rsp)
        .endm
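
The %gs:pda_* -> PER_CPU_VAR() conversions throughout this file track the x86 per-cpu rework: fields such as oldrsp, kernelstack and irqcount leave the PDA and become plain per-cpu variables (old_rsp, kernel_stack, irq_count), still addressed through the same %gs base. A user-space caricature of the access pattern, with the variable names taken from the diff and everything else assumed:

	#include <stdio.h>

	/* stand-in for DEFINE_PER_CPU(unsigned long, old_rsp): one slot
	 * per CPU, selected the way %gs-relative addressing would */
	#define NR_CPUS 4
	static unsigned long old_rsp[NR_CPUS];

	static int this_cpu = 1;	/* what %gs effectively selects */

	static void enter_syscall(unsigned long user_rsp)
	{
		old_rsp[this_cpu] = user_rsp; /* movq %rsp,PER_CPU_VAR(old_rsp) */
	}

	static unsigned long exit_syscall(void)
	{
		return old_rsp[this_cpu];     /* movq PER_CPU_VAR(old_rsp),%rsp */
	}

	int main(void)
	{
		enter_syscall(0xbfffe000UL);
		printf("restored user rsp: %#lx\n", exit_syscall());
		return 0;
	}
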
@@ -336,15 +332,15 @@ ENTRY(save_args)
        je 1f
        SWAPGS
        /*
-        * irqcount is used to check if a CPU is already on an interrupt stack
+        * irq_count is used to check if a CPU is already on an interrupt stack
         * or not. While this is essentially redundant with preempt_count it is
         * a little cheaper to use a separate counter in the PDA (short of
         * moving irq_enter into assembly, which would be too much work)
         */
-1:     incl %gs:pda_irqcount
+1:     incl PER_CPU_VAR(irq_count)
        jne 2f
        popq_cfi %rax                   /* move return address... */
-       mov %gs:pda_irqstackptr,%rsp
+       mov PER_CPU_VAR(irq_stack_ptr),%rsp
        EMPTY_FRAME 0
        pushq_cfi %rbp                  /* backlink for unwinder */
        pushq_cfi %rax                  /* ... to the new stack */
@@ -372,6 +368,7 @@ ENTRY(save_rest)
 END(save_rest)
 
 /* save complete stack frame */
+       .pushsection .kprobes.text, "ax"
 ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
@@ -400,6 +397,7 @@ ENTRY(save_paranoid)
 1:     ret
        CFI_ENDPROC
 END(save_paranoid)
+       .popsection
 
 /*
  * A newly forked process directly context switches into this address.
@@ -409,6 +407,8 @@ END(save_paranoid)
 ENTRY(ret_from_fork)
        DEFAULT_FRAME
 
+       LOCK ; btr $TIF_FORK,TI_flags(%r8)
+
        push kernel_eflags(%rip)
        CFI_ADJUST_CFA_OFFSET 8
        popf                                    # reset kernel eflags
@@ -418,7 +418,6 @@ ENTRY(ret_from_fork)
 
        GET_THREAD_INFO(%rcx)
 
-       CFI_REMEMBER_STATE
        RESTORE_REST
 
        testl $3, CS-ARGOFFSET(%rsp)            # from kernel_thread?
@@ -430,7 +429,6 @@ ENTRY(ret_from_fork)
        RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
        jmp ret_from_sys_call                   # go to the SYSRET fastpath
 
-       CFI_RESTORE_STATE
        CFI_ENDPROC
 END(ret_from_fork)
 
@@ -468,7 +466,7 @@ END(ret_from_fork)
 ENTRY(system_call)
        CFI_STARTPROC   simple
        CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,PDA_STACKOFFSET
+       CFI_DEF_CFA     rsp,KERNEL_STACK_OFFSET
        CFI_REGISTER    rip,rcx
        /*CFI_REGISTER  rflags,r11*/
        SWAPGS_UNSAFE_STACK
@@ -479,8 +477,8 @@ ENTRY(system_call)
         */
 ENTRY(system_call_after_swapgs)
 
-       movq    %rsp,%gs:pda_oldrsp
-       movq    %gs:pda_kernelstack,%rsp
+       movq    %rsp,PER_CPU_VAR(old_rsp)
+       movq    PER_CPU_VAR(kernel_stack),%rsp
        /*
         * No need to follow this irqs off/on section - it's straight
         * and short:
@@ -523,7 +521,7 @@ sysret_check:
        CFI_REGISTER    rip,rcx
        RESTORE_ARGS 0,-ARG_SKIP,1
        /*CFI_REGISTER  rflags,r11*/
-       movq    %gs:pda_oldrsp, %rsp
+       movq    PER_CPU_VAR(old_rsp), %rsp
        USERGS_SYSRET64
 
        CFI_RESTORE_STATE
@@ -630,16 +628,14 @@ tracesys:
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
-       .globl int_ret_from_sys_call
-       .globl int_with_check
-int_ret_from_sys_call:
+GLOBAL(int_ret_from_sys_call)
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $3,CS-ARGOFFSET(%rsp)
        je retint_restore_args
        movl $_TIF_ALLWORK_MASK,%edi
        /* edi: mask to check */
-int_with_check:
+GLOBAL(int_with_check)
        LOCKDEP_SYS_EXIT_IRQ
        GET_THREAD_INFO(%rcx)
        movl TI_flags(%rcx),%edx
@@ -833,11 +829,11 @@ common_interrupt:
        XCPT_FRAME
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
-       /* 0(%rsp): oldrsp-ARGOFFSET */
+       /* 0(%rsp): old_rsp-ARGOFFSET */
 ret_from_intr:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
@@ -982,10 +978,14 @@ apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
        irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif
 
+#ifdef CONFIG_X86_UV
 apicinterrupt UV_BAU_MESSAGE \
        uv_bau_message_intr1 uv_bau_message_interrupt
+#endif
 apicinterrupt LOCAL_TIMER_VECTOR \
        apic_timer_interrupt smp_apic_timer_interrupt
+apicinterrupt GENERIC_INTERRUPT_VECTOR \
+       generic_interrupt smp_generic_interrupt
 
 #ifdef CONFIG_SMP
 apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
@@ -1073,10 +1073,10 @@ ENTRY(\sym)
        TRACE_IRQS_OFF
        movq %rsp,%rdi          /* pt_regs pointer */
        xorl %esi,%esi          /* no error code */
-       movq %gs:pda_data_offset, %rbp
-       subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+       PER_CPU(init_tss, %rbp)
+       subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
        call \do_sym
-       addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+       addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
        jmp paranoid_exit       /* %ebx: no swapgs flag */
        CFI_ENDPROC
 END(\sym)
@@ -1138,7 +1138,7 @@ ENTRY(native_load_gs_index)
        CFI_STARTPROC
        pushf
        CFI_ADJUST_CFA_OFFSET 8
-       DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
+       DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
        SWAPGS
 gs_change:
        movl %edi,%gs
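
The DISABLE_INTERRUPTS clobber-mask change fixes a thinko: OR-ing CLBR_ANY with the complement of CLBR_RDI sets every bit, so %rdi was never actually excluded from the clobber set; AND-ing with the complement is what clears it. With purely illustrative bit values:

	#include <stdio.h>

	#define CLBR_RDI 0x04u	/* illustrative values, not the kernel's */
	#define CLBR_ANY 0xffu

	int main(void)
	{
		unsigned int wrong = CLBR_ANY | ~CLBR_RDI; /* all bits set  */
		unsigned int right = CLBR_ANY & ~CLBR_RDI; /* rdi bit clear */

		printf("CLBR_ANY | ~CLBR_RDI = %#x (rdi still clobbered)\n",
		       wrong);
		printf("CLBR_ANY & ~CLBR_RDI = %#x (rdi preserved)\n", right);
		return 0;
	}
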
@@ -1260,14 +1260,14 @@ ENTRY(call_softirq)
        CFI_REL_OFFSET rbp,0
        mov  %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
-       incl %gs:pda_irqcount
-       cmove %gs:pda_irqstackptr,%rsp
+       incl PER_CPU_VAR(irq_count)
+       cmove PER_CPU_VAR(irq_stack_ptr),%rsp
        push  %rbp                      # backlink for old unwinder
        call __do_softirq
        leaveq
        CFI_DEF_CFA_REGISTER    rsp
        CFI_ADJUST_CFA_OFFSET   -8
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        ret
        CFI_ENDPROC
 END(call_softirq)
@@ -1297,15 +1297,15 @@ ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
        movq %rdi, %rsp            # we don't return, adjust the stack frame
        CFI_ENDPROC
        DEFAULT_FRAME
-11:    incl %gs:pda_irqcount
+11:    incl PER_CPU_VAR(irq_count)
        movq %rsp,%rbp
        CFI_DEF_CFA_REGISTER rbp
-       cmovzq %gs:pda_irqstackptr,%rsp
+       cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
        pushq %rbp                      # backlink for old unwinder
        call xen_evtchn_do_upcall
        popq %rsp
        CFI_DEF_CFA_REGISTER rsp
-       decl %gs:pda_irqcount
+       decl PER_CPU_VAR(irq_count)
        jmp  error_exit
        CFI_ENDPROC
 END(do_hypervisor_callback)
diff --git a/arch/x86/kernel/es7000_32.c b/arch/x86/kernel/es7000_32.c
deleted file mode 100644 (file)
index 53699c9..0000000
+++ /dev/null
@@ -1,378 +0,0 @@
-/*
- * Written by: Garry Forsgren, Unisys Corporation
- *             Natalie Protasevich, Unisys Corporation
- * This file contains the code to configure and interface
- * with Unisys ES7000 series hardware system manager.
- *
- * Copyright (c) 2003 Unisys Corporation.  All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Contact information: Unisys Corporation, Township Line & Union Meeting
- * Roads-A, Unisys Way, Blue Bell, Pennsylvania, 19424, or:
- *
- * http://www.unisys.com
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <asm/io.h>
-#include <asm/nmi.h>
-#include <asm/smp.h>
-#include <asm/atomic.h>
-#include <asm/apicdef.h>
-#include <mach_mpparse.h>
-#include <asm/genapic.h>
-#include <asm/setup.h>
-
-/*
- * ES7000 chipsets
- */
-
-#define NON_UNISYS             0
-#define ES7000_CLASSIC         1
-#define ES7000_ZORRO           2
-
-
-#define        MIP_REG                 1
-#define        MIP_PSAI_REG            4
-
-#define        MIP_BUSY                1
-#define        MIP_SPIN                0xf0000
-#define        MIP_VALID               0x0100000000000000ULL
-#define        MIP_PORT(VALUE) ((VALUE >> 32) & 0xffff)
-
-#define        MIP_RD_LO(VALUE)        (VALUE & 0xffffffff)
-
-struct mip_reg_info {
-       unsigned long long mip_info;
-       unsigned long long delivery_info;
-       unsigned long long host_reg;
-       unsigned long long mip_reg;
-};
-
-struct part_info {
-       unsigned char type;
-       unsigned char length;
-       unsigned char part_id;
-       unsigned char apic_mode;
-       unsigned long snum;
-       char ptype[16];
-       char sname[64];
-       char pname[64];
-};
-
-struct psai {
-       unsigned long long entry_type;
-       unsigned long long addr;
-       unsigned long long bep_addr;
-};
-
-struct es7000_mem_info {
-       unsigned char type;
-       unsigned char length;
-       unsigned char resv[6];
-       unsigned long long  start;
-       unsigned long long  size;
-};
-
-struct es7000_oem_table {
-       unsigned long long hdr;
-       struct mip_reg_info mip;
-       struct part_info pif;
-       struct es7000_mem_info shm;
-       struct psai psai;
-};
-
-#ifdef CONFIG_ACPI
-
-struct oem_table {
-       struct acpi_table_header Header;
-       u32 OEMTableAddr;
-       u32 OEMTableSize;
-};
-
-extern int find_unisys_acpi_oem_table(unsigned long *oem_addr);
-extern void unmap_unisys_acpi_oem_table(unsigned long oem_addr);
-#endif
-
-struct mip_reg {
-       unsigned long long off_0;
-       unsigned long long off_8;
-       unsigned long long off_10;
-       unsigned long long off_18;
-       unsigned long long off_20;
-       unsigned long long off_28;
-       unsigned long long off_30;
-       unsigned long long off_38;
-};
-
-#define        MIP_SW_APIC             0x1020b
-#define        MIP_FUNC(VALUE)         (VALUE & 0xff)
-
-/*
- * ES7000 Globals
- */
-
-static volatile unsigned long  *psai = NULL;
-static struct mip_reg          *mip_reg;
-static struct mip_reg          *host_reg;
-static int                     mip_port;
-static unsigned long           mip_addr, host_addr;
-
-int es7000_plat;
-
-/*
- * GSI override for ES7000 platforms.
- */
-
-static unsigned int base;
-
-static int
-es7000_rename_gsi(int ioapic, int gsi)
-{
-       if (es7000_plat == ES7000_ZORRO)
-               return gsi;
-
-       if (!base) {
-               int i;
-               for (i = 0; i < nr_ioapics; i++)
-                       base += nr_ioapic_registers[i];
-       }
-
-       if (!ioapic && (gsi < 16))
-               gsi += base;
-       return gsi;
-}
-
-static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
-{
-       unsigned long vect = 0, psaival = 0;
-
-       if (psai == NULL)
-               return -1;
-
-       vect = ((unsigned long)__pa(eip)/0x1000) << 16;
-       psaival = (0x1000000 | vect | cpu);
-
-       while (*psai & 0x1000000)
-               ;
-
-       *psai = psaival;
-
-       return 0;
-}
-
-static void noop_wait_for_deassert(atomic_t *deassert_not_used)
-{
-}
-
-static int __init es7000_update_genapic(void)
-{
-       genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
-
-       /* MPENTIUMIII */
-       if (boot_cpu_data.x86 == 6 &&
-           (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) {
-               es7000_update_genapic_to_cluster();
-               genapic->wait_for_init_deassert = noop_wait_for_deassert;
-               genapic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
-       }
-
-       return 0;
-}
-
-void __init
-setup_unisys(void)
-{
-       /*
-        * Determine the generation of the ES7000 currently running.
-        *
-        * es7000_plat = 1 if the machine is a 5xx ES7000 box
-        * es7000_plat = 2 if the machine is a x86_64 ES7000 box
-        *
-        */
-       if (!(boot_cpu_data.x86 <= 15 && boot_cpu_data.x86_model <= 2))
-               es7000_plat = ES7000_ZORRO;
-       else
-               es7000_plat = ES7000_CLASSIC;
-       ioapic_renumber_irq = es7000_rename_gsi;
-
-       x86_quirks->update_genapic = es7000_update_genapic;
-}
-
-/*
- * Parse the OEM Table
- */
-
-int __init
-parse_unisys_oem (char *oemptr)
-{
-       int                     i;
-       int                     success = 0;
-       unsigned char           type, size;
-       unsigned long           val;
-       char                    *tp = NULL;
-       struct psai             *psaip = NULL;
-       struct mip_reg_info     *mi;
-       struct mip_reg          *host, *mip;
-
-       tp = oemptr;
-
-       tp += 8;
-
-       for (i=0; i <= 6; i++) {
-               type = *tp++;
-               size = *tp++;
-               tp -= 2;
-               switch (type) {
-               case MIP_REG:
-                       mi = (struct mip_reg_info *)tp;
-                       val = MIP_RD_LO(mi->host_reg);
-                       host_addr = val;
-                       host = (struct mip_reg *)val;
-                       host_reg = __va(host);
-                       val = MIP_RD_LO(mi->mip_reg);
-                       mip_port = MIP_PORT(mi->mip_info);
-                       mip_addr = val;
-                       mip = (struct mip_reg *)val;
-                       mip_reg = __va(mip);
-                       pr_debug("es7000_mipcfg: host_reg = 0x%lx \n",
-                                (unsigned long)host_reg);
-                       pr_debug("es7000_mipcfg: mip_reg = 0x%lx \n",
-                                (unsigned long)mip_reg);
-                       success++;
-                       break;
-               case MIP_PSAI_REG:
-                       psaip = (struct psai *)tp;
-                       if (tp != NULL) {
-                               if (psaip->addr)
-                                       psai = __va(psaip->addr);
-                               else
-                                       psai = NULL;
-                               success++;
-                       }
-                       break;
-               default:
-                       break;
-               }
-               tp += size;
-       }
-
-       if (success < 2) {
-               es7000_plat = NON_UNISYS;
-       } else
-               setup_unisys();
-       return es7000_plat;
-}
-
-#ifdef CONFIG_ACPI
-static unsigned long oem_addrX;
-static unsigned long oem_size;
-int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
-{
-       struct acpi_table_header *header = NULL;
-       int i = 0;
-
-       while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
-               if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
-                       struct oem_table *t = (struct oem_table *)header;
-
-                       oem_addrX = t->OEMTableAddr;
-                       oem_size = t->OEMTableSize;
-
-                       *oem_addr = (unsigned long)__acpi_map_table(oem_addrX,
-                                                                   oem_size);
-                       return 0;
-               }
-       }
-       return -1;
-}
-
-void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr)
-{
-}
-#endif
-
-static void
-es7000_spin(int n)
-{
-       int i = 0;
-
-       while (i++ < n)
-               rep_nop();
-}
-
-static int __init
-es7000_mip_write(struct mip_reg *mip_reg)
-{
-       int                     status = 0;
-       int                     spin;
-
-       spin = MIP_SPIN;
-       while (((unsigned long long)host_reg->off_38 &
-               (unsigned long long)MIP_VALID) != 0) {
-                       if (--spin <= 0) {
-                               printk("es7000_mip_write: Timeout waiting for Host Valid Flag");
-                               return -1;
-                       }
-               es7000_spin(MIP_SPIN);
-       }
-
-       memcpy(host_reg, mip_reg, sizeof(struct mip_reg));
-       outb(1, mip_port);
-
-       spin = MIP_SPIN;
-
-       while (((unsigned long long)mip_reg->off_38 &
-               (unsigned long long)MIP_VALID) == 0) {
-               if (--spin <= 0) {
-                       printk("es7000_mip_write: Timeout waiting for MIP Valid Flag");
-                       return -1;
-               }
-               es7000_spin(MIP_SPIN);
-       }
-
-       status = ((unsigned long long)mip_reg->off_0 &
-               (unsigned long long)0xffff0000000000ULL) >> 48;
-       mip_reg->off_38 = ((unsigned long long)mip_reg->off_38 &
-               (unsigned long long)~MIP_VALID);
-       return status;
-}
-
-void __init
-es7000_sw_apic(void)
-{
-       if (es7000_plat) {
-               int mip_status;
-               struct mip_reg es7000_mip_reg;
-
-               printk("ES7000: Enabling APIC mode.\n");
-               memset(&es7000_mip_reg, 0, sizeof(struct mip_reg));
-               es7000_mip_reg.off_0 = MIP_SW_APIC;
-               es7000_mip_reg.off_38 = (MIP_VALID);
-               while ((mip_status = es7000_mip_write(&es7000_mip_reg)) != 0)
-                       printk("es7000_sw_apic: command failed, status = %x\n",
-                               mip_status);
-               return;
-       }
-}
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
deleted file mode 100644 (file)
index 2bced78..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Generic APIC sub-arch probe layer.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/hardirq.h>
-#include <linux/dmar.h>
-
-#include <asm/smp.h>
-#include <asm/ipi.h>
-#include <asm/genapic.h>
-#include <asm/setup.h>
-
-extern struct genapic apic_flat;
-extern struct genapic apic_physflat;
-extern struct genapic apic_x2xpic_uv_x;
-extern struct genapic apic_x2apic_phys;
-extern struct genapic apic_x2apic_cluster;
-
-struct genapic __read_mostly *genapic = &apic_flat;
-
-static struct genapic *apic_probe[] __initdata = {
-       &apic_x2apic_uv_x,
-       &apic_x2apic_phys,
-       &apic_x2apic_cluster,
-       &apic_physflat,
-       NULL,
-};
-
-/*
- * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
- */
-void __init setup_apic_routing(void)
-{
-       if (genapic == &apic_x2apic_phys || genapic == &apic_x2apic_cluster) {
-               if (!intr_remapping_enabled)
-                       genapic = &apic_flat;
-       }
-
-       if (genapic == &apic_flat) {
-               if (max_physical_apicid >= 8)
-                       genapic = &apic_physflat;
-               printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
-       }
-
-       if (x86_quirks->update_genapic)
-               x86_quirks->update_genapic();
-}
-
-/* Same for both flat and physical. */
-
-void apic_send_IPI_self(int vector)
-{
-       __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
-}
-
-int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       int i;
-
-       for (i = 0; apic_probe[i]; ++i) {
-               if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
-                       genapic = apic_probe[i];
-                       printk(KERN_INFO "Setting APIC routing to %s.\n",
-                               genapic->name);
-                       return 1;
-               }
-       }
-       return 0;
-}
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
deleted file mode 100644 (file)
index 3418548..0000000
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Copyright 2004 James Cleverdon, IBM.
- * Subject to the GNU Public License, v.2
- *
- * Flat APIC subarch code.
- *
- * Hacked for x86-64 by James Cleverdon from i386 architecture code by
- * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
- * James Cleverdon.
- */
-#include <linux/errno.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/hardirq.h>
-#include <asm/smp.h>
-#include <asm/ipi.h>
-#include <asm/genapic.h>
-#include <mach_apicdef.h>
-
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-#endif
-
-static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 1;
-}
-
-static const struct cpumask *flat_target_cpus(void)
-{
-       return cpu_online_mask;
-}
-
-static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt desitination.
-        */
-       cpumask_clear(retmask);
-       cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
-}
-
-/*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
- */
-static void flat_init_apic_ldr(void)
-{
-       unsigned long val;
-       unsigned long num, id;
-
-       num = smp_processor_id();
-       id = 1UL << num;
-       apic_write(APIC_DFR, APIC_DFR_FLAT);
-       val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-       val |= SET_APIC_LOGICAL_ID(id);
-       apic_write(APIC_LDR, val);
-}
-
-static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __send_IPI_dest_field(mask, vector, APIC_DEST_LOGICAL);
-       local_irq_restore(flags);
-}
-
-static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
-{
-       unsigned long mask = cpumask_bits(cpumask)[0];
-
-       _flat_send_IPI_mask(mask, vector);
-}
-
-static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
-                                         int vector)
-{
-       unsigned long mask = cpumask_bits(cpumask)[0];
-       int cpu = smp_processor_id();
-
-       if (cpu < BITS_PER_LONG)
-               clear_bit(cpu, &mask);
-       _flat_send_IPI_mask(mask, vector);
-}
-
-static void flat_send_IPI_allbutself(int vector)
-{
-       int cpu = smp_processor_id();
-#ifdef CONFIG_HOTPLUG_CPU
-       int hotplug = 1;
-#else
-       int hotplug = 0;
-#endif
-       if (hotplug || vector == NMI_VECTOR) {
-               if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
-                       unsigned long mask = cpumask_bits(cpu_online_mask)[0];
-
-                       if (cpu < BITS_PER_LONG)
-                               clear_bit(cpu, &mask);
-
-                       _flat_send_IPI_mask(mask, vector);
-               }
-       } else if (num_online_cpus() > 1) {
-               __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
-       }
-}
-
-static void flat_send_IPI_all(int vector)
-{
-       if (vector == NMI_VECTOR)
-               flat_send_IPI_mask(cpu_online_mask, vector);
-       else
-               __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
-}
-
-static unsigned int get_apic_id(unsigned long x)
-{
-       unsigned int id;
-
-       id = (((x)>>24) & 0xFFu);
-       return id;
-}
-
-static unsigned long set_apic_id(unsigned int id)
-{
-       unsigned long x;
-
-       x = ((id & 0xFFu)<<24);
-       return x;
-}
-
-static unsigned int read_xapic_id(void)
-{
-       unsigned int id;
-
-       id = get_apic_id(apic_read(APIC_ID));
-       return id;
-}
-
-static int flat_apic_id_registered(void)
-{
-       return physid_isset(read_xapic_id(), phys_cpu_present_map);
-}
-
-static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
-}
-
-static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                               const struct cpumask *andmask)
-{
-       unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
-       unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
-
-       return mask1 & mask2;
-}
-
-static unsigned int phys_pkg_id(int index_msb)
-{
-       return hard_smp_processor_id() >> index_msb;
-}
-
-struct genapic apic_flat =  {
-       .name = "flat",
-       .acpi_madt_oem_check = flat_acpi_madt_oem_check,
-       .int_delivery_mode = dest_LowestPrio,
-       .int_dest_mode = (APIC_DEST_LOGICAL != 0),
-       .target_cpus = flat_target_cpus,
-       .vector_allocation_domain = flat_vector_allocation_domain,
-       .apic_id_registered = flat_apic_id_registered,
-       .init_apic_ldr = flat_init_apic_ldr,
-       .send_IPI_all = flat_send_IPI_all,
-       .send_IPI_allbutself = flat_send_IPI_allbutself,
-       .send_IPI_mask = flat_send_IPI_mask,
-       .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
-       .send_IPI_self = apic_send_IPI_self,
-       .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFu<<24),
-};
-
-/*
- * Physflat mode is used when there are more than 8 CPUs on a AMD system.
- * We cannot use logical delivery in this case because the mask
- * overflows, so use physical mode.
- */
-static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-#ifdef CONFIG_ACPI
-       /*
-        * Quirk: some x86_64 machines can only use physical APIC mode
-        * regardless of how many processors are present (x86_64 ES7000
-        * is an example).
-        */
-       if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
-               (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
-               printk(KERN_DEBUG "system APIC only can use physical flat");
-               return 1;
-       }
-#endif
-
-       return 0;
-}
-
-static const struct cpumask *physflat_target_cpus(void)
-{
-       return cpu_online_mask;
-}
-
-static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-       cpumask_clear(retmask);
-       cpumask_set_cpu(cpu, retmask);
-}
-
-static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
-{
-       send_IPI_mask_sequence(cpumask, vector);
-}
-
-static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
-                                             int vector)
-{
-       send_IPI_mask_allbutself(cpumask, vector);
-}
-
-static void physflat_send_IPI_allbutself(int vector)
-{
-       send_IPI_mask_allbutself(cpu_online_mask, vector);
-}
-
-static void physflat_send_IPI_all(int vector)
-{
-       physflat_send_IPI_mask(cpu_online_mask, vector);
-}
-
-static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       cpu = cpumask_first(cpumask);
-       if ((unsigned)cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_apicid, cpu);
-       else
-               return BAD_APICID;
-}
-
-static unsigned int
-physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                               const struct cpumask *andmask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       for_each_cpu_and(cpu, cpumask, andmask)
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       if (cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_apicid, cpu);
-       return BAD_APICID;
-}
-
-struct genapic apic_physflat =  {
-       .name = "physical flat",
-       .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = physflat_target_cpus,
-       .vector_allocation_domain = physflat_vector_allocation_domain,
-       .apic_id_registered = flat_apic_id_registered,
-       .init_apic_ldr = flat_init_apic_ldr,/*not needed, but shouldn't hurt*/
-       .send_IPI_all = physflat_send_IPI_all,
-       .send_IPI_allbutself = physflat_send_IPI_allbutself,
-       .send_IPI_mask = physflat_send_IPI_mask,
-       .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
-       .send_IPI_self = apic_send_IPI_self,
-       .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFu<<24),
-};
diff --git a/arch/x86/kernel/genx2apic_cluster.c b/arch/x86/kernel/genx2apic_cluster.c
deleted file mode 100644 (file)
index 6ce497c..0000000
+++ /dev/null
@@ -1,198 +0,0 @@
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/dmar.h>
-
-#include <asm/smp.h>
-#include <asm/ipi.h>
-#include <asm/genapic.h>
-
-DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
-
-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       if (cpu_has_x2apic)
-               return 1;
-
-       return 0;
-}
-
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
-static const struct cpumask *x2apic_target_cpus(void)
-{
-       return cpumask_of(0);
-}
-
-/*
- * for now each logical cpu is in its own vector allocation domain.
- */
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-       cpumask_clear(retmask);
-       cpumask_set_cpu(cpu, retmask);
-}
-
-static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
-                                  unsigned int dest)
-{
-       unsigned long cfg;
-
-       cfg = __prepare_ICR(0, vector, dest);
-
-       /*
-        * send the IPI.
-        */
-       x2apic_icr_write(cfg, apicid);
-}
-
-/*
- * for now, we send the IPI's one by one in the cpumask.
- * TBD: Based on the cpu mask, we can send the IPI's to the cluster group
- * at once. We have 16 cpu's in a cluster. This will minimize IPI register
- * writes.
- */
-static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               __x2apic_send_IPI_dest(
-                       per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                       vector, APIC_DEST_LOGICAL);
-       local_irq_restore(flags);
-}
-
-static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
-                                           int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
-       unsigned long this_cpu = smp_processor_id();
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
-                               per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                               vector, APIC_DEST_LOGICAL);
-       local_irq_restore(flags);
-}
-
-static void x2apic_send_IPI_allbutself(int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
-       unsigned long this_cpu = smp_processor_id();
-
-       local_irq_save(flags);
-       for_each_online_cpu(query_cpu)
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
-                               per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                               vector, APIC_DEST_LOGICAL);
-       local_irq_restore(flags);
-}
-
-static void x2apic_send_IPI_all(int vector)
-{
-       x2apic_send_IPI_mask(cpu_online_mask, vector);
-}
-
-static int x2apic_apic_id_registered(void)
-{
-       return 1;
-}
-
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one logical APIC ID.
-        * May as well be the first.
-        */
-       cpu = cpumask_first(cpumask);
-       if ((unsigned)cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_logical_apicid, cpu);
-       else
-               return BAD_APICID;
-}
-
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one logical APIC ID.
-        * May as well be the first.
-        */
-       for_each_cpu_and(cpu, cpumask, andmask)
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       if (cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_logical_apicid, cpu);
-       return BAD_APICID;
-}
-
-static unsigned int get_apic_id(unsigned long x)
-{
-       unsigned int id;
-
-       id = x;
-       return id;
-}
-
-static unsigned long set_apic_id(unsigned int id)
-{
-       unsigned long x;
-
-       x = id;
-       return x;
-}
-
-static unsigned int phys_pkg_id(int index_msb)
-{
-       return current_cpu_data.initial_apicid >> index_msb;
-}
-
-static void x2apic_send_IPI_self(int vector)
-{
-       apic_write(APIC_SELF_IPI, vector);
-}
-
-static void init_x2apic_ldr(void)
-{
-       int cpu = smp_processor_id();
-
-       per_cpu(x86_cpu_to_logical_apicid, cpu) = apic_read(APIC_LDR);
-       return;
-}
-
-struct genapic apic_x2apic_cluster = {
-       .name = "cluster x2apic",
-       .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
-       .int_delivery_mode = dest_LowestPrio,
-       .int_dest_mode = (APIC_DEST_LOGICAL != 0),
-       .target_cpus = x2apic_target_cpus,
-       .vector_allocation_domain = x2apic_vector_allocation_domain,
-       .apic_id_registered = x2apic_apic_id_registered,
-       .init_apic_ldr = init_x2apic_ldr,
-       .send_IPI_all = x2apic_send_IPI_all,
-       .send_IPI_allbutself = x2apic_send_IPI_allbutself,
-       .send_IPI_mask = x2apic_send_IPI_mask,
-       .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
-       .send_IPI_self = x2apic_send_IPI_self,
-       .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
-};
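
The __x2apic_send_IPI_dest() path in the file above relies on the ICR being
a single MSR in x2apic mode. A sketch of what x2apic_icr_write() boils down
to, assuming the standard x2apic register-to-MSR mapping (MSR 0x800 plus the
xAPIC register offset shifted right by 4): the destination ID rides in the
high 32 bits of one 64-bit write, so no ICR-idle polling is needed as in
xAPIC mode.

static inline void x2apic_icr_write_sketch(u32 low, u32 apicid)
{
        /* APIC_ICR (0x300) maps to MSR APIC_BASE_MSR + 0x30 in x2apic mode */
        wrmsrl(APIC_BASE_MSR + (APIC_ICR >> 4), ((u64)apicid) << 32 | low);
}
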
diff --git a/arch/x86/kernel/genx2apic_phys.c b/arch/x86/kernel/genx2apic_phys.c
deleted file mode 100644 (file)
index 21bcc0e..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/dmar.h>
-
-#include <asm/smp.h>
-#include <asm/ipi.h>
-#include <asm/genapic.h>
-
-static int x2apic_phys;
-
-static int set_x2apic_phys_mode(char *arg)
-{
-       x2apic_phys = 1;
-       return 0;
-}
-early_param("x2apic_phys", set_x2apic_phys_mode);
-
-static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       if (cpu_has_x2apic && x2apic_phys)
-               return 1;
-
-       return 0;
-}
-
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
-static const struct cpumask *x2apic_target_cpus(void)
-{
-       return cpumask_of(0);
-}
-
-static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-       cpumask_clear(retmask);
-       cpumask_set_cpu(cpu, retmask);
-}
-
-static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
-                                  unsigned int dest)
-{
-       unsigned long cfg;
-
-       cfg = __prepare_ICR(0, vector, dest);
-
-       /*
-        * send the IPI.
-        */
-       x2apic_icr_write(cfg, apicid);
-}
-
-static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
-                                      vector, APIC_DEST_PHYSICAL);
-       }
-       local_irq_restore(flags);
-}
-
-static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
-                                           int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
-       unsigned long this_cpu = smp_processor_id();
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask) {
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
-                               per_cpu(x86_cpu_to_apicid, query_cpu),
-                               vector, APIC_DEST_PHYSICAL);
-       }
-       local_irq_restore(flags);
-}
-
-static void x2apic_send_IPI_allbutself(int vector)
-{
-       unsigned long flags;
-       unsigned long query_cpu;
-       unsigned long this_cpu = smp_processor_id();
-
-       local_irq_save(flags);
-       for_each_online_cpu(query_cpu)
-               if (query_cpu != this_cpu)
-                       __x2apic_send_IPI_dest(
-                               per_cpu(x86_cpu_to_apicid, query_cpu),
-                               vector, APIC_DEST_PHYSICAL);
-       local_irq_restore(flags);
-}
-
-static void x2apic_send_IPI_all(int vector)
-{
-       x2apic_send_IPI_mask(cpu_online_mask, vector);
-}
-
-static int x2apic_apic_id_registered(void)
-{
-       return 1;
-}
-
-static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       cpu = cpumask_first(cpumask);
-       if ((unsigned)cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_apicid, cpu);
-       else
-               return BAD_APICID;
-}
-
-static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                                 const struct cpumask *andmask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       for_each_cpu_and(cpu, cpumask, andmask)
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       if (cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_apicid, cpu);
-       return BAD_APICID;
-}
-
-static unsigned int get_apic_id(unsigned long x)
-{
-       unsigned int id;
-
-       id = x;
-       return id;
-}
-
-static unsigned long set_apic_id(unsigned int id)
-{
-       unsigned long x;
-
-       x = id;
-       return x;
-}
-
-static unsigned int phys_pkg_id(int index_msb)
-{
-       return current_cpu_data.initial_apicid >> index_msb;
-}
-
-static void x2apic_send_IPI_self(int vector)
-{
-       apic_write(APIC_SELF_IPI, vector);
-}
-
-static void init_x2apic_ldr(void)
-{
-       return;
-}
-
-struct genapic apic_x2apic_phys = {
-       .name = "physical x2apic",
-       .acpi_madt_oem_check = x2apic_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = x2apic_target_cpus,
-       .vector_allocation_domain = x2apic_vector_allocation_domain,
-       .apic_id_registered = x2apic_apic_id_registered,
-       .init_apic_ldr = init_x2apic_ldr,
-       .send_IPI_all = x2apic_send_IPI_all,
-       .send_IPI_allbutself = x2apic_send_IPI_allbutself,
-       .send_IPI_mask = x2apic_send_IPI_mask,
-       .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
-       .send_IPI_self = x2apic_send_IPI_self,
-       .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
-};
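
A worked example for the phys_pkg_id() implementations above, which shift
the low core/thread bits out of the initial APIC ID (values illustrative):

        0x13 >> 4 == 1    (APIC ID 0x13, index_msb 4, i.e. 16 logical
                           CPUs per package -> package 1)
        0x05 >> 4 == 0    (APIC ID 0x05 -> package 0)
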
diff --git a/arch/x86/kernel/genx2apic_uv_x.c b/arch/x86/kernel/genx2apic_uv_x.c
deleted file mode 100644 (file)
index b193e08..0000000
+++ /dev/null
@@ -1,600 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * SGI UV APIC functions (note: not an Intel compatible APIC)
- *
- * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
- */
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/cpu.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/module.h>
-#include <linux/hardirq.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <asm/current.h>
-#include <asm/smp.h>
-#include <asm/ipi.h>
-#include <asm/genapic.h>
-#include <asm/pgtable.h>
-#include <asm/uv/uv_mmrs.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/bios.h>
-
-DEFINE_PER_CPU(int, x2apic_extra_bits);
-
-static enum uv_system_type uv_system_type;
-
-static int uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       if (!strcmp(oem_id, "SGI")) {
-               if (!strcmp(oem_table_id, "UVL"))
-                       uv_system_type = UV_LEGACY_APIC;
-               else if (!strcmp(oem_table_id, "UVX"))
-                       uv_system_type = UV_X2APIC;
-               else if (!strcmp(oem_table_id, "UVH")) {
-                       uv_system_type = UV_NON_UNIQUE_APIC;
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-enum uv_system_type get_uv_system_type(void)
-{
-       return uv_system_type;
-}
-
-int is_uv_system(void)
-{
-       return uv_system_type != UV_NONE;
-}
-EXPORT_SYMBOL_GPL(is_uv_system);
-
-DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info);
-
-struct uv_blade_info *uv_blade_info;
-EXPORT_SYMBOL_GPL(uv_blade_info);
-
-short *uv_node_to_blade;
-EXPORT_SYMBOL_GPL(uv_node_to_blade);
-
-short *uv_cpu_to_blade;
-EXPORT_SYMBOL_GPL(uv_cpu_to_blade);
-
-short uv_possible_blades;
-EXPORT_SYMBOL_GPL(uv_possible_blades);
-
-unsigned long sn_rtc_cycles_per_second;
-EXPORT_SYMBOL(sn_rtc_cycles_per_second);
-
-/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */
-
-static const struct cpumask *uv_target_cpus(void)
-{
-       return cpumask_of(0);
-}
-
-static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
-{
-       cpumask_clear(retmask);
-       cpumask_set_cpu(cpu, retmask);
-}
-
-int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
-{
-       unsigned long val;
-       int pnode;
-
-       pnode = uv_apicid_to_pnode(phys_apicid);
-       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-           (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
-           (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
-           APIC_DM_INIT;
-       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
-       mdelay(10);
-
-       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
-           (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
-           (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
-           APIC_DM_STARTUP;
-       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
-       return 0;
-}
-
-static void uv_send_IPI_one(int cpu, int vector)
-{
-       unsigned long val, apicid, lapicid;
-       int pnode;
-
-       apicid = per_cpu(x86_cpu_to_apicid, cpu);
-       lapicid = apicid & 0x3f;                /* ZZZ macro needed */
-       pnode = uv_apicid_to_pnode(apicid);
-       val =
-           (1UL << UVH_IPI_INT_SEND_SHFT) | (lapicid <<
-                                             UVH_IPI_INT_APIC_ID_SHFT) |
-           (vector << UVH_IPI_INT_VECTOR_SHFT);
-       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
-}
-
-static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
-{
-       unsigned int cpu;
-
-       for_each_cpu(cpu, mask)
-               uv_send_IPI_one(cpu, vector);
-}
-
-static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
-{
-       unsigned int cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       for_each_cpu(cpu, mask)
-               if (cpu != this_cpu)
-                       uv_send_IPI_one(cpu, vector);
-}
-
-static void uv_send_IPI_allbutself(int vector)
-{
-       unsigned int cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       for_each_online_cpu(cpu)
-               if (cpu != this_cpu)
-                       uv_send_IPI_one(cpu, vector);
-}
-
-static void uv_send_IPI_all(int vector)
-{
-       uv_send_IPI_mask(cpu_online_mask, vector);
-}
-
-static int uv_apic_id_registered(void)
-{
-       return 1;
-}
-
-static void uv_init_apic_ldr(void)
-{
-}
-
-static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       cpu = cpumask_first(cpumask);
-       if ((unsigned)cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_apicid, cpu);
-       else
-               return BAD_APICID;
-}
-
-static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
-                                             const struct cpumask *andmask)
-{
-       int cpu;
-
-       /*
-        * We're using fixed IRQ delivery, can only return one phys APIC ID.
-        * May as well be the first.
-        */
-       for_each_cpu_and(cpu, cpumask, andmask)
-               if (cpumask_test_cpu(cpu, cpu_online_mask))
-                       break;
-       if (cpu < nr_cpu_ids)
-               return per_cpu(x86_cpu_to_apicid, cpu);
-       return BAD_APICID;
-}
-
-static unsigned int get_apic_id(unsigned long x)
-{
-       unsigned int id;
-
-       WARN_ON(preemptible() && num_online_cpus() > 1);
-       id = x | __get_cpu_var(x2apic_extra_bits);
-
-       return id;
-}
-
-static unsigned long set_apic_id(unsigned int id)
-{
-       unsigned long x;
-
-       /* mask out x2apic_extra_bits? */
-       x = id;
-       return x;
-}
-
-static unsigned int uv_read_apic_id(void)
-{
-
-       return get_apic_id(apic_read(APIC_ID));
-}
-
-static unsigned int phys_pkg_id(int index_msb)
-{
-       return uv_read_apic_id() >> index_msb;
-}
-
-static void uv_send_IPI_self(int vector)
-{
-       apic_write(APIC_SELF_IPI, vector);
-}
-
-struct genapic apic_x2apic_uv_x = {
-       .name = "UV large system",
-       .acpi_madt_oem_check = uv_acpi_madt_oem_check,
-       .int_delivery_mode = dest_Fixed,
-       .int_dest_mode = (APIC_DEST_PHYSICAL != 0),
-       .target_cpus = uv_target_cpus,
-       .vector_allocation_domain = uv_vector_allocation_domain,
-       .apic_id_registered = uv_apic_id_registered,
-       .init_apic_ldr = uv_init_apic_ldr,
-       .send_IPI_all = uv_send_IPI_all,
-       .send_IPI_allbutself = uv_send_IPI_allbutself,
-       .send_IPI_mask = uv_send_IPI_mask,
-       .send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
-       .send_IPI_self = uv_send_IPI_self,
-       .cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
-       .cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
-       .phys_pkg_id = phys_pkg_id,
-       .get_apic_id = get_apic_id,
-       .set_apic_id = set_apic_id,
-       .apic_id_mask = (0xFFFFFFFFu),
-};
-
-static __cpuinit void set_x2apic_extra_bits(int pnode)
-{
-       __get_cpu_var(x2apic_extra_bits) = (pnode << 6);
-}
-
-/*
- * Called on boot cpu.
- */
-static __init int boot_pnode_to_blade(int pnode)
-{
-       int blade;
-
-       for (blade = 0; blade < uv_num_possible_blades(); blade++)
-               if (pnode == uv_blade_info[blade].pnode)
-                       return blade;
-       BUG();
-}
-
-struct redir_addr {
-       unsigned long redirect;
-       unsigned long alias;
-};
-
-#define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
-
-static __initdata struct redir_addr redir_addrs[] = {
-       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
-       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
-       {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
-};
-
-static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
-{
-       union uvh_si_alias0_overlay_config_u alias;
-       union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) {
-               alias.v = uv_read_local_mmr(redir_addrs[i].alias);
-               if (alias.s.base == 0) {
-                       *size = (1UL << alias.s.m_alias);
-                       redirect.v = uv_read_local_mmr(redir_addrs[i].redirect);
-                       *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT;
-                       return;
-               }
-       }
-       BUG();
-}
-
-static __init void map_low_mmrs(void)
-{
-       init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
-       init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
-}
-
-enum map_type {map_wb, map_uc};
-
-static __init void map_high(char *id, unsigned long base, int shift,
-                           int max_pnode, enum map_type map_type)
-{
-       unsigned long bytes, paddr;
-
-       paddr = base << shift;
-       bytes = (1UL << shift) * (max_pnode + 1);
-       printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
-                                               paddr + bytes);
-       if (map_type == map_uc)
-               init_extra_mapping_uc(paddr, bytes);
-       else
-               init_extra_mapping_wb(paddr, bytes);
-}
-
-static __init void map_gru_high(int max_pnode)
-{
-       union uvh_rh_gam_gru_overlay_config_mmr_u gru;
-       int shift = UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-       gru.v = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR);
-       if (gru.s.enable)
-               map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
-}
-
-static __init void map_config_high(int max_pnode)
-{
-       union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
-       int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-       cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
-       if (cfg.s.enable)
-               map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
-}
-
-static __init void map_mmr_high(int max_pnode)
-{
-       union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
-       int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-       mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
-       if (mmr.s.enable)
-               map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
-}
-
-static __init void map_mmioh_high(int max_pnode)
-{
-       union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;
-       int shift = UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR_BASE_SHFT;
-
-       mmioh.v = uv_read_local_mmr(UVH_RH_GAM_MMIOH_OVERLAY_CONFIG_MMR);
-       if (mmioh.s.enable)
-               map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc);
-}
-
-static __init void uv_rtc_init(void)
-{
-       long status;
-       u64 ticks_per_sec;
-
-       status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
-                                       &ticks_per_sec);
-       if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
-               printk(KERN_WARNING
-                       "unable to determine platform RTC clock frequency, "
-                       "guessing.\n");
-               /* BIOS gives a wrong value for the clock frequency, so guess */
-               sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
-       } else
-               sn_rtc_cycles_per_second = ticks_per_sec;
-}
-
-/*
- * percpu heartbeat timer
- */
-static void uv_heartbeat(unsigned long ignored)
-{
-       struct timer_list *timer = &uv_hub_info->scir.timer;
-       unsigned char bits = uv_hub_info->scir.state;
-
-       /* flip heartbeat bit */
-       bits ^= SCIR_CPU_HEARTBEAT;
-
-       /* is this cpu idle? */
-       if (idle_cpu(raw_smp_processor_id()))
-               bits &= ~SCIR_CPU_ACTIVITY;
-       else
-               bits |= SCIR_CPU_ACTIVITY;
-
-       /* update system controller interface reg */
-       uv_set_scir_bits(bits);
-
-       /* enable next timer period */
-       mod_timer(timer, jiffies + SCIR_CPU_HB_INTERVAL);
-}
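/*
 * Editorial sketch, not part of the original file: each CPU appears to
 * own one byte of the hub's System Controller Interface Regs (scir.offset
 * is SCIR_LOCAL_MMR_BASE + lcpu in uv_system_init() below).  Flipping
 * SCIR_CPU_HEARTBEAT every period while mirroring idleness in
 * SCIR_CPU_ACTIVITY lets the system controller tell a hung CPU (heartbeat
 * bit frozen) from a merely idle one (activity bit clear).
 */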
-
-static void __cpuinit uv_heartbeat_enable(int cpu)
-{
-       if (!uv_cpu_hub_info(cpu)->scir.enabled) {
-               struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
-
-               uv_set_cpu_scir_bits(cpu, SCIR_CPU_HEARTBEAT|SCIR_CPU_ACTIVITY);
-               setup_timer(timer, uv_heartbeat, cpu);
-               timer->expires = jiffies + SCIR_CPU_HB_INTERVAL;
-               add_timer_on(timer, cpu);
-               uv_cpu_hub_info(cpu)->scir.enabled = 1;
-       }
-
-       /* check boot cpu */
-       if (!uv_cpu_hub_info(0)->scir.enabled)
-               uv_heartbeat_enable(0);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit uv_heartbeat_disable(int cpu)
-{
-       if (uv_cpu_hub_info(cpu)->scir.enabled) {
-               uv_cpu_hub_info(cpu)->scir.enabled = 0;
-               del_timer(&uv_cpu_hub_info(cpu)->scir.timer);
-       }
-       uv_set_cpu_scir_bits(cpu, 0xff);
-}
-
-/*
- * cpu hotplug notifier
- */
-static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
-                                      unsigned long action, void *hcpu)
-{
-       long cpu = (long)hcpu;
-
-       switch (action) {
-       case CPU_ONLINE:
-               uv_heartbeat_enable(cpu);
-               break;
-       case CPU_DOWN_PREPARE:
-               uv_heartbeat_disable(cpu);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static __init void uv_scir_register_cpu_notifier(void)
-{
-       hotcpu_notifier(uv_scir_cpu_notify, 0);
-}
-
-#else /* !CONFIG_HOTPLUG_CPU */
-
-static __init void uv_scir_register_cpu_notifier(void)
-{
-}
-
-static __init int uv_init_heartbeat(void)
-{
-       int cpu;
-
-       if (is_uv_system())
-               for_each_online_cpu(cpu)
-                       uv_heartbeat_enable(cpu);
-       return 0;
-}
-
-late_initcall(uv_init_heartbeat);
-
-#endif /* !CONFIG_HOTPLUG_CPU */
-
-/*
- * Called on each cpu to initialize the per_cpu UV data area.
- *     ZZZ hotplug not supported yet
- */
-void __cpuinit uv_cpu_init(void)
-{
-       /* CPU 0 initialization will be done via uv_system_init. */
-       if (!uv_blade_info)
-               return;
-
-       uv_blade_info[uv_numa_blade_id()].nr_online_cpus++;
-
-       if (get_uv_system_type() == UV_NON_UNIQUE_APIC)
-               set_x2apic_extra_bits(uv_hub_info->pnode);
-}
-
-
-void __init uv_system_init(void)
-{
-       union uvh_si_addr_map_config_u m_n_config;
-       union uvh_node_id_u node_id;
-       unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
-       int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
-       int max_pnode = 0;
-       unsigned long mmr_base, present;
-
-       map_low_mmrs();
-
-       m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
-       m_val = m_n_config.s.m_skt;
-       n_val = m_n_config.s.n_skt;
-       mmr_base =
-           uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
-           ~UV_MMR_ENABLE;
-       printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
-
-       for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
-               uv_possible_blades +=
-                 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
-       printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
-
-       bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-       uv_blade_info = kmalloc(bytes, GFP_KERNEL);
-
-       get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
-
-       bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
-       uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
-       memset(uv_node_to_blade, 255, bytes);
-
-       bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
-       uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
-       memset(uv_cpu_to_blade, 255, bytes);
-
-       blade = 0;
-       for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
-               present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
-               for (j = 0; j < 64; j++) {
-                       if (!test_bit(j, &present))
-                               continue;
-                       uv_blade_info[blade].pnode = (i * 64 + j);
-                       uv_blade_info[blade].nr_possible_cpus = 0;
-                       uv_blade_info[blade].nr_online_cpus = 0;
-                       blade++;
-               }
-       }
-
-       node_id.v = uv_read_local_mmr(UVH_NODE_ID);
-       gnode_upper = (((unsigned long)node_id.s.node_id) &
-                      ~((1 << n_val) - 1)) << m_val;
-
-       uv_bios_init();
-       uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
-                           &sn_coherency_id, &sn_region_size);
-       uv_rtc_init();
-
-       for_each_present_cpu(cpu) {
-               nid = cpu_to_node(cpu);
-               pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
-               blade = boot_pnode_to_blade(pnode);
-               lcpu = uv_blade_info[blade].nr_possible_cpus;
-               uv_blade_info[blade].nr_possible_cpus++;
-
-               uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
-               uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
-               uv_cpu_hub_info(cpu)->m_val = m_val;
-               uv_cpu_hub_info(cpu)->n_val = n_val;
-               uv_cpu_hub_info(cpu)->numa_blade_id = blade;
-               uv_cpu_hub_info(cpu)->blade_processor_id = lcpu;
-               uv_cpu_hub_info(cpu)->pnode = pnode;
-               uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) - 1;
-               uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
-               uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
-               uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
-               uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
-               uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
-               uv_node_to_blade[nid] = blade;
-               uv_cpu_to_blade[cpu] = blade;
-               max_pnode = max(pnode, max_pnode);
-
-               printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
-                       "lcpu %d, blade %d\n",
-                       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
-                       lcpu, blade);
-       }
-
-       map_gru_high(max_pnode);
-       map_mmr_high(max_pnode);
-       map_config_high(max_pnode);
-       map_mmioh_high(max_pnode);
-
-       uv_cpu_init();
-       uv_scir_register_cpu_notifier();
-       proc_mkdir("sgi_uv", NULL);
-}
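
A worked example for map_high() in the file above, which turns an overlay
config register into a physical range (the shift, base and max_pnode values
are illustrative):

        paddr = base << shift            = 0x30UL << 28       = 0x300000000
        bytes = (1UL << shift) * (max_pnode + 1)
              = 0x10000000 * (3 + 1)                          = 0x40000000 (1 GB)
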
index b9a4d8c4b93529b3c568c054755d04d1f2534581..f5b2722476907bcd3fe7ace735238330c604b9e9 100644 (file)
 #include <asm/bios_ebda.h>
 #include <asm/trampoline.h>
 
-/* boot cpu pda */
-static struct x8664_pda _boot_cpu_pda;
-
-#ifdef CONFIG_SMP
-/*
- * We install an empty cpu_pda pointer table to indicate to early users
- * (numa_set_node) that the cpu_pda pointer table for cpus other than
- * the boot cpu is not yet set up.
- */
-static struct x8664_pda *__cpu_pda[NR_CPUS] __initdata;
-#else
-static struct x8664_pda *__cpu_pda[NR_CPUS] __read_mostly;
-#endif
-
-void __init x86_64_init_pda(void)
-{
-       _cpu_pda = __cpu_pda;
-       cpu_pda(0) = &_boot_cpu_pda;
-       pda_init(0);
-}
-
 static void __init zap_identity_mappings(void)
 {
        pgd_t *pgd = pgd_offset_k(0UL);
@@ -112,8 +91,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
        if (console_loglevel == 10)
                early_printk("Kernel alive\n");
 
-       x86_64_init_pda();
-
        x86_64_start_reservations(real_mode_data);
 }
 
index e835b4eea70be85d0b9fac4f846fdd55becbf189..c32ca19d591a5de88057d2d9534168332fa7bf77 100644 (file)
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
 #include <asm/desc.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/setup.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 /* Physical address */
 #define pa(X) ((X) - __PAGE_OFFSET)
@@ -429,14 +430,34 @@ is386:    movl $2,%ecx            # set MP
        ljmp $(__KERNEL_CS),$1f
 1:     movl $(__KERNEL_DS),%eax        # reload all the segment registers
        movl %eax,%ss                   # after changing gdt.
-       movl %eax,%fs                   # gets reset once there's real percpu
 
        movl $(__USER_DS),%eax          # DS/ES contains default USER segment
        movl %eax,%ds
        movl %eax,%es
 
-       xorl %eax,%eax                  # Clear GS and LDT
+       movl $(__KERNEL_PERCPU), %eax
+       movl %eax,%fs                   # set this cpu's percpu
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+       /*
+        * The linker can't handle this by relocation.  Manually set
+        * base address in stack canary segment descriptor.
+        */
+       cmpb $0,ready
+       jne 1f
+       movl $per_cpu__gdt_page,%eax
+       movl $per_cpu__stack_canary,%ecx
+       subl $20, %ecx
+       movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
+       shrl $16, %ecx
+       movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
+       movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
+1:
+#endif
+       movl $(__KERNEL_STACK_CANARY),%eax
        movl %eax,%gs
+
+       xorl %eax,%eax                  # Clear LDT
        lldt %ax
 
        cld                     # gcc2 wants the direction flag cleared at all times
@@ -446,8 +467,6 @@ is386:      movl $2,%ecx            # set MP
        movb $1, ready
        cmpb $0,%cl             # the first CPU calls start_kernel
        je   1f
-       movl $(__KERNEL_PERCPU), %eax
-       movl %eax,%fs           # set this cpu's percpu
        movl (stack_start), %esp
 1:
 #endif /* CONFIG_SMP */
@@ -548,11 +567,7 @@ early_fault:
        pushl %eax
        pushl %edx              /* trapno */
        pushl $fault_msg
-#ifdef CONFIG_EARLY_PRINTK
-       call early_printk
-#else
        call printk
-#endif
 #endif
        call dump_stack
 hlt_loop:
@@ -580,11 +595,10 @@ ignore_int:
        pushl 32(%esp)
        pushl 40(%esp)
        pushl $int_msg
-#ifdef CONFIG_EARLY_PRINTK
-       call early_printk
-#else
        call printk
-#endif
+
+       call dump_stack
+
        addl $(5*4),%esp
        popl %ds
        popl %es
@@ -660,7 +674,7 @@ early_recursion_flag:
        .long 0
 
 int_msg:
-       .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+       .asciz "Unknown interrupt or fault at: %p %p %p\n"
 
 fault_msg:
 /* fault info: */
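
The CONFIG_CC_STACKPROTECTOR hunk above scatters the canary segment base
into the GDT by hand because the linker cannot apply that relocation. A
sketch of the byte layout being written (a 32-bit segment descriptor keeps
base[23:0] in bytes 2-4 and base[31:24] in byte 7; the "subl $20" bias makes
%gs:20, gcc's fixed canary slot on i386, land on per_cpu__stack_canary):

static void set_gdt_base_sketch(unsigned char desc[8], unsigned int base)
{
        desc[2] = base & 0xff;          /* base[7:0],   written by the movw */
        desc[3] = (base >> 8) & 0xff;   /* base[15:8],  written by the movw */
        desc[4] = (base >> 16) & 0xff;  /* base[23:16], movb %cl */
        desc[7] = (base >> 24) & 0xff;  /* base[31:24], movb %ch */
}
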
index 0e275d495563fa33d9fddce555a82a581fa8cff0..54b29bb24e7178f931d4681426b4276191b0255f 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/msr.h>
 #include <asm/cache.h>
 #include <asm/processor-flags.h>
+#include <asm/percpu.h>
 
 #ifdef CONFIG_PARAVIRT
 #include <asm/asm-offsets.h>
@@ -226,12 +227,15 @@ ENTRY(secondary_startup_64)
        movl %eax,%fs
        movl %eax,%gs
 
-       /* 
-        * Setup up a dummy PDA. this is just for some early bootup code
-        * that does in_interrupt() 
-        */ 
+       /* Set up %gs.
+        *
+        * The base of %gs always points to the bottom of the irqstack
+        * union.  If the stack protector canary is enabled, it is
+        * located at %gs:40.  Note that, on SMP, the boot cpu uses
+        * the init data section until the per-cpu areas are set up.
+        */
        movl    $MSR_GS_BASE,%ecx
-       movq    $empty_zero_page,%rax
+       movq    initial_gs(%rip),%rax
        movq    %rax,%rdx
        shrq    $32,%rdx
        wrmsr   
@@ -257,6 +261,8 @@ ENTRY(secondary_startup_64)
        .align  8
        ENTRY(initial_code)
        .quad   x86_64_start_kernel
+       ENTRY(initial_gs)
+       .quad   INIT_PER_CPU_VAR(irq_stack_union)
        __FINITDATA
 
        ENTRY(stack_start)
@@ -323,8 +329,6 @@ early_idt_ripmsg:
 #endif /* CONFIG_EARLY_PRINTK */
        .previous
 
-.balign PAGE_SIZE
-
 #define NEXT_PAGE(name) \
        .balign PAGE_SIZE; \
 ENTRY(name)
@@ -401,7 +405,8 @@ NEXT_PAGE(level2_spare_pgt)
        .globl early_gdt_descr
 early_gdt_descr:
        .word   GDT_ENTRIES*8-1
-       .quad   per_cpu__gdt_page
+early_gdt_descr_base:
+       .quad   INIT_PER_CPU_VAR(gdt_page)
 
 ENTRY(phys_base)
        /* This must match the first entry in level2_kernel_pgt */
@@ -412,7 +417,7 @@ ENTRY(phys_base)
        .section .bss, "aw", @nobits
        .align L1_CACHE_BYTES
 ENTRY(idt_table)
-       .skip 256 * 16
+       .skip IDT_ENTRIES * 16
 
        .section .bss.page_aligned, "aw", @nobits
        .align PAGE_SIZE
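
The secondary_startup_64 hunk above now loads MSR_GS_BASE from initial_gs,
i.e. the bottom of the boot CPU's irq_stack_union, instead of a dummy PDA.
A sketch of the equivalent C, assuming wrmsrl(): wrmsr takes the 64-bit
value split across %edx:%eax, which is why the assembly shifts the high
half into %rdx first.

static inline void load_initial_gs_sketch(u64 base)
{
        wrmsrl(MSR_GS_BASE, base);  /* %gs base -> bottom of irq_stack_union */
}

The idt_table hunk changes only the symbolic size: 64-bit gate descriptors
are 16 bytes, so IDT_ENTRIES * 16 = 256 * 16 = 4096 bytes, the same
one-page reservation as the old literal.
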
index 11d5093eb281372c706b83c39edbdeff2d066705..df89102bef809908536b0449e73e434458f5fabc 100644 (file)
@@ -22,7 +22,6 @@
 #include <asm/pgtable.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
-#include <asm/arch_hooks.h>
 #include <asm/i8259.h>
 
 /*
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
deleted file mode 100644 (file)
index bc7ac4d..0000000
+++ /dev/null
@@ -1,4169 +0,0 @@
-/*
- *     Intel IO-APIC support for multi-Pentium hosts.
- *
- *     Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
- *
- *     Many thanks to Stig Venaas for trying out countless experimental
- *     patches and reporting/debugging problems patiently!
- *
- *     (c) 1999, Multiple IO-APIC support, developed by
- *     Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
- *      Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
- *     further tested and cleaned up by Zach Brown <zab@redhat.com>
- *     and Ingo Molnar <mingo@redhat.com>
- *
- *     Fixes
- *     Maciej W. Rozycki       :       Bits for genuine 82489DX APICs;
- *                                     thanks to Eric Gilmore
- *                                     and Rolf G. Tews
- *                                     for testing these extensively
- *     Paul Diefenbaugh        :       Added full ACPI support
- */
-
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/pci.h>
-#include <linux/mc146818rtc.h>
-#include <linux/compiler.h>
-#include <linux/acpi.h>
-#include <linux/module.h>
-#include <linux/sysdev.h>
-#include <linux/msi.h>
-#include <linux/htirq.h>
-#include <linux/freezer.h>
-#include <linux/kthread.h>
-#include <linux/jiffies.h>     /* time_after() */
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-#endif
-#include <linux/bootmem.h>
-#include <linux/dmar.h>
-#include <linux/hpet.h>
-
-#include <asm/idle.h>
-#include <asm/io.h>
-#include <asm/smp.h>
-#include <asm/desc.h>
-#include <asm/proto.h>
-#include <asm/acpi.h>
-#include <asm/dma.h>
-#include <asm/timer.h>
-#include <asm/i8259.h>
-#include <asm/nmi.h>
-#include <asm/msidef.h>
-#include <asm/hypertransport.h>
-#include <asm/setup.h>
-#include <asm/irq_remapping.h>
-#include <asm/hpet.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_irq.h>
-
-#include <mach_ipi.h>
-#include <mach_apic.h>
-#include <mach_apicdef.h>
-
-#define __apicdebuginit(type) static type __init
-
-/*
- *      Is the SiS APIC rmw bug present?
- *      -1 = don't know, 0 = no, 1 = yes
- */
-int sis_apic_bug = -1;
-
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
-
-/*
- * # of IRQ routing registers
- */
-int nr_ioapic_registers[MAX_IO_APICS];
-
-/* I/O APIC entries */
-struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
-int nr_ioapics;
-
-/* MP IRQ source entries */
-struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
-
-/* # of MP IRQ source entries */
-int mp_irq_entries;
-
-#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
-int mp_bus_id_to_type[MAX_MP_BUSSES];
-#endif
-
-DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
-
-int skip_ioapic_setup;
-
-static int __init parse_noapic(char *str)
-{
-       /* disable IO-APIC */
-       disable_ioapic_setup();
-       return 0;
-}
-early_param("noapic", parse_noapic);
-
-struct irq_pin_list;
-
-/*
- * This is performance-critical; we want to do it in O(1).
- *
- * the indexing order of this array favors 1:1 mappings
- * between pins and IRQs.
- */
-
-struct irq_pin_list {
-       int apic, pin;
-       struct irq_pin_list *next;
-};
-
-static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)
-{
-       struct irq_pin_list *pin;
-       int node;
-
-       node = cpu_to_node(cpu);
-
-       pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);
-
-       return pin;
-}
-
-struct irq_cfg {
-       struct irq_pin_list *irq_2_pin;
-       cpumask_var_t domain;
-       cpumask_var_t old_domain;
-       unsigned move_cleanup_count;
-       u8 vector;
-       u8 move_in_progress : 1;
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-       u8 move_desc_pending : 1;
-#endif
-};
-
-/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-#ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg irq_cfgx[] = {
-#else
-static struct irq_cfg irq_cfgx[NR_IRQS] = {
-#endif
-       [0]  = { .vector = IRQ0_VECTOR,  },
-       [1]  = { .vector = IRQ1_VECTOR,  },
-       [2]  = { .vector = IRQ2_VECTOR,  },
-       [3]  = { .vector = IRQ3_VECTOR,  },
-       [4]  = { .vector = IRQ4_VECTOR,  },
-       [5]  = { .vector = IRQ5_VECTOR,  },
-       [6]  = { .vector = IRQ6_VECTOR,  },
-       [7]  = { .vector = IRQ7_VECTOR,  },
-       [8]  = { .vector = IRQ8_VECTOR,  },
-       [9]  = { .vector = IRQ9_VECTOR,  },
-       [10] = { .vector = IRQ10_VECTOR, },
-       [11] = { .vector = IRQ11_VECTOR, },
-       [12] = { .vector = IRQ12_VECTOR, },
-       [13] = { .vector = IRQ13_VECTOR, },
-       [14] = { .vector = IRQ14_VECTOR, },
-       [15] = { .vector = IRQ15_VECTOR, },
-};
-
-int __init arch_early_irq_init(void)
-{
-       struct irq_cfg *cfg;
-       struct irq_desc *desc;
-       int count;
-       int i;
-
-       cfg = irq_cfgx;
-       count = ARRAY_SIZE(irq_cfgx);
-
-       for (i = 0; i < count; i++) {
-               desc = irq_to_desc(i);
-               desc->chip_data = &cfg[i];
-               alloc_bootmem_cpumask_var(&cfg[i].domain);
-               alloc_bootmem_cpumask_var(&cfg[i].old_domain);
-               if (i < NR_IRQS_LEGACY)
-                       cpumask_setall(cfg[i].domain);
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg *irq_cfg(unsigned int irq)
-{
-       struct irq_cfg *cfg = NULL;
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       if (desc)
-               cfg = desc->chip_data;
-
-       return cfg;
-}
-
-static struct irq_cfg *get_one_free_irq_cfg(int cpu)
-{
-       struct irq_cfg *cfg;
-       int node;
-
-       node = cpu_to_node(cpu);
-
-       cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
-       if (cfg) {
-               if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
-                       kfree(cfg);
-                       cfg = NULL;
-               } else if (!alloc_cpumask_var_node(&cfg->old_domain,
-                                                         GFP_ATOMIC, node)) {
-                       free_cpumask_var(cfg->domain);
-                       kfree(cfg);
-                       cfg = NULL;
-               } else {
-                       cpumask_clear(cfg->domain);
-                       cpumask_clear(cfg->old_domain);
-               }
-       }
-
-       return cfg;
-}
-
-int arch_init_chip_data(struct irq_desc *desc, int cpu)
-{
-       struct irq_cfg *cfg;
-
-       cfg = desc->chip_data;
-       if (!cfg) {
-               desc->chip_data = get_one_free_irq_cfg(cpu);
-               if (!desc->chip_data) {
-                       printk(KERN_ERR "cannot alloc irq_cfg\n");
-                       BUG_ON(1);
-               }
-       }
-
-       return 0;
-}
-
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-
-static void
-init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int cpu)
-{
-       struct irq_pin_list *old_entry, *head, *tail, *entry;
-
-       cfg->irq_2_pin = NULL;
-       old_entry = old_cfg->irq_2_pin;
-       if (!old_entry)
-               return;
-
-       entry = get_one_free_irq_2_pin(cpu);
-       if (!entry)
-               return;
-
-       entry->apic     = old_entry->apic;
-       entry->pin      = old_entry->pin;
-       head            = entry;
-       tail            = entry;
-       old_entry       = old_entry->next;
-       while (old_entry) {
-               entry = get_one_free_irq_2_pin(cpu);
-               if (!entry) {
-                       entry = head;
-                       while (entry) {
-                               head = entry->next;
-                               kfree(entry);
-                               entry = head;
-                       }
-                       /* still use the old one */
-                       return;
-               }
-               entry->apic     = old_entry->apic;
-               entry->pin      = old_entry->pin;
-               tail->next      = entry;
-               tail            = entry;
-               old_entry       = old_entry->next;
-       }
-
-       tail->next = NULL;
-       cfg->irq_2_pin = head;
-}
-
-static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
-{
-       struct irq_pin_list *entry, *next;
-
-       if (old_cfg->irq_2_pin == cfg->irq_2_pin)
-               return;
-
-       entry = old_cfg->irq_2_pin;
-
-       while (entry) {
-               next = entry->next;
-               kfree(entry);
-               entry = next;
-       }
-       old_cfg->irq_2_pin = NULL;
-}
-
-void arch_init_copy_chip_data(struct irq_desc *old_desc,
-                                struct irq_desc *desc, int cpu)
-{
-       struct irq_cfg *cfg;
-       struct irq_cfg *old_cfg;
-
-       cfg = get_one_free_irq_cfg(cpu);
-
-       if (!cfg)
-               return;
-
-       desc->chip_data = cfg;
-
-       old_cfg = old_desc->chip_data;
-
-       memcpy(cfg, old_cfg, sizeof(struct irq_cfg));
-
-       init_copy_irq_2_pin(old_cfg, cfg, cpu);
-}
-
-static void free_irq_cfg(struct irq_cfg *old_cfg)
-{
-       kfree(old_cfg);
-}
-
-void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
-{
-       struct irq_cfg *old_cfg, *cfg;
-
-       old_cfg = old_desc->chip_data;
-       cfg = desc->chip_data;
-
-       if (old_cfg == cfg)
-               return;
-
-       if (old_cfg) {
-               free_irq_2_pin(old_cfg, cfg);
-               free_irq_cfg(old_cfg);
-               old_desc->chip_data = NULL;
-       }
-}
-
-static void
-set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
-{
-       struct irq_cfg *cfg = desc->chip_data;
-
-       if (!cfg->move_in_progress) {
-               /* it means that the domain has not changed */
-               if (!cpumask_intersects(&desc->affinity, mask))
-                       cfg->move_desc_pending = 1;
-       }
-}
-#endif
-
-#else
-static struct irq_cfg *irq_cfg(unsigned int irq)
-{
-       return irq < nr_irqs ? irq_cfgx + irq : NULL;
-}
-
-#endif
-
-#ifndef CONFIG_NUMA_MIGRATE_IRQ_DESC
-static inline void
-set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
-{
-}
-#endif
-
-struct io_apic {
-       unsigned int index;
-       unsigned int unused[3];
-       unsigned int data;
-};
-
-static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
-{
-       return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
-               + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
-}
-
-static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
-{
-       struct io_apic __iomem *io_apic = io_apic_base(apic);
-       writel(reg, &io_apic->index);
-       return readl(&io_apic->data);
-}
-
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-{
-       struct io_apic __iomem *io_apic = io_apic_base(apic);
-       writel(reg, &io_apic->index);
-       writel(value, &io_apic->data);
-}
-
-/*
- * Re-write a value: to be used for read-modify-write
- * cycles where the read already set up the index register.
- *
- * Older SiS APIC requires we rewrite the index register
- */
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
-{
-       struct io_apic __iomem *io_apic = io_apic_base(apic);
-
-       if (sis_apic_bug)
-               writel(reg, &io_apic->index);
-       writel(value, &io_apic->data);
-}
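/*
 * Editorial usage sketch, not from the original file: io_apic_modify()
 * assumes the index register was already programmed by a preceding
 * io_apic_read() of the same register; only APICs with the SiS
 * read-modify-write bug need the index rewritten.  A RMW cycle is:
 *
 *      reg = io_apic_read(apic, 0x10 + pin * 2);
 *      reg |= IO_APIC_REDIR_MASKED;
 *      io_apic_modify(apic, 0x10 + pin * 2, reg);
 */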
-
-static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
-{
-       struct irq_pin_list *entry;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       entry = cfg->irq_2_pin;
-       for (;;) {
-               unsigned int reg;
-               int pin;
-
-               if (!entry)
-                       break;
-               pin = entry->pin;
-               reg = io_apic_read(entry->apic, 0x10 + pin*2);
-               /* Is the remote IRR bit set? */
-               if (reg & IO_APIC_REDIR_REMOTE_IRR) {
-                       spin_unlock_irqrestore(&ioapic_lock, flags);
-                       return true;
-               }
-               if (!entry->next)
-                       break;
-               entry = entry->next;
-       }
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-
-       return false;
-}
-
-union entry_union {
-       struct { u32 w1, w2; };
-       struct IO_APIC_route_entry entry;
-};
-
-static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
-{
-       union entry_union eu;
-       unsigned long flags;
-       spin_lock_irqsave(&ioapic_lock, flags);
-       eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
-       eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-       return eu.entry;
-}
-
-/*
- * When we write a new IO APIC routing entry, we need to write the high
- * word first! If the mask bit in the low word is clear, we will enable
- * the interrupt, and we need to make sure the entry is fully populated
- * before that happens.
- */
-static void
-__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-{
-       union entry_union eu;
-       eu.entry = e;
-       io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-       io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-}
-
-static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&ioapic_lock, flags);
-       __ioapic_write_entry(apic, pin, e);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-/*
- * When we mask an IO APIC routing entry, we need to write the low
- * word first, in order to set the mask bit before we change the
- * high bits!
- */
-static void ioapic_mask_entry(int apic, int pin)
-{
-       unsigned long flags;
-       union entry_union eu = { .entry.mask = 1 };
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       io_apic_write(apic, 0x10 + 2*pin, eu.w1);
-       io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-}
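/*
 * Editorial ordering summary for the two writers above: an RTE is two
 * 32-bit words and the mask bit lives in the low word, so
 *
 *      unmask/route: high word (w2) first, then low (w1) -- the entry is
 *                    fully populated before it can fire;
 *      mask:         low word (w1) first, then high (w2) -- the mask bit
 *                    is set before the destination changes.
 */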
-
-#ifdef CONFIG_SMP
-static void send_cleanup_vector(struct irq_cfg *cfg)
-{
-       cpumask_var_t cleanup_mask;
-
-       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-               unsigned int i;
-               cfg->move_cleanup_count = 0;
-               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-                       cfg->move_cleanup_count++;
-               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-                       send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
-       } else {
-               cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-               cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
-               send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-               free_cpumask_var(cleanup_mask);
-       }
-       cfg->move_in_progress = 0;
-}
-
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
-{
-       int apic, pin;
-       struct irq_pin_list *entry;
-       u8 vector = cfg->vector;
-
-       entry = cfg->irq_2_pin;
-       for (;;) {
-               unsigned int reg;
-
-               if (!entry)
-                       break;
-
-               apic = entry->apic;
-               pin = entry->pin;
-#ifdef CONFIG_INTR_REMAP
-               /*
-                * With interrupt-remapping, destination information comes
-                * from interrupt-remapping table entry.
-                */
-               if (!irq_remapped(irq))
-                       io_apic_write(apic, 0x11 + pin*2, dest);
-#else
-               io_apic_write(apic, 0x11 + pin*2, dest);
-#endif
-               reg = io_apic_read(apic, 0x10 + pin*2);
-               reg &= ~IO_APIC_REDIR_VECTOR_MASK;
-               reg |= vector;
-               io_apic_modify(apic, 0x10 + pin*2, reg);
-               if (!entry->next)
-                       break;
-               entry = entry->next;
-       }
-}
-
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
-
-/*
- * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
- * of that, or returns BAD_APICID and leaves desc->affinity untouched.
- */
-static unsigned int
-set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
-{
-       struct irq_cfg *cfg;
-       unsigned int irq;
-
-       if (!cpumask_intersects(mask, cpu_online_mask))
-               return BAD_APICID;
-
-       irq = desc->irq;
-       cfg = desc->chip_data;
-       if (assign_irq_vector(irq, cfg, mask))
-               return BAD_APICID;
-
-       cpumask_and(&desc->affinity, cfg->domain, mask);
-       set_extra_move_desc(desc, mask);
-       return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
-}
-
-static void
-set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
-{
-       struct irq_cfg *cfg;
-       unsigned long flags;
-       unsigned int dest;
-       unsigned int irq;
-
-       irq = desc->irq;
-       cfg = desc->chip_data;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       dest = set_desc_affinity(desc, mask);
-       if (dest != BAD_APICID) {
-               /* Only the high 8 bits are valid. */
-               dest = SET_APIC_LOGICAL_ID(dest);
-               __target_IO_APIC_irq(irq, dest, cfg);
-       }
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-static void
-set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
-{
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-
-       set_ioapic_affinity_irq_desc(desc, mask);
-}
-#endif /* CONFIG_SMP */
-
-/*
- * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
- * shared ISA-space IRQs, so we have to support them. We are super
- * fast in the common case, and fast for shared ISA-space IRQs.
- */
-static void add_pin_to_irq_cpu(struct irq_cfg *cfg, int cpu, int apic, int pin)
-{
-       struct irq_pin_list *entry;
-
-       entry = cfg->irq_2_pin;
-       if (!entry) {
-               entry = get_one_free_irq_2_pin(cpu);
-               if (!entry) {
-                       printk(KERN_ERR "cannot alloc irq_2_pin to add %d - %d\n",
-                                       apic, pin);
-                       return;
-               }
-               cfg->irq_2_pin = entry;
-               entry->apic = apic;
-               entry->pin = pin;
-               return;
-       }
-
-       while (entry->next) {
-               /* not again, please */
-               if (entry->apic == apic && entry->pin == pin)
-                       return;
-
-               entry = entry->next;
-       }
-
-       entry->next = get_one_free_irq_2_pin(cpu);
-       entry = entry->next;
-       entry->apic = apic;
-       entry->pin = pin;
-}
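/*
 * Editorial shape sketch: irq_2_pin is a singly linked list of
 * (apic, pin) pairs.  The common case is one node per IRQ; an ISA IRQ
 * shared between two IO-APIC pins grows to two, e.g.
 *
 *      cfg->irq_2_pin -> { apic 0, pin 2 } -> { apic 0, pin 0 } -> NULL
 *
 * Review note: the tail allocation at the end of add_pin_to_irq_cpu()
 * above is not NULL-checked before being dereferenced.
 */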
-
-/*
- * Reroute an IRQ to a different pin.
- */
-static void __init replace_pin_at_irq_cpu(struct irq_cfg *cfg, int cpu,
-                                     int oldapic, int oldpin,
-                                     int newapic, int newpin)
-{
-       struct irq_pin_list *entry = cfg->irq_2_pin;
-       int replaced = 0;
-
-       while (entry) {
-               if (entry->apic == oldapic && entry->pin == oldpin) {
-                       entry->apic = newapic;
-                       entry->pin = newpin;
-                       replaced = 1;
-                       /* every one is different, right? */
-                       break;
-               }
-               entry = entry->next;
-       }
-
-       /* why? call replace before add? */
-       if (!replaced)
-               add_pin_to_irq_cpu(cfg, cpu, newapic, newpin);
-}
-
-static inline void io_apic_modify_irq(struct irq_cfg *cfg,
-                               int mask_and, int mask_or,
-                               void (*final)(struct irq_pin_list *entry))
-{
-       int pin;
-       struct irq_pin_list *entry;
-
-       for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
-               unsigned int reg;
-               pin = entry->pin;
-               reg = io_apic_read(entry->apic, 0x10 + pin * 2);
-               reg &= mask_and;
-               reg |= mask_or;
-               io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
-               if (final)
-                       final(entry);
-       }
-}
-
-static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
-{
-       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
-}
-
-#ifdef CONFIG_X86_64
-static void io_apic_sync(struct irq_pin_list *entry)
-{
-       /*
-        * Synchronize the IO-APIC and the CPU by doing
-        * a dummy read from the IO-APIC
-        */
-       struct io_apic __iomem *io_apic;
-       io_apic = io_apic_base(entry->apic);
-       readl(&io_apic->data);
-}
-
-static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
-{
-       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
-}
-#else /* CONFIG_X86_32 */
-static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
-{
-       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, NULL);
-}
-
-static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
-{
-       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
-                       IO_APIC_REDIR_MASKED, NULL);
-}
-
-static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
-{
-       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
-                       IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
-}
-#endif /* CONFIG_X86_32 */
-
-static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
-{
-       struct irq_cfg *cfg = desc->chip_data;
-       unsigned long flags;
-
-       BUG_ON(!cfg);
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       __mask_IO_APIC_irq(cfg);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
-{
-       struct irq_cfg *cfg = desc->chip_data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       __unmask_IO_APIC_irq(cfg);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
-static void mask_IO_APIC_irq(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       mask_IO_APIC_irq_desc(desc);
-}
-static void unmask_IO_APIC_irq(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       unmask_IO_APIC_irq_desc(desc);
-}
-
-static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
-{
-       struct IO_APIC_route_entry entry;
-
-       /* Check delivery_mode to be sure we're not clearing an SMI pin */
-       entry = ioapic_read_entry(apic, pin);
-       if (entry.delivery_mode == dest_SMI)
-               return;
-       /*
-        * Disable it in the IO-APIC irq-routing table:
-        */
-       ioapic_mask_entry(apic, pin);
-}
-
-static void clear_IO_APIC(void)
-{
-       int apic, pin;
-
-       for (apic = 0; apic < nr_ioapics; apic++)
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-                       clear_IO_APIC_pin(apic, pin);
-}
-
-#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
-void send_IPI_self(int vector)
-{
-       unsigned int cfg;
-
-       /*
-        * Wait for idle.
-        */
-       apic_wait_icr_idle();
-       cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write(APIC_ICR, cfg);
-}
-#endif /* !CONFIG_SMP && CONFIG_X86_32*/
-
-#ifdef CONFIG_X86_32
-/*
- * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
- * specific CPU-side IRQs.
- */
-
-#define MAX_PIRQS 8
-static int pirq_entries [MAX_PIRQS];
-static int pirqs_enabled;
-
-static int __init ioapic_pirq_setup(char *str)
-{
-       int i, max;
-       int ints[MAX_PIRQS+1];
-
-       get_options(str, ARRAY_SIZE(ints), ints);
-
-       for (i = 0; i < MAX_PIRQS; i++)
-               pirq_entries[i] = -1;
-
-       pirqs_enabled = 1;
-       apic_printk(APIC_VERBOSE, KERN_INFO
-                       "PIRQ redirection, working around broken MP-BIOS.\n");
-       max = MAX_PIRQS;
-       if (ints[0] < MAX_PIRQS)
-               max = ints[0];
-
-       for (i = 0; i < max; i++) {
-               apic_printk(APIC_VERBOSE, KERN_DEBUG
-                               "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
-               /*
-                * PIRQs are mapped upside down, usually.
-                */
-               pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
-       }
-       return 1;
-}
-
-__setup("pirq=", ioapic_pirq_setup);
-#endif /* CONFIG_X86_32 */
-
-#ifdef CONFIG_INTR_REMAP
-/* I/O APIC RTE contents at the OS boot up */
-static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
-
-/*
- * Saves and masks all the unmasked IO-APIC RTEs.
- */
-int save_mask_IO_APIC_setup(void)
-{
-       union IO_APIC_reg_01 reg_01;
-       unsigned long flags;
-       int apic, pin;
-
-       /*
-        * The number of IO-APIC IRQ registers (== #pins):
-        */
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               spin_lock_irqsave(&ioapic_lock, flags);
-               reg_01.raw = io_apic_read(apic, 1);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-               nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-       }
-
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               early_ioapic_entries[apic] =
-                       kzalloc(sizeof(struct IO_APIC_route_entry) *
-                               nr_ioapic_registers[apic], GFP_KERNEL);
-               if (!early_ioapic_entries[apic])
-                       goto nomem;
-       }
-
-       for (apic = 0; apic < nr_ioapics; apic++)
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-                       struct IO_APIC_route_entry entry;
-
-                       entry = early_ioapic_entries[apic][pin] =
-                               ioapic_read_entry(apic, pin);
-                       if (!entry.mask) {
-                               entry.mask = 1;
-                               ioapic_write_entry(apic, pin, entry);
-                       }
-               }
-
-       return 0;
-
-nomem:
-       while (apic >= 0)
-               kfree(early_ioapic_entries[apic--]);
-       memset(early_ioapic_entries, 0,
-              sizeof(early_ioapic_entries));
-
-       return -ENOMEM;
-}
-
-void restore_IO_APIC_setup(void)
-{
-       int apic, pin;
-
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               if (!early_ioapic_entries[apic])
-                       break;
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
-                       ioapic_write_entry(apic, pin,
-                                          early_ioapic_entries[apic][pin]);
-               kfree(early_ioapic_entries[apic]);
-               early_ioapic_entries[apic] = NULL;
-       }
-}
-
-void reinit_intr_remapped_IO_APIC(int intr_remapping)
-{
-       /*
-        * For now this is a plain restore of the previous settings.
-        * TBD: when the OS enables interrupt-remapping, the IO-APIC
-        * RTEs need to be set up to point at interrupt-remapping table
-        * entries; until then, restore and wait for setup_IO_APIC_irqs()
-        * to do the proper initialization.
-        */
-       restore_IO_APIC_setup();
-}
-#endif
-
-/*
- * Find the IRQ entry number of a certain pin.
- */
-static int find_irq_entry(int apic, int pin, int type)
-{
-       int i;
-
-       for (i = 0; i < mp_irq_entries; i++)
-               if (mp_irqs[i].mp_irqtype == type &&
-                   (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
-                    mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
-                   mp_irqs[i].mp_dstirq == pin)
-                       return i;
-
-       return -1;
-}
-
-/*
- * Find the pin to which IRQ[irq] (ISA) is connected
- */
-static int __init find_isa_irq_pin(int irq, int type)
-{
-       int i;
-
-       for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
-
-               if (test_bit(lbus, mp_bus_not_pci) &&
-                   (mp_irqs[i].mp_irqtype == type) &&
-                   (mp_irqs[i].mp_srcbusirq == irq))
-
-                       return mp_irqs[i].mp_dstirq;
-       }
-       return -1;
-}
-
-static int __init find_isa_irq_apic(int irq, int type)
-{
-       int i;
-
-       for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
-
-               if (test_bit(lbus, mp_bus_not_pci) &&
-                   (mp_irqs[i].mp_irqtype == type) &&
-                   (mp_irqs[i].mp_srcbusirq == irq))
-                       break;
-       }
-       if (i < mp_irq_entries) {
-               int apic;
-               for (apic = 0; apic < nr_ioapics; apic++) {
-                       if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
-                               return apic;
-               }
-       }
-
-       return -1;
-}
-
-/*
- * Find a specific PCI IRQ entry.
- * Not an __init, possibly needed by modules
- */
-static int pin_2_irq(int idx, int apic, int pin);
-
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
-{
-       int apic, i, best_guess = -1;
-
-       apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
-               bus, slot, pin);
-       if (test_bit(bus, mp_bus_not_pci)) {
-               apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
-               return -1;
-       }
-       for (i = 0; i < mp_irq_entries; i++) {
-               int lbus = mp_irqs[i].mp_srcbus;
-
-               for (apic = 0; apic < nr_ioapics; apic++)
-                       if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
-                           mp_irqs[i].mp_dstapic == MP_APIC_ALL)
-                               break;
-
-               if (!test_bit(lbus, mp_bus_not_pci) &&
-                   !mp_irqs[i].mp_irqtype &&
-                   (bus == lbus) &&
-                   (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
-                       int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);
-
-                       if (!(apic || IO_APIC_IRQ(irq)))
-                               continue;
-
-                       if (pin == (mp_irqs[i].mp_srcbusirq & 3))
-                               return irq;
-                       /*
-                        * Use the first all-but-pin matching entry as a
-                        * best-guess fuzzy result for broken mptables.
-                        */
-                       if (best_guess < 0)
-                               best_guess = irq;
-               }
-       }
-       return best_guess;
-}
-
-EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
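-
-/*
- * For reference, mp_srcbusirq for a PCI source encodes (slot << 2) | pin
- * as decoded above: e.g. a value of 0x49 gives slot = (0x49 >> 2) & 0x1f
- * = 18 and pin = 0x49 & 3 = 1, i.e. INTB (counting INTA as pin 0).
- */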
-
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-/*
- * EISA Edge/Level control register, ELCR
- */
-static int EISA_ELCR(unsigned int irq)
-{
-       if (irq < NR_IRQS_LEGACY) {
-               unsigned int port = 0x4d0 + (irq >> 3);
-               return (inb(port) >> (irq & 7)) & 1;
-       }
-       apic_printk(APIC_VERBOSE, KERN_INFO
-                       "Broken MPtable reports ISA irq %d\n", irq);
-       return 0;
-}
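-
-/*
- * Worked example: IRQ 10 reads port 0x4d0 + (10 >> 3) = 0x4d1 and tests
- * bit 10 & 7 = 2; a set bit means level triggered, a clear bit edge.
- */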
-
-#endif
-
-/* ISA interrupts are always polarity zero edge triggered,
- * when listed as conforming in the MP table. */
-
-#define default_ISA_trigger(idx)       (0)
-#define default_ISA_polarity(idx)      (0)
-
-/* EISA interrupts are always polarity zero and can be edge or level
- * trigger depending on the ELCR value.  If an interrupt is listed as
- * EISA conforming in the MP table, that means its trigger type must
- * be read in from the ELCR */
-
-#define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
-#define default_EISA_polarity(idx)     default_ISA_polarity(idx)
-
-/* PCI interrupts are always polarity one level triggered,
- * when listed as conforming in the MP table. */
-
-#define default_PCI_trigger(idx)       (1)
-#define default_PCI_polarity(idx)      (1)
-
-/* MCA interrupts are always polarity zero level triggered,
- * when listed as conforming in the MP table. */
-
-#define default_MCA_trigger(idx)       (1)
-#define default_MCA_polarity(idx)      default_ISA_polarity(idx)
-
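-/*
- * MPBIOS_polarity()/MPBIOS_trigger() below decode the MP-table irqflag
- * nibble:
- *   bits 0-1: polarity (00 conforms to bus, 01 active high,
- *                       10 reserved, 11 active low)
- *   bits 2-3: trigger  (00 conforms to bus, 01 edge,
- *                       10 reserved, 11 level)
- */
-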
-static int MPBIOS_polarity(int idx)
-{
-       int bus = mp_irqs[idx].mp_srcbus;
-       int polarity;
-
-       /*
-        * Determine IRQ line polarity (high active or low active):
-        */
-       switch (mp_irqs[idx].mp_irqflag & 3)
-       {
-               case 0: /* conforms, ie. bus-type dependent polarity */
-                       if (test_bit(bus, mp_bus_not_pci))
-                               polarity = default_ISA_polarity(idx);
-                       else
-                               polarity = default_PCI_polarity(idx);
-                       break;
-               case 1: /* high active */
-               {
-                       polarity = 0;
-                       break;
-               }
-               case 2: /* reserved */
-               {
-                       printk(KERN_WARNING "broken BIOS!!\n");
-                       polarity = 1;
-                       break;
-               }
-               case 3: /* low active */
-               {
-                       polarity = 1;
-                       break;
-               }
-               default: /* invalid */
-               {
-                       printk(KERN_WARNING "broken BIOS!!\n");
-                       polarity = 1;
-                       break;
-               }
-       }
-       return polarity;
-}
-
-static int MPBIOS_trigger(int idx)
-{
-       int bus = mp_irqs[idx].mp_srcbus;
-       int trigger;
-
-       /*
-        * Determine IRQ trigger mode (edge or level sensitive):
-        */
-       switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
-       {
-               case 0: /* conforms, ie. bus-type dependent */
-                       if (test_bit(bus, mp_bus_not_pci))
-                               trigger = default_ISA_trigger(idx);
-                       else
-                               trigger = default_PCI_trigger(idx);
-#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-                       switch (mp_bus_id_to_type[bus]) {
-                               case MP_BUS_ISA: /* ISA pin */
-                               {
-                                       /* set before the switch */
-                                       break;
-                               }
-                               case MP_BUS_EISA: /* EISA pin */
-                               {
-                                       trigger = default_EISA_trigger(idx);
-                                       break;
-                               }
-                               case MP_BUS_PCI: /* PCI pin */
-                               {
-                                       /* set before the switch */
-                                       break;
-                               }
-                               case MP_BUS_MCA: /* MCA pin */
-                               {
-                                       trigger = default_MCA_trigger(idx);
-                                       break;
-                               }
-                               default:
-                               {
-                                       printk(KERN_WARNING "broken BIOS!!\n");
-                                       trigger = 1;
-                                       break;
-                               }
-                       }
-#endif
-                       break;
-               case 1: /* edge */
-               {
-                       trigger = 0;
-                       break;
-               }
-               case 2: /* reserved */
-               {
-                       printk(KERN_WARNING "broken BIOS!!\n");
-                       trigger = 1;
-                       break;
-               }
-               case 3: /* level */
-               {
-                       trigger = 1;
-                       break;
-               }
-               default: /* invalid */
-               {
-                       printk(KERN_WARNING "broken BIOS!!\n");
-                       trigger = 0;
-                       break;
-               }
-       }
-       return trigger;
-}
-
-static inline int irq_polarity(int idx)
-{
-       return MPBIOS_polarity(idx);
-}
-
-static inline int irq_trigger(int idx)
-{
-       return MPBIOS_trigger(idx);
-}
-
-int (*ioapic_renumber_irq)(int ioapic, int irq);
-static int pin_2_irq(int idx, int apic, int pin)
-{
-       int irq, i;
-       int bus = mp_irqs[idx].mp_srcbus;
-
-       /*
-        * Debugging check, we are in big trouble if this message pops up!
-        */
-       if (mp_irqs[idx].mp_dstirq != pin)
-               printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
-
-       if (test_bit(bus, mp_bus_not_pci)) {
-               irq = mp_irqs[idx].mp_srcbusirq;
-       } else {
-               /*
-                * PCI IRQs are mapped in order
-                */
-               i = irq = 0;
-               while (i < apic)
-                       irq += nr_ioapic_registers[i++];
-               irq += pin;
-               /*
-                 * For MPS mode, so far only needed by ES7000 platform
-                 */
-               if (ioapic_renumber_irq)
-                       irq = ioapic_renumber_irq(apic, irq);
-       }
-
-#ifdef CONFIG_X86_32
-       /*
-        * PCI IRQ command line redirection. Yes, limits are hardcoded.
-        */
-       if ((pin >= 16) && (pin <= 23)) {
-               if (pirq_entries[pin-16] != -1) {
-                       if (!pirq_entries[pin-16]) {
-                               apic_printk(APIC_VERBOSE, KERN_DEBUG
-                                               "disabling PIRQ%d\n", pin-16);
-                       } else {
-                               irq = pirq_entries[pin-16];
-                               apic_printk(APIC_VERBOSE, KERN_DEBUG
-                                               "using PIRQ%d -> IRQ %d\n",
-                                               pin-16, irq);
-                       }
-               }
-       }
-#endif
-
-       return irq;
-}
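-
-/*
- * Example of the in-order PCI mapping above: with two IO-APICs of 24
- * pins each, (apic 1, pin 3) yields irq = 24 + 3 = 27, barring an
- * ES7000-style renumbering or a pirq= override.
- */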
-
-void lock_vector_lock(void)
-{
-       /* Used to ensure that the online set of cpus does not change
-        * during assign_irq_vector.
-        */
-       spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
-       spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-       /*
-        * NOTE! The local APIC isn't very good at handling
-        * multiple interrupts at the same interrupt level.
-        * As the interrupt level is determined by taking the
-        * vector number and shifting that right by 4, we
-        * want to spread these out a bit so that they don't
-        * all fall in the same interrupt level.
-        *
-        * Also, we've got to be careful not to trash gate
-        * 0x80, because int 0x80 is hm, kind of importantish. ;)
-        */
-       static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
-       unsigned int old_vector;
-       int cpu, err;
-       cpumask_var_t tmp_mask;
-
-       if ((cfg->move_in_progress) || cfg->move_cleanup_count)
-               return -EBUSY;
-
-       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-               return -ENOMEM;
-
-       old_vector = cfg->vector;
-       if (old_vector) {
-               cpumask_and(tmp_mask, mask, cpu_online_mask);
-               cpumask_and(tmp_mask, cfg->domain, tmp_mask);
-               if (!cpumask_empty(tmp_mask)) {
-                       free_cpumask_var(tmp_mask);
-                       return 0;
-               }
-       }
-
-       /* Only try and allocate irqs on cpus that are present */
-       err = -ENOSPC;
-       for_each_cpu_and(cpu, mask, cpu_online_mask) {
-               int new_cpu;
-               int vector, offset;
-
-               vector_allocation_domain(cpu, tmp_mask);
-
-               vector = current_vector;
-               offset = current_offset;
-next:
-               vector += 8;
-               if (vector >= first_system_vector) {
-                       /* If out of vectors on large boxen, must share them. */
-                       offset = (offset + 1) % 8;
-                       vector = FIRST_DEVICE_VECTOR + offset;
-               }
-               if (unlikely(current_vector == vector))
-                       continue;
-
-               if (test_bit(vector, used_vectors))
-                       goto next;
-
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
-                       if (per_cpu(vector_irq, new_cpu)[vector] != -1)
-                               goto next;
-               /* Found one! */
-               current_vector = vector;
-               current_offset = offset;
-               if (old_vector) {
-                       cfg->move_in_progress = 1;
-                       cpumask_copy(cfg->old_domain, cfg->domain);
-               }
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
-                       per_cpu(vector_irq, new_cpu)[vector] = irq;
-               cfg->vector = vector;
-               cpumask_copy(cfg->domain, tmp_mask);
-               err = 0;
-               break;
-       }
-       free_cpumask_var(tmp_mask);
-       return err;
-}
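-
-/*
- * On the stepping above: the APIC priority class of a vector is
- * vector >> 4, so a 16-vector class takes at most two device vectors
- * per pass when stepping by 8.  Illustratively, if FIRST_DEVICE_VECTOR
- * were 0x41, successive allocations would try 0x49, 0x51, 0x59, ...,
- * and on wrap the offset rotates so the next pass tries 0x42, 0x4a, ...
- */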
-
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-       int err;
-       unsigned long flags;
-
-       spin_lock_irqsave(&vector_lock, flags);
-       err = __assign_irq_vector(irq, cfg, mask);
-       spin_unlock_irqrestore(&vector_lock, flags);
-       return err;
-}
-
-static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
-       int cpu, vector;
-
-       BUG_ON(!cfg->vector);
-
-       vector = cfg->vector;
-       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
-               per_cpu(vector_irq, cpu)[vector] = -1;
-
-       cfg->vector = 0;
-       cpumask_clear(cfg->domain);
-
-       if (likely(!cfg->move_in_progress))
-               return;
-       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
-               for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
-                                                               vector++) {
-                       if (per_cpu(vector_irq, cpu)[vector] != irq)
-                               continue;
-                       per_cpu(vector_irq, cpu)[vector] = -1;
-                       break;
-               }
-       }
-       cfg->move_in_progress = 0;
-}
-
-void __setup_vector_irq(int cpu)
-{
-       /*
-        * Initialize vector_irq on a new cpu.
-        * This function must be called with vector_lock held.
-        */
-       int irq, vector;
-       struct irq_cfg *cfg;
-       struct irq_desc *desc;
-
-       /* Mark the inuse vectors */
-       for_each_irq_desc(irq, desc) {
-               cfg = desc->chip_data;
-               if (!cpumask_test_cpu(cpu, cfg->domain))
-                       continue;
-               vector = cfg->vector;
-               per_cpu(vector_irq, cpu)[vector] = irq;
-       }
-       /* Mark the free vectors */
-       for (vector = 0; vector < NR_VECTORS; ++vector) {
-               irq = per_cpu(vector_irq, cpu)[vector];
-               if (irq < 0)
-                       continue;
-
-               cfg = irq_cfg(irq);
-               if (!cpumask_test_cpu(cpu, cfg->domain))
-                       per_cpu(vector_irq, cpu)[vector] = -1;
-       }
-}
-
-static struct irq_chip ioapic_chip;
-#ifdef CONFIG_INTR_REMAP
-static struct irq_chip ir_ioapic_chip;
-#endif
-
-#define IOAPIC_AUTO     -1
-#define IOAPIC_EDGE     0
-#define IOAPIC_LEVEL    1
-
-#ifdef CONFIG_X86_32
-static inline int IO_APIC_irq_trigger(int irq)
-{
-       int apic, idx, pin;
-
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-                       idx = find_irq_entry(apic, pin, mp_INT);
-                       if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
-                               return irq_trigger(idx);
-               }
-       }
-       /*
-         * nonexistent IRQs are edge default
-         */
-       return 0;
-}
-#else
-static inline int IO_APIC_irq_trigger(int irq)
-{
-       return 1;
-}
-#endif
-
-static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
-{
-       if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-           trigger == IOAPIC_LEVEL)
-               desc->status |= IRQ_LEVEL;
-       else
-               desc->status &= ~IRQ_LEVEL;
-
-#ifdef CONFIG_INTR_REMAP
-       if (irq_remapped(irq)) {
-               desc->status |= IRQ_MOVE_PCNTXT;
-               if (trigger)
-                       set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
-                                                     handle_fasteoi_irq,
-                                                    "fasteoi");
-               else
-                       set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
-                                                     handle_edge_irq, "edge");
-               return;
-       }
-#endif
-       if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-           trigger == IOAPIC_LEVEL)
-               set_irq_chip_and_handler_name(irq, &ioapic_chip,
-                                             handle_fasteoi_irq,
-                                             "fasteoi");
-       else
-               set_irq_chip_and_handler_name(irq, &ioapic_chip,
-                                             handle_edge_irq, "edge");
-}
-
-static int setup_ioapic_entry(int apic, int irq,
-                             struct IO_APIC_route_entry *entry,
-                             unsigned int destination, int trigger,
-                             int polarity, int vector)
-{
-       /*
-        * add it to the IO-APIC irq-routing table:
-        */
-       memset(entry, 0, sizeof(*entry));
-
-#ifdef CONFIG_INTR_REMAP
-       if (intr_remapping_enabled) {
-               struct intel_iommu *iommu = map_ioapic_to_ir(apic);
-               struct irte irte;
-               struct IR_IO_APIC_route_entry *ir_entry =
-                       (struct IR_IO_APIC_route_entry *) entry;
-               int index;
-
-               if (!iommu)
-                       panic("No mapping iommu for ioapic %d\n", apic);
-
-               index = alloc_irte(iommu, irq, 1);
-               if (index < 0)
-                       panic("Failed to allocate IRTE for ioapic %d\n", apic);
-
-               memset(&irte, 0, sizeof(irte));
-
-               irte.present = 1;
-               irte.dst_mode = INT_DEST_MODE;
-               irte.trigger_mode = trigger;
-               irte.dlvry_mode = INT_DELIVERY_MODE;
-               irte.vector = vector;
-               irte.dest_id = IRTE_DEST(destination);
-
-               modify_irte(irq, &irte);
-
-               ir_entry->index2 = (index >> 15) & 0x1;
-               ir_entry->zero = 0;
-               ir_entry->format = 1;
-               ir_entry->index = (index & 0x7fff);
-       } else
-#endif
-       {
-               entry->delivery_mode = INT_DELIVERY_MODE;
-               entry->dest_mode = INT_DEST_MODE;
-               entry->dest = destination;
-       }
-
-       entry->mask = 0;                                /* enable IRQ */
-       entry->trigger = trigger;
-       entry->polarity = polarity;
-       entry->vector = vector;
-
-       /* Mask level triggered irqs.
-        * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
-        */
-       if (trigger)
-               entry->mask = 1;
-       return 0;
-}
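-
-/*
- * Note on the remapped format above: the IRTE index returned by
- * alloc_irte() is split across the RTE, its low 15 bits in ->index
- * and bit 15 in ->index2.
- */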
-
-static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_desc *desc,
-                             int trigger, int polarity)
-{
-       struct irq_cfg *cfg;
-       struct IO_APIC_route_entry entry;
-       unsigned int dest;
-
-       if (!IO_APIC_IRQ(irq))
-               return;
-
-       cfg = desc->chip_data;
-
-       if (assign_irq_vector(irq, cfg, TARGET_CPUS))
-               return;
-
-       dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
-
-       apic_printk(APIC_VERBOSE, KERN_DEBUG
-                   "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
-                   "IRQ %d Mode:%i Active:%i)\n",
-                   apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
-                   irq, trigger, polarity);
-
-
-       if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
-                              dest, trigger, polarity, cfg->vector)) {
-               printk("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
-                      mp_ioapics[apic].mp_apicid, pin);
-               __clear_irq_vector(irq, cfg);
-               return;
-       }
-
-       ioapic_register_intr(irq, desc, trigger);
-       if (irq < NR_IRQS_LEGACY)
-               disable_8259A_irq(irq);
-
-       ioapic_write_entry(apic, pin, entry);
-}
-
-static void __init setup_IO_APIC_irqs(void)
-{
-       int apic, pin, idx, irq;
-       int notcon = 0;
-       struct irq_desc *desc;
-       struct irq_cfg *cfg;
-       int cpu = boot_cpu_id;
-
-       apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
-
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-
-                       idx = find_irq_entry(apic, pin, mp_INT);
-                       if (idx == -1) {
-                               if (!notcon) {
-                                       notcon = 1;
-                                       apic_printk(APIC_VERBOSE,
-                                               KERN_DEBUG " %d-%d",
-                                               mp_ioapics[apic].mp_apicid,
-                                               pin);
-                               } else
-                                       apic_printk(APIC_VERBOSE, " %d-%d",
-                                               mp_ioapics[apic].mp_apicid,
-                                               pin);
-                               continue;
-                       }
-                       if (notcon) {
-                               apic_printk(APIC_VERBOSE,
-                                       " (apicid-pin) not connected\n");
-                               notcon = 0;
-                       }
-
-                       irq = pin_2_irq(idx, apic, pin);
-#ifdef CONFIG_X86_32
-                       if (multi_timer_check(apic, irq))
-                               continue;
-#endif
-                       desc = irq_to_desc_alloc_cpu(irq, cpu);
-                       if (!desc) {
-                               printk(KERN_INFO "can not get irq_desc for %d\n", irq);
-                               continue;
-                       }
-                       cfg = desc->chip_data;
-                       add_pin_to_irq_cpu(cfg, cpu, apic, pin);
-
-                       setup_IO_APIC_irq(apic, pin, irq, desc,
-                                       irq_trigger(idx), irq_polarity(idx));
-               }
-       }
-
-       if (notcon)
-               apic_printk(APIC_VERBOSE,
-                       " (apicid-pin) not connected\n");
-}
-
-/*
- * Set up the timer pin, possibly with the 8259A-master behind.
- */
-static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
-                                       int vector)
-{
-       struct IO_APIC_route_entry entry;
-
-#ifdef CONFIG_INTR_REMAP
-       if (intr_remapping_enabled)
-               return;
-#endif
-
-       memset(&entry, 0, sizeof(entry));
-
-       /*
-        * We use logical delivery to get the timer IRQ
-        * to the first CPU.
-        */
-       entry.dest_mode = INT_DEST_MODE;
-       entry.mask = 1;                                 /* mask IRQ now */
-       entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
-       entry.delivery_mode = INT_DELIVERY_MODE;
-       entry.polarity = 0;
-       entry.trigger = 0;
-       entry.vector = vector;
-
-       /*
-        * The timer IRQ doesn't have to know that behind the
-        * scene we may have a 8259A-master in AEOI mode ...
-        */
-       set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
-
-       /*
-        * Add it to the IO-APIC irq-routing table:
-        */
-       ioapic_write_entry(apic, pin, entry);
-}
-
-
-__apicdebuginit(void) print_IO_APIC(void)
-{
-       int apic, i;
-       union IO_APIC_reg_00 reg_00;
-       union IO_APIC_reg_01 reg_01;
-       union IO_APIC_reg_02 reg_02;
-       union IO_APIC_reg_03 reg_03;
-       unsigned long flags;
-       struct irq_cfg *cfg;
-       struct irq_desc *desc;
-       unsigned int irq;
-
-       if (apic_verbosity == APIC_QUIET)
-               return;
-
-       printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
-       for (i = 0; i < nr_ioapics; i++)
-               printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
-                      mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
-
-       /*
-        * We are a bit conservative about what we expect.  We have to
-        * know about every hardware change ASAP.
-        */
-       printk(KERN_INFO "testing the IO APIC.......................\n");
-
-       for (apic = 0; apic < nr_ioapics; apic++) {
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       reg_00.raw = io_apic_read(apic, 0);
-       reg_01.raw = io_apic_read(apic, 1);
-       if (reg_01.bits.version >= 0x10)
-               reg_02.raw = io_apic_read(apic, 2);
-       if (reg_01.bits.version >= 0x20)
-               reg_03.raw = io_apic_read(apic, 3);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-
-       printk("\n");
-       printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
-       printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
-       printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
-       printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
-       printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);
-
-       printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
-       printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
-
-       printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
-       printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);
-
-       /*
-        * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
-        * but the value of reg_02 is read as the previous read register
-        * value, so ignore it if reg_02 == reg_01.
-        */
-       if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
-               printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
-               printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
-       }
-
-       /*
-        * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
-        * or reg_03, but the value of reg_0[23] is read as the previous read
-        * register value, so ignore it if reg_03 == reg_0[12].
-        */
-       if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
-           reg_03.raw != reg_01.raw) {
-               printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
-               printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
-       }
-
-       printk(KERN_DEBUG ".... IRQ redirection table:\n");
-
-       printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
-                         " Stat Dmod Deli Vect:   \n");
-
-       for (i = 0; i <= reg_01.bits.entries; i++) {
-               struct IO_APIC_route_entry entry;
-
-               entry = ioapic_read_entry(apic, i);
-
-               printk(KERN_DEBUG " %02x %03X ",
-                       i,
-                       entry.dest
-               );
-
-               printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
-                       entry.mask,
-                       entry.trigger,
-                       entry.irr,
-                       entry.polarity,
-                       entry.delivery_status,
-                       entry.dest_mode,
-                       entry.delivery_mode,
-                       entry.vector
-               );
-       }
-       }
-       printk(KERN_DEBUG "IRQ to pin mappings:\n");
-       for_each_irq_desc(irq, desc) {
-               struct irq_pin_list *entry;
-
-               cfg = desc->chip_data;
-               entry = cfg->irq_2_pin;
-               if (!entry)
-                       continue;
-               printk(KERN_DEBUG "IRQ%d ", irq);
-               for (;;) {
-                       printk("-> %d:%d", entry->apic, entry->pin);
-                       if (!entry->next)
-                               break;
-                       entry = entry->next;
-               }
-               printk("\n");
-       }
-
-       printk(KERN_INFO ".................................... done.\n");
-}
-
-__apicdebuginit(void) print_APIC_bitfield(int base)
-{
-       unsigned int v;
-       int i, j;
-
-       if (apic_verbosity == APIC_QUIET)
-               return;
-
-       printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
-       for (i = 0; i < 8; i++) {
-               v = apic_read(base + i*0x10);
-               for (j = 0; j < 32; j++) {
-                       if (v & (1<<j))
-                               printk("1");
-                       else
-                               printk("0");
-               }
-               printk("\n");
-       }
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
-       unsigned int v, ver, maxlvt;
-       u64 icr;
-
-       if (apic_verbosity == APIC_QUIET)
-               return;
-
-       printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-               smp_processor_id(), hard_smp_processor_id());
-       v = apic_read(APIC_ID);
-       printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
-       v = apic_read(APIC_LVR);
-       printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-       ver = GET_APIC_VERSION(v);
-       maxlvt = lapic_get_maxlvt();
-
-       v = apic_read(APIC_TASKPRI);
-       printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
-       if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
-               if (!APIC_XAPIC(ver)) {
-                       v = apic_read(APIC_ARBPRI);
-                       printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-                              v & APIC_ARBPRI_MASK);
-               }
-               v = apic_read(APIC_PROCPRI);
-               printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-       }
-
-       /*
-        * Remote read supported only in the 82489DX and local APIC for
-        * Pentium processors.
-        */
-       if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
-               v = apic_read(APIC_RRR);
-               printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-       }
-
-       v = apic_read(APIC_LDR);
-       printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-       if (!x2apic_enabled()) {
-               v = apic_read(APIC_DFR);
-               printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-       }
-       v = apic_read(APIC_SPIV);
-       printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
-       printk(KERN_DEBUG "... APIC ISR field:\n");
-       print_APIC_bitfield(APIC_ISR);
-       printk(KERN_DEBUG "... APIC TMR field:\n");
-       print_APIC_bitfield(APIC_TMR);
-       printk(KERN_DEBUG "... APIC IRR field:\n");
-       print_APIC_bitfield(APIC_IRR);
-
-       if (APIC_INTEGRATED(ver)) {             /* !82489DX */
-               if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
-                       apic_write(APIC_ESR, 0);
-
-               v = apic_read(APIC_ESR);
-               printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-       }
-
-       icr = apic_icr_read();
-       printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
-       printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
-       v = apic_read(APIC_LVTT);
-       printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
-       if (maxlvt > 3) {                       /* PC is LVT#4. */
-               v = apic_read(APIC_LVTPC);
-               printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-       }
-       v = apic_read(APIC_LVT0);
-       printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-       v = apic_read(APIC_LVT1);
-       printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
-       if (maxlvt > 2) {                       /* ERR is LVT#3. */
-               v = apic_read(APIC_LVTERR);
-               printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-       }
-
-       v = apic_read(APIC_TMICT);
-       printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-       v = apic_read(APIC_TMCCT);
-       printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-       v = apic_read(APIC_TDCR);
-       printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-       printk("\n");
-}
-
-__apicdebuginit(void) print_all_local_APICs(void)
-{
-       int cpu;
-
-       preempt_disable();
-       for_each_online_cpu(cpu)
-               smp_call_function_single(cpu, print_local_APIC, NULL, 1);
-       preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
-       unsigned int v;
-       unsigned long flags;
-
-       if (apic_verbosity == APIC_QUIET)
-               return;
-
-       printk(KERN_DEBUG "\nprinting PIC contents\n");
-
-       spin_lock_irqsave(&i8259A_lock, flags);
-
-       v = inb(0xa1) << 8 | inb(0x21);
-       printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
-
-       v = inb(0xa0) << 8 | inb(0x20);
-       printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
-
-       outb(0x0b, 0xa0);       /* OCW3: next read returns the ISR */
-       outb(0x0b, 0x20);
-       v = inb(0xa0) << 8 | inb(0x20);
-       outb(0x0a, 0xa0);       /* OCW3: switch back to reading the IRR */
-       outb(0x0a, 0x20);
-
-       spin_unlock_irqrestore(&i8259A_lock, flags);
-
-       printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
-
-       v = inb(0x4d1) << 8 | inb(0x4d0);
-       printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-__apicdebuginit(int) print_all_ICs(void)
-{
-       print_PIC();
-       print_all_local_APICs();
-       print_IO_APIC();
-
-       return 0;
-}
-
-fs_initcall(print_all_ICs);
-
-
-/* Where, if anywhere, is the i8259 connected in ExtINT mode */
-static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
-
-void __init enable_IO_APIC(void)
-{
-       union IO_APIC_reg_01 reg_01;
-       int i8259_apic, i8259_pin;
-       int apic;
-       unsigned long flags;
-
-#ifdef CONFIG_X86_32
-       int i;
-       if (!pirqs_enabled)
-               for (i = 0; i < MAX_PIRQS; i++)
-                       pirq_entries[i] = -1;
-#endif
-
-       /*
-        * The number of IO-APIC IRQ registers (== #pins):
-        */
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               spin_lock_irqsave(&ioapic_lock, flags);
-               reg_01.raw = io_apic_read(apic, 1);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-               nr_ioapic_registers[apic] = reg_01.bits.entries+1;
-       }
-       for (apic = 0; apic < nr_ioapics; apic++) {
-               int pin;
-               /* See if any of the pins is in ExtINT mode */
-               for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-                       struct IO_APIC_route_entry entry;
-                       entry = ioapic_read_entry(apic, pin);
-
-                       /* If the interrupt line is enabled and in ExtINT
-                        * mode, we have found the pin where the i8259 is
-                        * connected.
-                        */
-                       if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
-                               ioapic_i8259.apic = apic;
-                               ioapic_i8259.pin  = pin;
-                               goto found_i8259;
-                       }
-               }
-       }
- found_i8259:
-       /*
-        * Look to see if the MP table has reported the ExtINT.
-        * If we could not find the appropriate pin by looking at the
-        * ioapic, the i8259 is probably not connected to the ioapic,
-        * but give the mptable a chance anyway.
-        */
-       i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
-       i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
-       /* Trust the MP table if nothing is setup in the hardware */
-       if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
-               printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
-               ioapic_i8259.pin  = i8259_pin;
-               ioapic_i8259.apic = i8259_apic;
-       }
-       /* Complain if the MP table and the hardware disagree */
-       if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
-               (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
-       {
-               printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
-       }
-
-       /*
-        * Do not trust the IO-APIC being empty at bootup
-        */
-       clear_IO_APIC();
-}
-
-/*
- * Not an __init, needed by the reboot code
- */
-void disable_IO_APIC(void)
-{
-       /*
-        * Clear the IO-APIC before rebooting:
-        */
-       clear_IO_APIC();
-
-       /*
-        * If the i8259 is routed through an IOAPIC,
-        * put that IOAPIC in virtual wire mode
-        * so legacy interrupts can be delivered.
-        */
-       if (ioapic_i8259.pin != -1) {
-               struct IO_APIC_route_entry entry;
-
-               memset(&entry, 0, sizeof(entry));
-               entry.mask            = 0; /* Enabled */
-               entry.trigger         = 0; /* Edge */
-               entry.irr             = 0;
-               entry.polarity        = 0; /* High */
-               entry.delivery_status = 0;
-               entry.dest_mode       = 0; /* Physical */
-               entry.delivery_mode   = dest_ExtINT; /* ExtInt */
-               entry.vector          = 0;
-               entry.dest            = read_apic_id();
-
-               /*
-                * Add it to the IO-APIC irq-routing table:
-                */
-               ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
-       }
-
-       disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-}
-
-#ifdef CONFIG_X86_32
-/*
- * function to set the IO-APIC physical IDs based on the
- * values stored in the MPC table.
- *
- * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
- */
-
-static void __init setup_ioapic_ids_from_mpc(void)
-{
-       union IO_APIC_reg_00 reg_00;
-       physid_mask_t phys_id_present_map;
-       int apic;
-       int i;
-       unsigned char old_id;
-       unsigned long flags;
-
-       if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
-               return;
-
-       /*
-        * Don't check I/O APIC IDs for xAPIC systems.  They have
-        * no meaning without the serial APIC bus.
-        */
-       if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
-               || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-               return;
-       /*
-        * This is broken; anything with a real cpu count has to
-        * circumvent this idiocy regardless.
-        */
-       phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
-
-       /*
-        * Set the IOAPIC ID to the value stored in the MPC table.
-        */
-       for (apic = 0; apic < nr_ioapics; apic++) {
-
-               /* Read the register 0 value */
-               spin_lock_irqsave(&ioapic_lock, flags);
-               reg_00.raw = io_apic_read(apic, 0);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-
-               old_id = mp_ioapics[apic].mp_apicid;
-
-               if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
-                       printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
-                               apic, mp_ioapics[apic].mp_apicid);
-                       printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-                               reg_00.bits.ID);
-                       mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
-               }
-
-               /*
-                * Sanity check, is the ID really free? Every APIC in a
-                * system must have a unique ID or we get lots of nice
-                * 'stuck on smp_invalidate_needed IPI wait' messages.
-                */
-               if (check_apicid_used(phys_id_present_map,
-                                       mp_ioapics[apic].mp_apicid)) {
-                       printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
-                               apic, mp_ioapics[apic].mp_apicid);
-                       for (i = 0; i < get_physical_broadcast(); i++)
-                               if (!physid_isset(i, phys_id_present_map))
-                                       break;
-                       if (i >= get_physical_broadcast())
-                               panic("Max APIC ID exceeded!\n");
-                       printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
-                               i);
-                       physid_set(i, phys_id_present_map);
-                       mp_ioapics[apic].mp_apicid = i;
-               } else {
-                       physid_mask_t tmp;
-                       tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
-                       apic_printk(APIC_VERBOSE, "Setting %d in the "
-                                       "phys_id_present_map\n",
-                                       mp_ioapics[apic].mp_apicid);
-                       physids_or(phys_id_present_map, phys_id_present_map, tmp);
-               }
-
-
-               /*
-                * We need to adjust the IRQ routing table
-                * if the ID changed.
-                */
-               if (old_id != mp_ioapics[apic].mp_apicid)
-                       for (i = 0; i < mp_irq_entries; i++)
-                               if (mp_irqs[i].mp_dstapic == old_id)
-                                       mp_irqs[i].mp_dstapic
-                                               = mp_ioapics[apic].mp_apicid;
-
-               /*
-                * Read the right value from the MPC table and
-                * write it into the ID register.
-                */
-               apic_printk(APIC_VERBOSE, KERN_INFO
-                       "...changing IO-APIC physical APIC ID to %d ...",
-                       mp_ioapics[apic].mp_apicid);
-
-               reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
-               spin_lock_irqsave(&ioapic_lock, flags);
-               io_apic_write(apic, 0, reg_00.raw);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-
-               /*
-                * Sanity check
-                */
-               spin_lock_irqsave(&ioapic_lock, flags);
-               reg_00.raw = io_apic_read(apic, 0);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-               if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
-                       printk("could not set ID!\n");
-               else
-                       apic_printk(APIC_VERBOSE, " ok.\n");
-       }
-}
-#endif
-
-int no_timer_check __initdata;
-
-static int __init notimercheck(char *s)
-{
-       no_timer_check = 1;
-       return 1;
-}
-__setup("no_timer_check", notimercheck);
-
-/*
- * There is a nasty bug in some older SMP boards: their mptable lies
- * about the timer IRQ. We do the following to work around the situation:
- *
- *     - timer IRQ defaults to IO-APIC IRQ
- *     - if this function detects that timer IRQs are defunct, then we fall
- *       back to ISA timer IRQs
- */
-static int __init timer_irq_works(void)
-{
-       unsigned long t1 = jiffies;
-       unsigned long flags;
-
-       if (no_timer_check)
-               return 1;
-
-       local_save_flags(flags);
-       local_irq_enable();
-       /* Let ten ticks pass... */
-       mdelay((10 * 1000) / HZ);
-       local_irq_restore(flags);
-
-       /*
-        * Expect a few ticks at least, to be sure some possible
-        * glue logic does not lock up after one or two first
-        * ticks in a non-ExtINT mode.  Also the local APIC
-        * might have cached one ExtINT interrupt.  Finally, at
-        * least one tick may be lost due to delays.
-        */
-
-       /* jiffies wrap? */
-       if (time_after(jiffies, t1 + 4))
-               return 1;
-       return 0;
-}
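-
-/*
- * The arithmetic above: mdelay((10 * 1000) / HZ) busy-waits for ten
- * tick periods (10ms at HZ=1000, 40ms at HZ=250), after which jiffies
- * must have advanced by more than four ticks for the timer to pass.
- */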
-
-/*
- * In the SMP+IOAPIC case it might happen that there are an unspecified
- * number of pending IRQ events unhandled. These cases are very rare,
- * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
- * better to do it this way because then we do not have to be aware of
- * 'pending' interrupts in the IRQ path, except at this point.
- */
-/*
- * Edge triggered needs to resend any interrupt
- * that was delayed but this is now handled in the device
- * independent code.
- */
-
-/*
- * Starting up an edge-triggered IO-APIC interrupt is
- * nasty - we need to make sure that we get the edge.
- * If it is already asserted for some reason, we need to
- * return 1 to indicate that it was pending.
- *
- * This is not complete - we should be able to fake
- * an edge even if it isn't on the 8259A...
- */
-
-static unsigned int startup_ioapic_irq(unsigned int irq)
-{
-       int was_pending = 0;
-       unsigned long flags;
-       struct irq_cfg *cfg;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       if (irq < NR_IRQS_LEGACY) {
-               disable_8259A_irq(irq);
-               if (i8259A_irq_pending(irq))
-                       was_pending = 1;
-       }
-       cfg = irq_cfg(irq);
-       __unmask_IO_APIC_irq(cfg);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-
-       return was_pending;
-}
-
-#ifdef CONFIG_X86_64
-static int ioapic_retrigger_irq(unsigned int irq)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-       unsigned long flags;
-
-       spin_lock_irqsave(&vector_lock, flags);
-       send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
-       spin_unlock_irqrestore(&vector_lock, flags);
-
-       return 1;
-}
-#else
-static int ioapic_retrigger_irq(unsigned int irq)
-{
-       send_IPI_self(irq_cfg(irq)->vector);
-
-       return 1;
-}
-#endif
-
-/*
- * Level and edge triggered IO-APIC interrupts need different handling,
- * so we use two separate IRQ descriptors. Edge triggered IRQs can be
- * handled with the level-triggered descriptor, but that one has slightly
- * more overhead. Level-triggered interrupts cannot be handled with the
- * edge-triggered handler, without risking IRQ storms and other ugly
- * races.
- */
-
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_INTR_REMAP
-static void ir_irq_migration(struct work_struct *work);
-
-static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
-
-/*
- * Migrate the IO-APIC irq in the presence of intr-remapping.
- *
- * For edge triggered, irq migration is a simple atomic update (of vector
- * and cpu destination) of the IRTE and a flush of the hardware cache.
- *
- * For level triggered, we need to modify the io-apic RTE as well with the
- * updated vector information, along with modifying the IRTE with vector
- * and destination. So irq migration for level triggered is a little bit
- * more complex compared to edge triggered migration. But the good news
- * is that we use the same algorithm for level triggered migration as we
- * have today; the only difference is that we now initiate the irq
- * migration from process context instead of interrupt context.
- *
- * In the future, when we do a directed EOI (combined with cpu EOI
- * broadcast suppression) to the IO-APIC, level triggered irq migration
- * will also be as simple as edge triggered migration and we can do the
- * irq migration with a simple atomic update to the IO-APIC RTE.
- */
-static void
-migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
-{
-       struct irq_cfg *cfg;
-       struct irte irte;
-       int modify_ioapic_rte;
-       unsigned int dest;
-       unsigned long flags;
-       unsigned int irq;
-
-       if (!cpumask_intersects(mask, cpu_online_mask))
-               return;
-
-       irq = desc->irq;
-       if (get_irte(irq, &irte))
-               return;
-
-       cfg = desc->chip_data;
-       if (assign_irq_vector(irq, cfg, mask))
-               return;
-
-       set_extra_move_desc(desc, mask);
-
-       dest = cpu_mask_to_apicid_and(cfg->domain, mask);
-
-       modify_ioapic_rte = desc->status & IRQ_LEVEL;
-       if (modify_ioapic_rte) {
-               spin_lock_irqsave(&ioapic_lock, flags);
-               __target_IO_APIC_irq(irq, dest, cfg);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-       }
-
-       irte.vector = cfg->vector;
-       irte.dest_id = IRTE_DEST(dest);
-
-       /*
-        * Modify the IRTE and flush the interrupt entry cache.
-        */
-       modify_irte(irq, &irte);
-
-       if (cfg->move_in_progress)
-               send_cleanup_vector(cfg);
-
-       cpumask_copy(&desc->affinity, mask);
-}
-
-static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
-{
-       int ret = -1;
-       struct irq_cfg *cfg = desc->chip_data;
-
-       mask_IO_APIC_irq_desc(desc);
-
-       if (io_apic_level_ack_pending(cfg)) {
-               /*
-                * Interrupt in progress. Migrating irq now will change the
-                * vector information in the IO-APIC RTE and that will confuse
-                * the EOI broadcast performed by cpu.
-                * So, delay the irq migration to the next instance.
-                */
-               schedule_delayed_work(&ir_migration_work, 1);
-               goto unmask;
-       }
-
-       /* everything is clear. we have right of way */
-       migrate_ioapic_irq_desc(desc, &desc->pending_mask);
-
-       ret = 0;
-       desc->status &= ~IRQ_MOVE_PENDING;
-       cpumask_clear(&desc->pending_mask);
-
-unmask:
-       unmask_IO_APIC_irq_desc(desc);
-
-       return ret;
-}
-
-static void ir_irq_migration(struct work_struct *work)
-{
-       unsigned int irq;
-       struct irq_desc *desc;
-
-       for_each_irq_desc(irq, desc) {
-               if (desc->status & IRQ_MOVE_PENDING) {
-                       unsigned long flags;
-
-                       spin_lock_irqsave(&desc->lock, flags);
-                       if (!desc->chip->set_affinity ||
-                           !(desc->status & IRQ_MOVE_PENDING)) {
-                               desc->status &= ~IRQ_MOVE_PENDING;
-                               spin_unlock_irqrestore(&desc->lock, flags);
-                               continue;
-                       }
-
-                       desc->chip->set_affinity(irq, &desc->pending_mask);
-                       spin_unlock_irqrestore(&desc->lock, flags);
-               }
-       }
-}
-
-/*
- * Migrates the IRQ destination in the process context.
- */
-static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
-                                           const struct cpumask *mask)
-{
-       if (desc->status & IRQ_LEVEL) {
-               desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, mask);
-               migrate_irq_remapped_level_desc(desc);
-               return;
-       }
-
-       migrate_ioapic_irq_desc(desc, mask);
-}
-
-static void set_ir_ioapic_affinity_irq(unsigned int irq,
-                                      const struct cpumask *mask)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       set_ir_ioapic_affinity_irq_desc(desc, mask);
-}
-#endif
-
-asmlinkage void smp_irq_move_cleanup_interrupt(void)
-{
-       unsigned vector, me;
-
-       ack_APIC_irq();
-       exit_idle();
-       irq_enter();
-
-       me = smp_processor_id();
-       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-               unsigned int irq;
-               struct irq_desc *desc;
-               struct irq_cfg *cfg;
-               irq = __get_cpu_var(vector_irq)[vector];
-
-               if (irq == -1)
-                       continue;
-
-               desc = irq_to_desc(irq);
-               if (!desc)
-                       continue;
-
-               cfg = irq_cfg(irq);
-               spin_lock(&desc->lock);
-               if (!cfg->move_cleanup_count)
-                       goto unlock;
-
-               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-                       goto unlock;
-
-               __get_cpu_var(vector_irq)[vector] = -1;
-               cfg->move_cleanup_count--;
-unlock:
-               spin_unlock(&desc->lock);
-       }
-
-       irq_exit();
-}
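
For intuition, here is a minimal user-space sketch of the cleanup pass above (illustrative only: a single irq, one CPU's table, no locking). Each CPU keeps a vector_irq[] table mapping vectors back to irq numbers; once a migration has completed, the entry for the old vector is retired to -1:

    #include <stdio.h>

    #define NR_VECTORS             256
    #define FIRST_EXTERNAL_VECTOR  0x20

    struct irq_cfg { int vector; int move_cleanup_count; };

    int main(void)
    {
        static int vector_irq[NR_VECTORS];
        /* irq 10 has just moved from vector 0x31 to vector 0x41 */
        struct irq_cfg cfg = { .vector = 0x41, .move_cleanup_count = 1 };
        int v;

        for (v = 0; v < NR_VECTORS; v++)
            vector_irq[v] = -1;
        vector_irq[0x31] = 10;    /* stale mapping from before the move */
        vector_irq[0x41] = 10;    /* current mapping after the move */

        for (v = FIRST_EXTERNAL_VECTOR; v < NR_VECTORS; v++) {
            int irq = vector_irq[v];

            if (irq == -1 || !cfg.move_cleanup_count)
                continue;
            if (v == cfg.vector)  /* live vector: keep it */
                continue;
            vector_irq[v] = -1;   /* retire the stale vector */
            cfg.move_cleanup_count--;
            printf("freed vector 0x%02x of irq %d\n", v, irq);
        }
        return 0;
    }
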
-
-static void irq_complete_move(struct irq_desc **descp)
-{
-       struct irq_desc *desc = *descp;
-       struct irq_cfg *cfg = desc->chip_data;
-       unsigned vector, me;
-
-       if (likely(!cfg->move_in_progress)) {
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-               if (likely(!cfg->move_desc_pending))
-                       return;
-
-               /* domain has not changed, but affinity did */
-               me = smp_processor_id();
-               if (cpu_isset(me, desc->affinity)) {
-                       *descp = desc = move_irq_desc(desc, me);
-                       /* get the new one */
-                       cfg = desc->chip_data;
-                       cfg->move_desc_pending = 0;
-               }
-#endif
-               return;
-       }
-
-       vector = ~get_irq_regs()->orig_ax;
-       me = smp_processor_id();
-
-       if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) {
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
-               *descp = desc = move_irq_desc(desc, me);
-               /* get the new one */
-               cfg = desc->chip_data;
-#endif
-               send_cleanup_vector(cfg);
-       }
-}
-#else
-static inline void irq_complete_move(struct irq_desc **descp) {}
-#endif
-
-#ifdef CONFIG_INTR_REMAP
-static void ack_x2apic_level(unsigned int irq)
-{
-       ack_x2APIC_irq();
-}
-
-static void ack_x2apic_edge(unsigned int irq)
-{
-       ack_x2APIC_irq();
-}
-
-#endif
-
-static void ack_apic_edge(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       irq_complete_move(&desc);
-       move_native_irq(irq);
-       ack_APIC_irq();
-}
-
-atomic_t irq_mis_count;
-
-static void ack_apic_level(unsigned int irq)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-
-#ifdef CONFIG_X86_32
-       unsigned long v;
-       int i;
-#endif
-       struct irq_cfg *cfg;
-       int do_unmask_irq = 0;
-
-       irq_complete_move(&desc);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-       /* If we are moving the irq we need to mask it */
-       if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
-               do_unmask_irq = 1;
-               mask_IO_APIC_irq_desc(desc);
-       }
-#endif
-
-#ifdef CONFIG_X86_32
-       /*
-        * It appears there is an erratum which affects at least version 0x11
-        * of I/O APIC (that's the 82093AA and cores integrated into various
-        * chipsets).  Under certain conditions a level-triggered interrupt is
-        * erroneously delivered as an edge-triggered one but the respective
-        * IRR bit gets set nevertheless.  As a result the I/O unit expects an
-        * EOI message but it will never arrive and further interrupts are
-        * blocked from the source.  The exact reason is so far unknown, but
-        * the phenomenon was observed when two consecutive interrupt requests
-        * from a given source get delivered to the same CPU and the source is
-        * temporarily disabled in between.
-        *
-        * A workaround is to simulate an EOI message manually.  We achieve it
-        * by setting the trigger mode to edge and then to level when the edge
-        * trigger mode gets detected in the TMR of a local APIC for a
-        * level-triggered interrupt.  We mask the source for the time of the
-        * operation to prevent an edge-triggered interrupt escaping meanwhile.
-        * The idea is from Manfred Spraul.  --macro
-        */
-       cfg = desc->chip_data;
-       i = cfg->vector;
-
-       v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
-#endif
-
-       /*
-        * We must acknowledge the irq before we move it or the acknowledge will
-        * not propagate properly.
-        */
-       ack_APIC_irq();
-
-       /* Now we can move and re-enable the irq */
-       if (unlikely(do_unmask_irq)) {
-               /* Only migrate the irq if the ack has been received.
-                *
-                * On rare occasions the broadcast level triggered ack gets
-                * delayed going to ioapics, and if we reprogram the
-                * vector while Remote IRR is still set the irq will never
-                * fire again.
-                *
-                * To prevent this scenario we read the Remote IRR bit
-                * of the ioapic.  This has two effects.
-                * - On any sane system the read of the ioapic will
-                *   flush writes (and acks) going to the ioapic from
-                *   this cpu.
-                * - We get to see if the ACK has actually been delivered.
-                *
-                * Based on failed experiments of reprogramming the
-                * ioapic entry from outside of irq context (starting
-                * with masking the ioapic entry and then polling until
-                * Remote IRR was clear before reprogramming it),
-                * I don't trust the Remote IRR bit to be
-                * completely accurate.
-                *
-                * However there appears to be no other way to plug
-                * this race, so if the Remote IRR bit is not
-                * accurate and is causing problems then it is a hardware bug
-                * and you can go talk to the chipset vendor about it.
-                */
-               cfg = desc->chip_data;
-               if (!io_apic_level_ack_pending(cfg))
-                       move_masked_irq(irq);
-               unmask_IO_APIC_irq_desc(desc);
-       }
-
-#ifdef CONFIG_X86_32
-       if (!(v & (1 << (i & 0x1f)))) {
-               atomic_inc(&irq_mis_count);
-               spin_lock(&ioapic_lock);
-               __mask_and_edge_IO_APIC_irq(cfg);
-               __unmask_and_level_IO_APIC_irq(cfg);
-               spin_unlock(&ioapic_lock);
-       }
-#endif
-}
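
The TMR read above packs one trigger-mode bit per vector into 32-bit registers spaced 0x10 bytes apart, so (i & ~0x1f) >> 1 picks the register offset and i & 0x1f the bit within it. A standalone sketch of that index arithmetic (the APIC_TMR base is the usual apicdef.h value; no hardware is touched):

    #include <stdio.h>

    #define APIC_TMR 0x180  /* base of the Trigger Mode Register bank */

    int main(void)
    {
        unsigned int vector;

        for (vector = 0x20; vector < 0x60; vector += 0x13) {
            unsigned int reg = APIC_TMR + ((vector & ~0x1f) >> 1);
            unsigned int bit = vector & 0x1f;

            printf("vector 0x%02x -> TMR register 0x%03x, bit %u\n",
                   vector, reg, bit);
        }
        return 0;
    }
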
-
-static struct irq_chip ioapic_chip __read_mostly = {
-       .name           = "IO-APIC",
-       .startup        = startup_ioapic_irq,
-       .mask           = mask_IO_APIC_irq,
-       .unmask         = unmask_IO_APIC_irq,
-       .ack            = ack_apic_edge,
-       .eoi            = ack_apic_level,
-#ifdef CONFIG_SMP
-       .set_affinity   = set_ioapic_affinity_irq,
-#endif
-       .retrigger      = ioapic_retrigger_irq,
-};
-
-#ifdef CONFIG_INTR_REMAP
-static struct irq_chip ir_ioapic_chip __read_mostly = {
-       .name           = "IR-IO-APIC",
-       .startup        = startup_ioapic_irq,
-       .mask           = mask_IO_APIC_irq,
-       .unmask         = unmask_IO_APIC_irq,
-       .ack            = ack_x2apic_edge,
-       .eoi            = ack_x2apic_level,
-#ifdef CONFIG_SMP
-       .set_affinity   = set_ir_ioapic_affinity_irq,
-#endif
-       .retrigger      = ioapic_retrigger_irq,
-};
-#endif
-
-static inline void init_IO_APIC_traps(void)
-{
-       int irq;
-       struct irq_desc *desc;
-       struct irq_cfg *cfg;
-
-       /*
-        * NOTE! The local APIC isn't very good at handling
-        * multiple interrupts at the same interrupt level.
-        * As the interrupt level is determined by taking the
-        * vector number and shifting that right by 4, we
-        * want to spread these out a bit so that they don't
-        * all fall in the same interrupt level.
-        *
-        * Also, we've got to be careful not to trash gate
-        * 0x80, because int 0x80 is hm, kind of importantish. ;)
-        */
-       for_each_irq_desc(irq, desc) {
-               cfg = desc->chip_data;
-               if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
-                       /*
-                        * Hmm.. We don't have an entry for this,
-                        * so default to an old-fashioned 8259
-                        * interrupt if we can..
-                        */
-                       if (irq < NR_IRQS_LEGACY)
-                               make_8259A_irq(irq);
-                       else
-                               /* Strange. Oh, well.. */
-                               desc->chip = &no_irq_chip;
-               }
-       }
-}
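
As the NOTE above explains, the local APIC derives an interrupt's priority level from bits 7:4 of the vector (vector >> 4), which is why vectors are spread across the 0xN0 bands instead of being packed densely. A quick standalone illustration:

    #include <stdio.h>

    int main(void)
    {
        const unsigned int vectors[] = { 0x31, 0x41, 0x51, 0x80, 0xef };
        unsigned int i;

        for (i = 0; i < sizeof(vectors) / sizeof(vectors[0]); i++)
            printf("vector 0x%02x -> priority level %u\n",
                   vectors[i], vectors[i] >> 4);
        return 0;
    }
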
-
-/*
- * The local APIC irq-chip implementation:
- */
-
-static void mask_lapic_irq(unsigned int irq)
-{
-       unsigned long v;
-
-       v = apic_read(APIC_LVT0);
-       apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
-}
-
-static void unmask_lapic_irq(unsigned int irq)
-{
-       unsigned long v;
-
-       v = apic_read(APIC_LVT0);
-       apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
-}
-
-static void ack_lapic_irq(unsigned int irq)
-{
-       ack_APIC_irq();
-}
-
-static struct irq_chip lapic_chip __read_mostly = {
-       .name           = "local-APIC",
-       .mask           = mask_lapic_irq,
-       .unmask         = unmask_lapic_irq,
-       .ack            = ack_lapic_irq,
-};
-
-static void lapic_register_intr(int irq, struct irq_desc *desc)
-{
-       desc->status &= ~IRQ_LEVEL;
-       set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
-                                     "edge");
-}
-
-static void __init setup_nmi(void)
-{
-       /*
-        * Dirty trick to enable the NMI watchdog ...
-        * We put the 8259A master into AEOI mode and
-        * unmask LVT0 as NMI on all local APICs.
-        *
-        * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
-        * is from Maciej W. Rozycki - so we do not have to EOI from
-        * the NMI handler or the timer interrupt.
-        */
-       apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
-
-       enable_NMI_through_LVT0();
-
-       apic_printk(APIC_VERBOSE, " done.\n");
-}
-
-/*
- * This looks a bit hackish but it's about the only way of sending
- * a few INTA cycles to 8259As and any associated glue logic.  ICR does
- * not support the ExtINT mode, unfortunately.  We need to send these
- * cycles as some i82489DX-based boards have glue logic that keeps the
- * 8259A interrupt line asserted until INTA.  --macro
- */
-static inline void __init unlock_ExtINT_logic(void)
-{
-       int apic, pin, i;
-       struct IO_APIC_route_entry entry0, entry1;
-       unsigned char save_control, save_freq_select;
-
-       pin  = find_isa_irq_pin(8, mp_INT);
-       if (pin == -1) {
-               WARN_ON_ONCE(1);
-               return;
-       }
-       apic = find_isa_irq_apic(8, mp_INT);
-       if (apic == -1) {
-               WARN_ON_ONCE(1);
-               return;
-       }
-
-       entry0 = ioapic_read_entry(apic, pin);
-       clear_IO_APIC_pin(apic, pin);
-
-       memset(&entry1, 0, sizeof(entry1));
-
-       entry1.dest_mode = 0;                   /* physical destination mode */
-       entry1.mask = 0;                        /* unmask IRQ now */
-       entry1.dest = hard_smp_processor_id();
-       entry1.delivery_mode = dest_ExtINT;
-       entry1.polarity = entry0.polarity;
-       entry1.trigger = 0;
-       entry1.vector = 0;
-
-       ioapic_write_entry(apic, pin, entry1);
-
-       save_control = CMOS_READ(RTC_CONTROL);
-       save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
-       CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
-                  RTC_FREQ_SELECT);
-       CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
-
-       i = 100;
-       while (i-- > 0) {
-               mdelay(10);
-               if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
-                       i -= 10;
-       }
-
-       CMOS_WRITE(save_control, RTC_CONTROL);
-       CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
-       clear_IO_APIC_pin(apic, pin);
-
-       ioapic_write_entry(apic, pin, entry0);
-}
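
The CMOS writes above arm the RTC's periodic interrupt: rate select 0x6 yields a 1024 Hz tick (frequency = 32768 >> (rate - 1)) and RTC_PIE enables it, which is what provokes the INTA cycles. The rate/frequency relationship, as pure arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rate;

        /* Valid RTC periodic rates are 3..15; rate 6 -> 1024 Hz. */
        for (rate = 3; rate <= 15; rate++)
            printf("rate select 0x%x -> %5u Hz\n",
                   rate, 32768u >> (rate - 1));
        return 0;
    }
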
-
-static int disable_timer_pin_1 __initdata;
-/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
-static int __init disable_timer_pin_setup(char *arg)
-{
-       disable_timer_pin_1 = 1;
-       return 0;
-}
-early_param("disable_timer_pin_1", disable_timer_pin_setup);
-
-int timer_through_8259 __initdata;
-
-/*
- * This code may look a bit paranoid, but it's supposed to cooperate with
- * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
- * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
- * fanatically on his truly buggy board.
- *
- * FIXME: really need to revamp this for all platforms.
- */
-static inline void __init check_timer(void)
-{
-       struct irq_desc *desc = irq_to_desc(0);
-       struct irq_cfg *cfg = desc->chip_data;
-       int cpu = boot_cpu_id;
-       int apic1, pin1, apic2, pin2;
-       unsigned long flags;
-       unsigned int ver;
-       int no_pin1 = 0;
-
-       local_irq_save(flags);
-
-       ver = apic_read(APIC_LVR);
-       ver = GET_APIC_VERSION(ver);
-
-       /*
-        * get/set the timer IRQ vector:
-        */
-       disable_8259A_irq(0);
-       assign_irq_vector(0, cfg, TARGET_CPUS);
-
-       /*
-        * As IRQ0 is to be enabled in the 8259A, the virtual
-        * wire has to be disabled in the local APIC.  Also
-        * timer interrupts need to be acknowledged manually in
-        * the 8259A for the i82489DX when using the NMI
-        * watchdog as that APIC treats NMIs as level-triggered.
-        * The AEOI mode will finish them in the 8259A
-        * automatically.
-        */
-       apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
-       init_8259A(1);
-#ifdef CONFIG_X86_32
-       timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
-#endif
-
-       pin1  = find_isa_irq_pin(0, mp_INT);
-       apic1 = find_isa_irq_apic(0, mp_INT);
-       pin2  = ioapic_i8259.pin;
-       apic2 = ioapic_i8259.apic;
-
-       apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
-                   "apic1=%d pin1=%d apic2=%d pin2=%d\n",
-                   cfg->vector, apic1, pin1, apic2, pin2);
-
-       /*
-        * Some BIOS writers are clueless and report the ExtINTA
-        * I/O APIC input from the cascaded 8259A as the timer
-        * interrupt input.  So just in case, if only one pin
-        * was found above, try it both directly and through the
-        * 8259A.
-        */
-       if (pin1 == -1) {
-#ifdef CONFIG_INTR_REMAP
-               if (intr_remapping_enabled)
-                       panic("BIOS bug: timer not connected to IO-APIC");
-#endif
-               pin1 = pin2;
-               apic1 = apic2;
-               no_pin1 = 1;
-       } else if (pin2 == -1) {
-               pin2 = pin1;
-               apic2 = apic1;
-       }
-
-       if (pin1 != -1) {
-               /*
-                * Ok, does IRQ0 through the IOAPIC work?
-                */
-               if (no_pin1) {
-                       add_pin_to_irq_cpu(cfg, cpu, apic1, pin1);
-                       setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
-               }
-               unmask_IO_APIC_irq_desc(desc);
-               if (timer_irq_works()) {
-                       if (nmi_watchdog == NMI_IO_APIC) {
-                               setup_nmi();
-                               enable_8259A_irq(0);
-                       }
-                       if (disable_timer_pin_1 > 0)
-                               clear_IO_APIC_pin(0, pin1);
-                       goto out;
-               }
-#ifdef CONFIG_INTR_REMAP
-               if (intr_remapping_enabled)
-                       panic("timer doesn't work through Interrupt-remapped IO-APIC");
-#endif
-               clear_IO_APIC_pin(apic1, pin1);
-               if (!no_pin1)
-                       apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
-                                   "8254 timer not connected to IO-APIC\n");
-
-               apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
-                           "(IRQ0) through the 8259A ...\n");
-               apic_printk(APIC_QUIET, KERN_INFO
-                           "..... (found apic %d pin %d) ...\n", apic2, pin2);
-               /*
-                * legacy devices should be connected to IO APIC #0
-                */
-               replace_pin_at_irq_cpu(cfg, cpu, apic1, pin1, apic2, pin2);
-               setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
-               unmask_IO_APIC_irq_desc(desc);
-               enable_8259A_irq(0);
-               if (timer_irq_works()) {
-                       apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
-                       timer_through_8259 = 1;
-                       if (nmi_watchdog == NMI_IO_APIC) {
-                               disable_8259A_irq(0);
-                               setup_nmi();
-                               enable_8259A_irq(0);
-                       }
-                       goto out;
-               }
-               /*
-                * Cleanup, just in case ...
-                */
-               disable_8259A_irq(0);
-               clear_IO_APIC_pin(apic2, pin2);
-               apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
-       }
-
-       if (nmi_watchdog == NMI_IO_APIC) {
-               apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
-                           "through the IO-APIC - disabling NMI Watchdog!\n");
-               nmi_watchdog = NMI_NONE;
-       }
-#ifdef CONFIG_X86_32
-       timer_ack = 0;
-#endif
-
-       apic_printk(APIC_QUIET, KERN_INFO
-                   "...trying to set up timer as Virtual Wire IRQ...\n");
-
-       lapic_register_intr(0, desc);
-       apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);     /* Fixed mode */
-       enable_8259A_irq(0);
-
-       if (timer_irq_works()) {
-               apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
-               goto out;
-       }
-       disable_8259A_irq(0);
-       apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
-       apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
-
-       apic_printk(APIC_QUIET, KERN_INFO
-                   "...trying to set up timer as ExtINT IRQ...\n");
-
-       init_8259A(0);
-       make_8259A_irq(0);
-       apic_write(APIC_LVT0, APIC_DM_EXTINT);
-
-       unlock_ExtINT_logic();
-
-       if (timer_irq_works()) {
-               apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
-               goto out;
-       }
-       apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
-       panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
-               "report.  Then try booting with the 'noapic' option.\n");
-out:
-       local_irq_restore(flags);
-}
-
-/*
- * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
- * to devices.  However there may be an I/O APIC pin available for
- * this interrupt regardless.  The pin may be left unconnected, but
- * typically it will be reused as an ExtINT cascade interrupt for
- * the master 8259A.  In the MPS case such a pin will normally be
- * reported as an ExtINT interrupt in the MP table.  With ACPI
- * there is no provision for ExtINT interrupts, and in the absence
- * of an override it would be treated as an ordinary ISA I/O APIC
- * interrupt, that is edge-triggered and unmasked by default.  We
- * used to do this, but it caused problems on some systems because
- * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
- * the same ExtINT cascade interrupt to drive the local APIC of the
- * bootstrap processor.  Therefore we refrain from routing IRQ2 to
- * the I/O APIC in all cases now.  No actual device should request
- * it anyway.  --macro
- */
-#define PIC_IRQS       (1 << PIC_CASCADE_IR)
-
-void __init setup_IO_APIC(void)
-{
-#ifdef CONFIG_X86_32
-       enable_IO_APIC();
-#else
-       /*
-        * calling enable_IO_APIC() is moved to setup_local_APIC for BP
-        */
-#endif
-
-       io_apic_irqs = ~PIC_IRQS;
-
-       apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-       /*
-        * Set up IO-APIC IRQ routing.
-        */
-#ifdef CONFIG_X86_32
-       if (!acpi_ioapic)
-               setup_ioapic_ids_from_mpc();
-#endif
-       sync_Arb_IDs();
-       setup_IO_APIC_irqs();
-       init_IO_APIC_traps();
-       check_timer();
-}
-
-/*
- *      Called after all the initialization is done. If we didn't find any
- *      APIC bugs then we can allow the modify fast path
- */
-
-static int __init io_apic_bug_finalize(void)
-{
-       if (sis_apic_bug == -1)
-               sis_apic_bug = 0;
-       return 0;
-}
-
-late_initcall(io_apic_bug_finalize);
-
-struct sysfs_ioapic_data {
-       struct sys_device dev;
-       struct IO_APIC_route_entry entry[0];
-};
-static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
-
-static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
-{
-       struct IO_APIC_route_entry *entry;
-       struct sysfs_ioapic_data *data;
-       int i;
-
-       data = container_of(dev, struct sysfs_ioapic_data, dev);
-       entry = data->entry;
-       for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
-               *entry = ioapic_read_entry(dev->id, i);
-
-       return 0;
-}
-
-static int ioapic_resume(struct sys_device *dev)
-{
-       struct IO_APIC_route_entry *entry;
-       struct sysfs_ioapic_data *data;
-       unsigned long flags;
-       union IO_APIC_reg_00 reg_00;
-       int i;
-
-       data = container_of(dev, struct sysfs_ioapic_data, dev);
-       entry = data->entry;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       reg_00.raw = io_apic_read(dev->id, 0);
-       if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
-               reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
-               io_apic_write(dev->id, 0, reg_00.raw);
-       }
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-       for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
-               ioapic_write_entry(dev->id, i, entry[i]);
-
-       return 0;
-}
-
-static struct sysdev_class ioapic_sysdev_class = {
-       .name = "ioapic",
-       .suspend = ioapic_suspend,
-       .resume = ioapic_resume,
-};
-
-static int __init ioapic_init_sysfs(void)
-{
-       struct sys_device *dev;
-       int i, size, error;
-
-       error = sysdev_class_register(&ioapic_sysdev_class);
-       if (error)
-               return error;
-
-       for (i = 0; i < nr_ioapics; i++) {
-               size = sizeof(struct sys_device) + nr_ioapic_registers[i]
-                       * sizeof(struct IO_APIC_route_entry);
-               mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
-               if (!mp_ioapic_data[i]) {
-                       printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-                       continue;
-               }
-               dev = &mp_ioapic_data[i]->dev;
-               dev->id = i;
-               dev->cls = &ioapic_sysdev_class;
-               error = sysdev_register(dev);
-               if (error) {
-                       kfree(mp_ioapic_data[i]);
-                       mp_ioapic_data[i] = NULL;
-                       printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
-                       continue;
-               }
-       }
-
-       return 0;
-}
-
-device_initcall(ioapic_init_sysfs);
-
-/*
- * Dynamic irq allocate and deallocation
- */
-unsigned int create_irq_nr(unsigned int irq_want)
-{
-       /* Allocate an unused irq */
-       unsigned int irq;
-       unsigned int new;
-       unsigned long flags;
-       struct irq_cfg *cfg_new = NULL;
-       int cpu = boot_cpu_id;
-       struct irq_desc *desc_new = NULL;
-
-       irq = 0;
-       spin_lock_irqsave(&vector_lock, flags);
-       for (new = irq_want; new < NR_IRQS; new++) {
-               if (platform_legacy_irq(new))
-                       continue;
-
-               desc_new = irq_to_desc_alloc_cpu(new, cpu);
-               if (!desc_new) {
-                       printk(KERN_INFO "can not get irq_desc for %d\n", new);
-                       continue;
-               }
-               cfg_new = desc_new->chip_data;
-
-               if (cfg_new->vector != 0)
-                       continue;
-               if (__assign_irq_vector(new, cfg_new, TARGET_CPUS) == 0)
-                       irq = new;
-               break;
-       }
-       spin_unlock_irqrestore(&vector_lock, flags);
-
-       if (irq > 0) {
-               dynamic_irq_init(irq);
-               /* restore it, in case dynamic_irq_init cleared it */
-               if (desc_new)
-                       desc_new->chip_data = cfg_new;
-       }
-       return irq;
-}
-
-static int nr_irqs_gsi = NR_IRQS_LEGACY;
-int create_irq(void)
-{
-       unsigned int irq_want;
-       int irq;
-
-       irq_want = nr_irqs_gsi;
-       irq = create_irq_nr(irq_want);
-
-       if (irq == 0)
-               irq = -1;
-
-       return irq;
-}
-
-void destroy_irq(unsigned int irq)
-{
-       unsigned long flags;
-       struct irq_cfg *cfg;
-       struct irq_desc *desc;
-
-       /* store it, in case dynamic_irq_cleanup clears it */
-       desc = irq_to_desc(irq);
-       cfg = desc->chip_data;
-       dynamic_irq_cleanup(irq);
-       /* connect back irq_cfg */
-       if (desc)
-               desc->chip_data = cfg;
-
-#ifdef CONFIG_INTR_REMAP
-       free_irte(irq);
-#endif
-       spin_lock_irqsave(&vector_lock, flags);
-       __clear_irq_vector(irq, cfg);
-       spin_unlock_irqrestore(&vector_lock, flags);
-}
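
A hedged sketch of how an in-kernel caller would pair these helpers; the function name here is hypothetical and error handling is reduced to the minimum (create_irq() returns -1 when no vector or descriptor is free):

    /* Hypothetical in-kernel caller; not part of this file. */
    static int example_use_dynamic_irq(void)
    {
            int irq = create_irq();

            if (irq < 0)
                    return -ENOSPC;   /* no free vector/descriptor */

            /* ... request_irq(), program the device with 'irq' ... */

            destroy_irq(irq);         /* tear everything down again */
            return 0;
    }
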
-
-/*
- * MSI message composition
- */
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
-{
-       struct irq_cfg *cfg;
-       int err;
-       unsigned dest;
-
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, TARGET_CPUS);
-       if (err)
-               return err;
-
-       dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
-
-#ifdef CONFIG_INTR_REMAP
-       if (irq_remapped(irq)) {
-               struct irte irte;
-               int ir_index;
-               u16 sub_handle;
-
-               ir_index = map_irq_to_irte_handle(irq, &sub_handle);
-               BUG_ON(ir_index == -1);
-
-               memset (&irte, 0, sizeof(irte));
-
-               irte.present = 1;
-               irte.dst_mode = INT_DEST_MODE;
-               irte.trigger_mode = 0; /* edge */
-               irte.dlvry_mode = INT_DELIVERY_MODE;
-               irte.vector = cfg->vector;
-               irte.dest_id = IRTE_DEST(dest);
-
-               modify_irte(irq, &irte);
-
-               msg->address_hi = MSI_ADDR_BASE_HI;
-               msg->data = sub_handle;
-               msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
-                                 MSI_ADDR_IR_SHV |
-                                 MSI_ADDR_IR_INDEX1(ir_index) |
-                                 MSI_ADDR_IR_INDEX2(ir_index);
-       } else
-#endif
-       {
-               msg->address_hi = MSI_ADDR_BASE_HI;
-               msg->address_lo =
-                       MSI_ADDR_BASE_LO |
-                       ((INT_DEST_MODE == 0) ?
-                               MSI_ADDR_DEST_MODE_PHYSICAL:
-                               MSI_ADDR_DEST_MODE_LOGICAL) |
-                       ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-                               MSI_ADDR_REDIRECTION_CPU:
-                               MSI_ADDR_REDIRECTION_LOWPRI) |
-                       MSI_ADDR_DEST_ID(dest);
-
-               msg->data =
-                       MSI_DATA_TRIGGER_EDGE |
-                       MSI_DATA_LEVEL_ASSERT |
-                       ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-                               MSI_DATA_DELIVERY_FIXED:
-                               MSI_DATA_DELIVERY_LOWPRI) |
-                       MSI_DATA_VECTOR(cfg->vector);
-       }
-       return err;
-}
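
In the non-remapped branch above, the composed words follow the standard x86 MSI layout: address_lo targets the 0xFEExx000 window with the destination APIC ID in bits 19:12, and the data word carries the vector in bits 7:0 plus trigger/delivery bits. A standalone sketch of that composition (constants written out for illustration; the kernel spells them MSI_ADDR_* and MSI_DATA_*):

    #include <stdio.h>
    #include <stdint.h>

    #define MSI_ADDR_BASE       0xfee00000u  /* fixed MSI address window */
    #define MSI_ADDR_DEST_SHIFT 12           /* dest APIC ID, bits 19:12 */
    #define MSI_DATA_ASSERT     (1u << 14)   /* level-assert bit */

    int main(void)
    {
        unsigned int dest = 0x03, vector = 0x41;
        uint32_t address_lo = MSI_ADDR_BASE | (dest << MSI_ADDR_DEST_SHIFT);
        uint32_t data = MSI_DATA_ASSERT | vector;  /* edge, fixed delivery */

        printf("address_lo = 0x%08x\n", (unsigned)address_lo); /* 0xfee03000 */
        printf("data       = 0x%08x\n", (unsigned)data);       /* 0x00004041 */
        return 0;
    }
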
-
-#ifdef CONFIG_SMP
-static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_cfg *cfg;
-       struct msi_msg msg;
-       unsigned int dest;
-
-       dest = set_desc_affinity(desc, mask);
-       if (dest == BAD_APICID)
-               return;
-
-       cfg = desc->chip_data;
-
-       read_msi_msg_desc(desc, &msg);
-
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-       write_msi_msg_desc(desc, &msg);
-}
-#ifdef CONFIG_INTR_REMAP
-/*
- * Migrate the MSI irq to another cpumask. This migration is
- * done in the process context using interrupt-remapping hardware.
- */
-static void
-ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_cfg *cfg = desc->chip_data;
-       unsigned int dest;
-       struct irte irte;
-
-       if (get_irte(irq, &irte))
-               return;
-
-       dest = set_desc_affinity(desc, mask);
-       if (dest == BAD_APICID)
-               return;
-
-       irte.vector = cfg->vector;
-       irte.dest_id = IRTE_DEST(dest);
-
-       /*
-        * atomically update the IRTE with the new destination and vector.
-        */
-       modify_irte(irq, &irte);
-
-       /*
-        * After this point, all the interrupts will start arriving
-        * at the new destination. So, time to cleanup the previous
-        * vector allocation.
-        */
-       if (cfg->move_in_progress)
-               send_cleanup_vector(cfg);
-}
-
-#endif
-#endif /* CONFIG_SMP */
-
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
-       .name           = "PCI-MSI",
-       .unmask         = unmask_msi_irq,
-       .mask           = mask_msi_irq,
-       .ack            = ack_apic_edge,
-#ifdef CONFIG_SMP
-       .set_affinity   = set_msi_irq_affinity,
-#endif
-       .retrigger      = ioapic_retrigger_irq,
-};
-
-#ifdef CONFIG_INTR_REMAP
-static struct irq_chip msi_ir_chip = {
-       .name           = "IR-PCI-MSI",
-       .unmask         = unmask_msi_irq,
-       .mask           = mask_msi_irq,
-       .ack            = ack_x2apic_edge,
-#ifdef CONFIG_SMP
-       .set_affinity   = ir_set_msi_irq_affinity,
-#endif
-       .retrigger      = ioapic_retrigger_irq,
-};
-
-/*
- * Map the PCI dev to the corresponding remapping hardware unit
- * and allocate 'nvec' consecutive interrupt-remapping table entries
- * in it.
- */
-static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
-{
-       struct intel_iommu *iommu;
-       int index;
-
-       iommu = map_dev_to_ir(dev);
-       if (!iommu) {
-               printk(KERN_ERR
-                      "Unable to map PCI %s to iommu\n", pci_name(dev));
-               return -ENOENT;
-       }
-
-       index = alloc_irte(iommu, irq, nvec);
-       if (index < 0) {
-               printk(KERN_ERR
-                      "Unable to allocate %d IRTE for PCI %s\n", nvec,
-                      pci_name(dev));
-               return -ENOSPC;
-       }
-       return index;
-}
-#endif
-
-static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
-{
-       int ret;
-       struct msi_msg msg;
-
-       ret = msi_compose_msg(dev, irq, &msg);
-       if (ret < 0)
-               return ret;
-
-       set_irq_msi(irq, msidesc);
-       write_msi_msg(irq, &msg);
-
-#ifdef CONFIG_INTR_REMAP
-       if (irq_remapped(irq)) {
-               struct irq_desc *desc = irq_to_desc(irq);
-               /*
-                * irq migration in process context
-                */
-               desc->status |= IRQ_MOVE_PCNTXT;
-               set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
-       } else
-#endif
-               set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
-
-       dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
-
-       return 0;
-}
-
-int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc)
-{
-       unsigned int irq;
-       int ret;
-       unsigned int irq_want;
-
-       irq_want = nr_irqs_gsi;
-       irq = create_irq_nr(irq_want);
-       if (irq == 0)
-               return -1;
-
-#ifdef CONFIG_INTR_REMAP
-       if (!intr_remapping_enabled)
-               goto no_ir;
-
-       ret = msi_alloc_irte(dev, irq, 1);
-       if (ret < 0)
-               goto error;
-no_ir:
-#endif
-       ret = setup_msi_irq(dev, msidesc, irq);
-       if (ret < 0) {
-               destroy_irq(irq);
-               return ret;
-       }
-       return 0;
-
-#ifdef CONFIG_INTR_REMAP
-error:
-       destroy_irq(irq);
-       return ret;
-#endif
-}
-
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
-       unsigned int irq;
-       int ret, sub_handle;
-       struct msi_desc *msidesc;
-       unsigned int irq_want;
-
-#ifdef CONFIG_INTR_REMAP
-       struct intel_iommu *iommu = NULL;
-       int index = 0;
-#endif
-
-       irq_want = nr_irqs_gsi;
-       sub_handle = 0;
-       list_for_each_entry(msidesc, &dev->msi_list, list) {
-               irq = create_irq_nr(irq_want);
-               irq_want++;
-               if (irq == 0)
-                       return -1;
-#ifdef CONFIG_INTR_REMAP
-               if (!intr_remapping_enabled)
-                       goto no_ir;
-
-               if (!sub_handle) {
-                       /*
-                        * allocate the consecutive block of IRTEs
-                        * for 'nvec'
-                        */
-                       index = msi_alloc_irte(dev, irq, nvec);
-                       if (index < 0) {
-                               ret = index;
-                               goto error;
-                       }
-               } else {
-                       iommu = map_dev_to_ir(dev);
-                       if (!iommu) {
-                               ret = -ENOENT;
-                               goto error;
-                       }
-                       /*
-                        * set up the mapping between the irq and the IRTE
-                        * base index; the sub_handle points to the
-                        * appropriate interrupt remap table entry.
-                        */
-                       set_irte_irq(irq, iommu, index, sub_handle);
-               }
-no_ir:
-#endif
-               ret = setup_msi_irq(dev, msidesc, irq);
-               if (ret < 0)
-                       goto error;
-               sub_handle++;
-       }
-       return 0;
-
-error:
-       destroy_irq(irq);
-       return ret;
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
-       destroy_irq(irq);
-}
-
-#ifdef CONFIG_DMAR
-#ifdef CONFIG_SMP
-static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_cfg *cfg;
-       struct msi_msg msg;
-       unsigned int dest;
-
-       dest = set_desc_affinity(desc, mask);
-       if (dest == BAD_APICID)
-               return;
-
-       cfg = desc->chip_data;
-
-       dmar_msi_read(irq, &msg);
-
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-       dmar_msi_write(irq, &msg);
-}
-
-#endif /* CONFIG_SMP */
-
-struct irq_chip dmar_msi_type = {
-       .name = "DMAR_MSI",
-       .unmask = dmar_msi_unmask,
-       .mask = dmar_msi_mask,
-       .ack = ack_apic_edge,
-#ifdef CONFIG_SMP
-       .set_affinity = dmar_msi_set_affinity,
-#endif
-       .retrigger = ioapic_retrigger_irq,
-};
-
-int arch_setup_dmar_msi(unsigned int irq)
-{
-       int ret;
-       struct msi_msg msg;
-
-       ret = msi_compose_msg(NULL, irq, &msg);
-       if (ret < 0)
-               return ret;
-       dmar_msi_write(irq, &msg);
-       set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
-               "edge");
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_HPET_TIMER
-
-#ifdef CONFIG_SMP
-static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_cfg *cfg;
-       struct msi_msg msg;
-       unsigned int dest;
-
-       dest = set_desc_affinity(desc, mask);
-       if (dest == BAD_APICID)
-               return;
-
-       cfg = desc->chip_data;
-
-       hpet_msi_read(irq, &msg);
-
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-       hpet_msi_write(irq, &msg);
-}
-
-#endif /* CONFIG_SMP */
-
-struct irq_chip hpet_msi_type = {
-       .name = "HPET_MSI",
-       .unmask = hpet_msi_unmask,
-       .mask = hpet_msi_mask,
-       .ack = ack_apic_edge,
-#ifdef CONFIG_SMP
-       .set_affinity = hpet_msi_set_affinity,
-#endif
-       .retrigger = ioapic_retrigger_irq,
-};
-
-int arch_setup_hpet_msi(unsigned int irq)
-{
-       int ret;
-       struct msi_msg msg;
-
-       ret = msi_compose_msg(NULL, irq, &msg);
-       if (ret < 0)
-               return ret;
-
-       hpet_msi_write(irq, &msg);
-       set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
-               "edge");
-
-       return 0;
-}
-#endif
-
-#endif /* CONFIG_PCI_MSI */
-
-/*
- * Hypertransport interrupt support
- */
-#ifdef CONFIG_HT_IRQ
-
-#ifdef CONFIG_SMP
-
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-{
-       struct ht_irq_msg msg;
-       fetch_ht_irq_msg(irq, &msg);
-
-       msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
-       msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
-       msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
-       msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
-       write_ht_irq_msg(irq, &msg);
-}
-
-static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
-{
-       struct irq_desc *desc = irq_to_desc(irq);
-       struct irq_cfg *cfg;
-       unsigned int dest;
-
-       dest = set_desc_affinity(desc, mask);
-       if (dest == BAD_APICID)
-               return;
-
-       cfg = desc->chip_data;
-
-       target_ht_irq(irq, dest, cfg->vector);
-}
-
-#endif
-
-static struct irq_chip ht_irq_chip = {
-       .name           = "PCI-HT",
-       .mask           = mask_ht_irq,
-       .unmask         = unmask_ht_irq,
-       .ack            = ack_apic_edge,
-#ifdef CONFIG_SMP
-       .set_affinity   = set_ht_irq_affinity,
-#endif
-       .retrigger      = ioapic_retrigger_irq,
-};
-
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-{
-       struct irq_cfg *cfg;
-       int err;
-
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, TARGET_CPUS);
-       if (!err) {
-               struct ht_irq_msg msg;
-               unsigned dest;
-
-               dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);
-
-               msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-
-               msg.address_lo =
-                       HT_IRQ_LOW_BASE |
-                       HT_IRQ_LOW_DEST_ID(dest) |
-                       HT_IRQ_LOW_VECTOR(cfg->vector) |
-                       ((INT_DEST_MODE == 0) ?
-                               HT_IRQ_LOW_DM_PHYSICAL :
-                               HT_IRQ_LOW_DM_LOGICAL) |
-                       HT_IRQ_LOW_RQEOI_EDGE |
-                       ((INT_DELIVERY_MODE != dest_LowestPrio) ?
-                               HT_IRQ_LOW_MT_FIXED :
-                               HT_IRQ_LOW_MT_ARBITRATED) |
-                       HT_IRQ_LOW_IRQ_MASKED;
-
-               write_ht_irq_msg(irq, &msg);
-
-               set_irq_chip_and_handler_name(irq, &ht_irq_chip,
-                                             handle_edge_irq, "edge");
-
-               dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-       }
-       return err;
-}
-#endif /* CONFIG_HT_IRQ */
-
-#ifdef CONFIG_X86_64
-/*
- * Re-target the irq to the specified CPU and enable the specified MMR located
- * on the specified blade to allow the sending of MSIs to the specified CPU.
- */
-int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
-                      unsigned long mmr_offset)
-{
-       const struct cpumask *eligible_cpu = cpumask_of(cpu);
-       struct irq_cfg *cfg;
-       int mmr_pnode;
-       unsigned long mmr_value;
-       struct uv_IO_APIC_route_entry *entry;
-       unsigned long flags;
-       int err;
-
-       cfg = irq_cfg(irq);
-
-       err = assign_irq_vector(irq, cfg, eligible_cpu);
-       if (err != 0)
-               return err;
-
-       spin_lock_irqsave(&vector_lock, flags);
-       set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
-                                     irq_name);
-       spin_unlock_irqrestore(&vector_lock, flags);
-
-       mmr_value = 0;
-       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-       BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
-
-       entry->vector = cfg->vector;
-       entry->delivery_mode = INT_DELIVERY_MODE;
-       entry->dest_mode = INT_DEST_MODE;
-       entry->polarity = 0;
-       entry->trigger = 0;
-       entry->mask = 0;
-       entry->dest = cpu_mask_to_apicid(eligible_cpu);
-
-       mmr_pnode = uv_blade_to_pnode(mmr_blade);
-       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-
-       return irq;
-}
-
-/*
- * Disable the specified MMR located on the specified blade so that MSIs are
- * no longer allowed to be sent.
- */
-void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
-{
-       unsigned long mmr_value;
-       struct uv_IO_APIC_route_entry *entry;
-       int mmr_pnode;
-
-       mmr_value = 0;
-       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-       BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
-
-       entry->mask = 1;
-
-       mmr_pnode = uv_blade_to_pnode(mmr_blade);
-       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-}
-#endif /* CONFIG_X86_64 */
-
-int __init io_apic_get_redir_entries(int ioapic)
-{
-       union IO_APIC_reg_01    reg_01;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       reg_01.raw = io_apic_read(ioapic, 1);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-
-       return reg_01.bits.entries;
-}
-
-void __init probe_nr_irqs_gsi(void)
-{
-       int nr = 0;
-
-       nr = acpi_probe_gsi();
-       if (nr > nr_irqs_gsi) {
-               nr_irqs_gsi = nr;
-       } else {
-               /* for acpi=off or acpi is not compiled in */
-               int idx;
-
-               nr = 0;
-               for (idx = 0; idx < nr_ioapics; idx++)
-                       nr += io_apic_get_redir_entries(idx) + 1;
-
-               if (nr > nr_irqs_gsi)
-                       nr_irqs_gsi = nr;
-       }
-
-       printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
-}
-
-/* --------------------------------------------------------------------------
-                          ACPI-based IOAPIC Configuration
-   -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI
-
-#ifdef CONFIG_X86_32
-int __init io_apic_get_unique_id(int ioapic, int apic_id)
-{
-       union IO_APIC_reg_00 reg_00;
-       static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
-       physid_mask_t tmp;
-       unsigned long flags;
-       int i = 0;
-
-       /*
-        * The P4 platform supports up to 256 APIC IDs on two separate APIC
-        * buses (one for LAPICs, one for IOAPICs), where predecessors only
-        * support up to 16 on one shared APIC bus.
-        *
-        * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
-        *      advantage of new APIC bus architecture.
-        */
-
-       if (physids_empty(apic_id_map))
-               apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       reg_00.raw = io_apic_read(ioapic, 0);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-
-       if (apic_id >= get_physical_broadcast()) {
-               printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
-                       "%d\n", ioapic, apic_id, reg_00.bits.ID);
-               apic_id = reg_00.bits.ID;
-       }
-
-       /*
-        * Every APIC in a system must have a unique ID or we get lots of nice
-        * 'stuck on smp_invalidate_needed IPI wait' messages.
-        */
-       if (check_apicid_used(apic_id_map, apic_id)) {
-
-               for (i = 0; i < get_physical_broadcast(); i++) {
-                       if (!check_apicid_used(apic_id_map, i))
-                               break;
-               }
-
-               if (i == get_physical_broadcast())
-                       panic("Max apic_id exceeded!\n");
-
-               printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
-                       "trying %d\n", ioapic, apic_id, i);
-
-               apic_id = i;
-       }
-
-       tmp = apicid_to_cpu_present(apic_id);
-       physids_or(apic_id_map, apic_id_map, tmp);
-
-       if (reg_00.bits.ID != apic_id) {
-               reg_00.bits.ID = apic_id;
-
-               spin_lock_irqsave(&ioapic_lock, flags);
-               io_apic_write(ioapic, 0, reg_00.raw);
-               reg_00.raw = io_apic_read(ioapic, 0);
-               spin_unlock_irqrestore(&ioapic_lock, flags);
-
-               /* Sanity check */
-               if (reg_00.bits.ID != apic_id) {
-                       printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
-                       return -1;
-               }
-       }
-
-       apic_printk(APIC_VERBOSE, KERN_INFO
-                       "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
-
-       return apic_id;
-}
-
-int __init io_apic_get_version(int ioapic)
-{
-       union IO_APIC_reg_01    reg_01;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ioapic_lock, flags);
-       reg_01.raw = io_apic_read(ioapic, 1);
-       spin_unlock_irqrestore(&ioapic_lock, flags);
-
-       return reg_01.bits.version;
-}
-#endif
-
-int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
-{
-       struct irq_desc *desc;
-       struct irq_cfg *cfg;
-       int cpu = boot_cpu_id;
-
-       if (!IO_APIC_IRQ(irq)) {
-               apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
-                       ioapic);
-               return -EINVAL;
-       }
-
-       desc = irq_to_desc_alloc_cpu(irq, cpu);
-       if (!desc) {
-               printk(KERN_INFO "can not get irq_desc %d\n", irq);
-               return 0;
-       }
-
-       /*
-        * IRQs < 16 are already in the irq_2_pin[] map
-        */
-       if (irq >= NR_IRQS_LEGACY) {
-               cfg = desc->chip_data;
-               add_pin_to_irq_cpu(cfg, cpu, ioapic, pin);
-       }
-
-       setup_IO_APIC_irq(ioapic, pin, irq, desc, triggering, polarity);
-
-       return 0;
-}
-
-
-int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
-{
-       int i;
-
-       if (skip_ioapic_setup)
-               return -1;
-
-       for (i = 0; i < mp_irq_entries; i++)
-               if (mp_irqs[i].mp_irqtype == mp_INT &&
-                   mp_irqs[i].mp_srcbusirq == bus_irq)
-                       break;
-       if (i >= mp_irq_entries)
-               return -1;
-
-       *trigger = irq_trigger(i);
-       *polarity = irq_polarity(i);
-       return 0;
-}
-
-#endif /* CONFIG_ACPI */
-
-/*
- * This function is currently only a helper for the i386 SMP boot process,
- * where we need to reprogram the ioredtbls to cater for the CPUs which have
- * come online, so the mask in all cases should simply be TARGET_CPUS.
- */
-#ifdef CONFIG_SMP
-void __init setup_ioapic_dest(void)
-{
-       int pin, ioapic, irq, irq_entry;
-       struct irq_desc *desc;
-       struct irq_cfg *cfg;
-       const struct cpumask *mask;
-
-       if (skip_ioapic_setup == 1)
-               return;
-
-       for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
-               for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
-                       irq_entry = find_irq_entry(ioapic, pin, mp_INT);
-                       if (irq_entry == -1)
-                               continue;
-                       irq = pin_2_irq(irq_entry, ioapic, pin);
-
-                       /* setup_IO_APIC_irqs could fail to get a vector for
-                        * some device when you have too many devices, because
-                        * at that time only the boot cpu is online.
-                        */
-                       desc = irq_to_desc(irq);
-                       cfg = desc->chip_data;
-                       if (!cfg->vector) {
-                               setup_IO_APIC_irq(ioapic, pin, irq, desc,
-                                                 irq_trigger(irq_entry),
-                                                 irq_polarity(irq_entry));
-                               continue;
-                       }
-
-                       /*
-                        * Honour affinities which have been set in early boot
-                        */
-                       if (desc->status &
-                           (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
-                               mask = &desc->affinity;
-                       else
-                               mask = TARGET_CPUS;
-
-#ifdef CONFIG_INTR_REMAP
-                       if (intr_remapping_enabled)
-                               set_ir_ioapic_affinity_irq_desc(desc, mask);
-                       else
-#endif
-                               set_ioapic_affinity_irq_desc(desc, mask);
-               }
-
-       }
-}
-#endif
-
-#define IOAPIC_RESOURCE_NAME_SIZE 11
-
-static struct resource *ioapic_resources;
-
-static struct resource * __init ioapic_setup_resources(void)
-{
-       unsigned long n;
-       struct resource *res;
-       char *mem;
-       int i;
-
-       if (nr_ioapics <= 0)
-               return NULL;
-
-       n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
-       n *= nr_ioapics;
-
-       mem = alloc_bootmem(n);
-       res = (void *)mem;
-
-       if (mem != NULL) {
-               mem += sizeof(struct resource) * nr_ioapics;
-
-               for (i = 0; i < nr_ioapics; i++) {
-                       res[i].name = mem;
-                       res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-                       sprintf(mem,  "IOAPIC %u", i);
-                       mem += IOAPIC_RESOURCE_NAME_SIZE;
-               }
-       }
-
-       ioapic_resources = res;
-
-       return res;
-}
-
-void __init ioapic_init_mappings(void)
-{
-       unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-       struct resource *ioapic_res;
-       int i;
-
-       ioapic_res = ioapic_setup_resources();
-       for (i = 0; i < nr_ioapics; i++) {
-               if (smp_found_config) {
-                       ioapic_phys = mp_ioapics[i].mp_apicaddr;
-#ifdef CONFIG_X86_32
-                       if (!ioapic_phys) {
-                               printk(KERN_ERR
-                                      "WARNING: bogus zero IO-APIC "
-                                      "address found in MPTABLE, "
-                                      "disabling IO/APIC support!\n");
-                               smp_found_config = 0;
-                               skip_ioapic_setup = 1;
-                               goto fake_ioapic_page;
-                       }
-#endif
-               } else {
-#ifdef CONFIG_X86_32
-fake_ioapic_page:
-#endif
-                       ioapic_phys = (unsigned long)
-                               alloc_bootmem_pages(PAGE_SIZE);
-                       ioapic_phys = __pa(ioapic_phys);
-               }
-               set_fixmap_nocache(idx, ioapic_phys);
-               apic_printk(APIC_VERBOSE,
-                           "mapped IOAPIC to %08lx (%08lx)\n",
-                           __fix_to_virt(idx), ioapic_phys);
-               idx++;
-
-               if (ioapic_res != NULL) {
-                       ioapic_res->start = ioapic_phys;
-                       ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
-                       ioapic_res++;
-               }
-       }
-}
-
-static int __init ioapic_insert_resources(void)
-{
-       int i;
-       struct resource *r = ioapic_resources;
-
-       if (!r) {
-               printk(KERN_ERR
-                      "IO APIC resources could be not be allocated.\n");
-               return -1;
-       }
-
-       for (i = 0; i < nr_ioapics; i++) {
-               insert_resource(&iomem_resource, r);
-               r++;
-       }
-
-       return 0;
-}
-
-/* Insert the IO APIC resources after PCI initialization has occurred to handle
- * IO APICS that are mapped in on a BAR in PCI space. */
-late_initcall(ioapic_insert_resources);
index b12208f4dfee80cbe208c29149aeceafed394f38..99c4d308f16b0db61ca0821adca78d3bec2e8b96 100644 (file)
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 
        t->io_bitmap_max = bytes;
 
-#ifdef CONFIG_X86_32
-       /*
-        * Sets the lazy trigger so that the next I/O operation will
-        * reload the correct bitmap.
-        * Reset the owner so that a process switch will not set
-        * tss->io_bitmap_base to IO_BITMAP_OFFSET.
-        */
-       tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-       tss->io_bitmap_owner = NULL;
-#else
        /* Update the TSS: */
        memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
-#endif
 
        put_cpu();
 
@@ -131,9 +120,8 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage long sys_iopl(unsigned long regsp)
+long sys_iopl(struct pt_regs *regs)
 {
-       struct pt_regs *regs = (struct pt_regs *)&regsp;
        unsigned int level = regs->bx;
        struct thread_struct *t = &current->thread;
        int rc;
diff --git a/arch/x86/kernel/ipi.c b/arch/x86/kernel/ipi.c
deleted file mode 100644 (file)
index 285bbf8..0000000
+++ /dev/null
@@ -1,190 +0,0 @@
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/cache.h>
-#include <linux/cpu.h>
-#include <linux/module.h>
-
-#include <asm/smp.h>
-#include <asm/mtrr.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/apic.h>
-#include <asm/proto.h>
-
-#ifdef CONFIG_X86_32
-#include <mach_apic.h>
-#include <mach_ipi.h>
-
-/*
- * the following functions deal with sending IPIs between CPUs.
- *
- * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
- */
-
-static inline int __prepare_ICR(unsigned int shortcut, int vector)
-{
-       unsigned int icr = shortcut | APIC_DEST_LOGICAL;
-
-       switch (vector) {
-       default:
-               icr |= APIC_DM_FIXED | vector;
-               break;
-       case NMI_VECTOR:
-               icr |= APIC_DM_NMI;
-               break;
-       }
-       return icr;
-}
-
-static inline int __prepare_ICR2(unsigned int mask)
-{
-       return SET_APIC_DEST_FIELD(mask);
-}
-
-void __send_IPI_shortcut(unsigned int shortcut, int vector)
-{
-       /*
-        * Subtle. In the case of the 'never do double writes' workaround
-        * we have to lock out interrupts to be safe.  As we don't care
-        * about the value read, we use an atomic rmw access to avoid costly
-        * cli/sti.  Otherwise we use an even cheaper single atomic write
-        * to the APIC.
-        */
-       unsigned int cfg;
-
-       /*
-        * Wait for idle.
-        */
-       apic_wait_icr_idle();
-
-       /*
-        * No need to touch the target chip field
-        */
-       cfg = __prepare_ICR(shortcut, vector);
-
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write(APIC_ICR, cfg);
-}
-
-void send_IPI_self(int vector)
-{
-       __send_IPI_shortcut(APIC_DEST_SELF, vector);
-}
-
-/*
- * This is used to send an IPI with no shorthand notation (the destination is
- * specified in bits 56 to 63 of the ICR).
- */
-static inline void __send_IPI_dest_field(unsigned long mask, int vector)
-{
-       unsigned long cfg;
-
-       /*
-        * Wait for idle.
-        */
-       if (unlikely(vector == NMI_VECTOR))
-               safe_apic_wait_icr_idle();
-       else
-               apic_wait_icr_idle();
-
-       /*
-        * prepare target chip field
-        */
-       cfg = __prepare_ICR2(mask);
-       apic_write(APIC_ICR2, cfg);
-
-       /*
-        * program the ICR
-        */
-       cfg = __prepare_ICR(0, vector);
-
-       /*
-        * Send the IPI. The write to APIC_ICR fires this off.
-        */
-       apic_write(APIC_ICR, cfg);
-}
-
-/*
- * This is only used on smaller machines.
- */
-void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
-{
-       unsigned long mask = cpumask_bits(cpumask)[0];
-       unsigned long flags;
-
-       local_irq_save(flags);
-       WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
-       __send_IPI_dest_field(mask, vector);
-       local_irq_restore(flags);
-}
-
-void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
-{
-       unsigned long flags;
-       unsigned int query_cpu;
-
-       /*
-        * Hack. The clustered APIC addressing mode doesn't allow us to send
-        * to an arbitrary mask, so I do unicasts to each CPU instead. This
-        * should be modified to do 1 message per cluster ID - mbligh
-        */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
-       local_irq_restore(flags);
-}
-
-void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
-{
-       unsigned long flags;
-       unsigned int query_cpu;
-       unsigned int this_cpu = smp_processor_id();
-
-       /* See Hack comment above */
-
-       local_irq_save(flags);
-       for_each_cpu(query_cpu, mask)
-               if (query_cpu != this_cpu)
-                       __send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
-                                             vector);
-       local_irq_restore(flags);
-}
-
-/* must come after the send_IPI functions above for inlining */
-static int convert_apicid_to_cpu(int apic_id)
-{
-       int i;
-
-       for_each_possible_cpu(i) {
-               if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
-                       return i;
-       }
-       return -1;
-}
-
-int safe_smp_processor_id(void)
-{
-       int apicid, cpuid;
-
-       if (!boot_cpu_has(X86_FEATURE_APIC))
-               return 0;
-
-       apicid = hard_smp_processor_id();
-       if (apicid == BAD_APICID)
-               return 0;
-
-       cpuid = convert_apicid_to_cpu(apicid);
-
-       return cpuid >= 0 ? cpuid : 0;
-}
-#endif
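
Although ipi.c is removed here, the recipe its helpers encoded is the standard local-APIC IPI sequence: compose the low ICR word from a shorthand, the logical destination mode, the delivery mode and the vector; put the destination in ICR2; and only write ICR once the delivery-status bit reads idle. A compact restatement, as a sketch against the <asm/apic.h> definitions in this tree rather than a drop-in replacement:

static inline unsigned int example_prepare_icr(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	if (vector == NMI_VECTOR)
		icr |= APIC_DM_NMI;		/* NMI delivery ignores the vector */
	else
		icr |= APIC_DM_FIXED | vector;	/* normal fixed-vector delivery */
	return icr;
}

static void example_send_ipi(unsigned int dest, int vector)
{
	apic_wait_icr_idle();			/* ICR must be idle before writing */
	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(dest));
	apic_write(APIC_ICR, example_prepare_icr(0, vector));	/* fires the IPI */
}
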
index 3973e2df7f877c3a2fd7868691e3fd55214a178e..b8ac3b6cf7761a02b86a3271cfe18cbfec7d87ae 100644 (file)
@@ -6,13 +6,18 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/smp.h>
+#include <linux/ftrace.h>
 
 #include <asm/apic.h>
 #include <asm/io_apic.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 
 atomic_t irq_err_count;
 
+/* Function pointer for generic interrupt vector handling */
+void (*generic_interrupt_extension)(void) = NULL;
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * Each architecture has to answer this itself.
@@ -36,63 +41,65 @@ void ack_bad_irq(unsigned int irq)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-# define irq_stats(x)          (&per_cpu(irq_stat, x))
-#else
-# define irq_stats(x)          cpu_pda(x)
-#endif
+#define irq_stats(x)           (&per_cpu(irq_stat, x))
 /*
  * /proc/interrupts printing:
  */
-static int show_other_interrupts(struct seq_file *p)
+static int show_other_interrupts(struct seq_file *p, int prec)
 {
        int j;
 
-       seq_printf(p, "NMI: ");
+       seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");
 #ifdef CONFIG_X86_LOCAL_APIC
-       seq_printf(p, "LOC: ");
+       seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");
 #endif
+       if (generic_interrupt_extension) {
+               seq_printf(p, "PLT: ");
+               for_each_online_cpu(j)
+                       seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
+               seq_printf(p, "  Platform interrupts\n");
+       }
 #ifdef CONFIG_SMP
-       seq_printf(p, "RES: ");
+       seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_printf(p, "  Rescheduling interrupts\n");
-       seq_printf(p, "CAL: ");
+       seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_printf(p, "  Function call interrupts\n");
-       seq_printf(p, "TLB: ");
+       seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
 #endif
 #ifdef CONFIG_X86_MCE
-       seq_printf(p, "TRM: ");
+       seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
 # ifdef CONFIG_X86_64
-       seq_printf(p, "THR: ");
+       seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
 # endif
 #endif
 #ifdef CONFIG_X86_LOCAL_APIC
-       seq_printf(p, "SPU: ");
+       seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
 #endif
-       seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
+       seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
-       seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
+       seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
 #endif
        return 0;
 }
@@ -100,19 +107,22 @@ static int show_other_interrupts(struct seq_file *p)
 int show_interrupts(struct seq_file *p, void *v)
 {
        unsigned long flags, any_count = 0;
-       int i = *(loff_t *) v, j;
+       int i = *(loff_t *) v, j, prec;
        struct irqaction *action;
        struct irq_desc *desc;
 
        if (i > nr_irqs)
                return 0;
 
+       for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
+               j *= 10;
+
        if (i == nr_irqs)
-               return show_other_interrupts(p);
+               return show_other_interrupts(p, prec);
 
        /* print header */
        if (i == 0) {
-               seq_printf(p, "           ");
+               seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
@@ -133,7 +143,7 @@ int show_interrupts(struct seq_file *p, void *v)
        if (!action && !any_count)
                goto out;
 
-       seq_printf(p, "%3d: ", i);
+       seq_printf(p, "%*d: ", prec, i);
 #ifndef CONFIG_SMP
        seq_printf(p, "%10u ", kstat_irqs(i));
 #else
@@ -165,6 +175,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 #ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
 #endif
+       if (generic_interrupt_extension)
+               sum += irq_stats(cpu)->generic_irqs;
 #ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
@@ -192,4 +204,63 @@ u64 arch_irq_stat(void)
        return sum;
 }
 
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       /* high bit used in ret_from_ code  */
+       unsigned vector = ~regs->orig_ax;
+       unsigned irq;
+
+       exit_idle();
+       irq_enter();
+
+       irq = __get_cpu_var(vector_irq)[vector];
+
+       if (!handle_irq(irq, regs)) {
+#ifdef CONFIG_X86_64
+               if (!disable_apic)
+                       ack_APIC_irq();
+#endif
+
+               if (printk_ratelimit())
+                       printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
+                              __func__, smp_processor_id(), vector, irq);
+       }
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+       return 1;
+}
+
+/*
+ * Handler for GENERIC_INTERRUPT_VECTOR.
+ */
+void smp_generic_interrupt(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
+
+       ack_APIC_irq();
+
+       exit_idle();
+
+       irq_enter();
+
+       inc_irq_stat(generic_irqs);
+
+       if (generic_interrupt_extension)
+               generic_interrupt_extension();
+
+       irq_exit();
+
+       set_irq_regs(old_regs);
+}
+
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
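
The prec loop added to show_interrupts() sizes the /proc/interrupts row labels from nr_irqs: the width starts at 3 digits and grows by one each time nr_irqs reaches 1000, 10000, and so on, capped at 10. The same computation restated as a standalone userspace program, for clarity only:

#include <stdio.h>

static int irq_label_width(int nr_irqs)
{
	int prec, j;

	/* 3 digits minimum; widen while nr_irqs needs more, cap at 10 */
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;
	return prec;
}

int main(void)
{
	/* prints "3 4 6": 9 -> 3 digits, 1234 -> 4, 100000 -> 6 */
	printf("%d %d %d\n", irq_label_width(9),
	       irq_label_width(1234), irq_label_width(100000));
	return 0;
}
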
index 74b9ff7341e9763d84ec9fdf862a182c3b0a0154..3b09634a51535768cb3f99781dfa58c6a16b6ba4 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/uaccess.h>
+#include <linux/percpu.h>
 
 #include <asm/apic.h>
 
@@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { }
 union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
-};
+} __attribute__((aligned(PAGE_SIZE)));
 
-static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
-static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
+static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
+static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
 
-static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
-static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
+static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack);
+static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack);
 
 static void call_on_stack(void *func, void *stack)
 {
@@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
        u32 *isp, arg1, arg2;
 
        curctx = (union irq_ctx *) current_thread_info();
-       irqctx = hardirq_ctx[smp_processor_id()];
+       irqctx = __get_cpu_var(hardirq_ctx);
 
        /*
         * this is where we switch to the IRQ stack. However, if we are
@@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu)
 {
        union irq_ctx *irqctx;
 
-       if (hardirq_ctx[cpu])
+       if (per_cpu(hardirq_ctx, cpu))
                return;
 
-       irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
+       irqctx = &per_cpu(hardirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 
-       hardirq_ctx[cpu] = irqctx;
+       per_cpu(hardirq_ctx, cpu) = irqctx;
 
-       irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE];
+       irqctx = &per_cpu(softirq_stack, cpu);
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);
 
-       softirq_ctx[cpu] = irqctx;
+       per_cpu(softirq_ctx, cpu) = irqctx;
 
        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
-              cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
+              cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
 }
 
 void irq_ctx_exit(int cpu)
 {
-       hardirq_ctx[cpu] = NULL;
+       per_cpu(hardirq_ctx, cpu) = NULL;
 }
 
 asmlinkage void do_softirq(void)
@@ -169,7 +170,7 @@ asmlinkage void do_softirq(void)
 
        if (local_softirq_pending()) {
                curctx = current_thread_info();
-               irqctx = softirq_ctx[smp_processor_id()];
+               irqctx = __get_cpu_var(softirq_ctx);
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;
 
@@ -191,33 +192,16 @@ static inline int
 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
 #endif
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-unsigned int do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
-       struct pt_regs *old_regs;
-       /* high bit used in ret_from_ code */
-       int overflow;
-       unsigned vector = ~regs->orig_ax;
        struct irq_desc *desc;
-       unsigned irq;
-
-
-       old_regs = set_irq_regs(regs);
-       irq_enter();
-       irq = __get_cpu_var(vector_irq)[vector];
+       int overflow;
 
        overflow = check_stack_overflow();
 
        desc = irq_to_desc(irq);
-       if (unlikely(!desc)) {
-               printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
-                                       __func__, irq, vector, smp_processor_id());
-               BUG();
-       }
+       if (unlikely(!desc))
+               return false;
 
        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
@@ -225,13 +209,10 @@ unsigned int do_IRQ(struct pt_regs *regs)
                desc->handle_irq(irq, desc);
        }
 
-       irq_exit();
-       set_irq_regs(old_regs);
-       return 1;
+       return true;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-#include <mach_apic.h>
 
 /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
 void fixup_irqs(void)
@@ -248,7 +229,7 @@ void fixup_irqs(void)
                if (irq == 2)
                        continue;
 
-               affinity = &desc->affinity;
+               affinity = desc->affinity;
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        affinity = cpu_all_mask;
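
The stack bookkeeping in this file moves from NR_CPUS-indexed arrays to per-CPU variables, which allocates one instance per possible CPU and makes access to the local instance cheaper. The general shape of such a conversion, with example_ptr as a hypothetical variable that is not part of this patch:

#include <linux/percpu.h>

static DEFINE_PER_CPU(void *, example_ptr);

/* Remote access: name a specific CPU's instance explicitly. */
static void example_set(int cpu, void *p)
{
	per_cpu(example_ptr, cpu) = p;
}

/* Local access: this CPU's instance, as used in the hot paths above. */
static void *example_get_local(void)
{
	return __get_cpu_var(example_ptr);
}
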
index 63c88e6ec025ade8ed75215d98e161d14940e076..977d8b43a0ddc06597ff46942730e6759d2ca035 100644 (file)
 #include <linux/smp.h>
 #include <asm/io_apic.h>
 #include <asm/idle.h>
+#include <asm/apic.h>
+
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+EXPORT_PER_CPU_SYMBOL(irq_regs);
 
 /*
  * Probabilistic stack overflow check:
@@ -41,42 +48,18 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #endif
 }
 
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
-       struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
 
-       /* high bit used in ret_from_ code  */
-       unsigned vector = ~regs->orig_ax;
-       unsigned irq;
-
-       exit_idle();
-       irq_enter();
-       irq = __get_cpu_var(vector_irq)[vector];
-
        stack_overflow_check(regs);
 
        desc = irq_to_desc(irq);
-       if (likely(desc))
-               generic_handle_irq_desc(irq, desc);
-       else {
-               if (!disable_apic)
-                       ack_APIC_irq();
-
-               if (printk_ratelimit())
-                       printk(KERN_EMERG "%s: %d.%d No irq handler for vector\n",
-                               __func__, smp_processor_id(), vector);
-       }
-
-       irq_exit();
+       if (unlikely(!desc))
+               return false;
 
-       set_irq_regs(old_regs);
-       return 1;
+       generic_handle_irq_desc(irq, desc);
+       return true;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +83,7 @@ void fixup_irqs(void)
                /* interrupts are disabled at this point */
                spin_lock(&desc->lock);
 
-               affinity = &desc->affinity;
+               affinity = desc->affinity;
                if (!irq_has_action(irq) ||
                    cpumask_equal(affinity, cpu_online_mask)) {
                        spin_unlock(&desc->lock);
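
After this change both bitnesses expose bool handle_irq(), and the shared do_IRQ() in irq.c owns the set_irq_regs()/irq_enter()/irq_exit() bracketing, taking the "no handler" fallback only when handle_irq() returns false. A hypothetical caller illustrating that contract (sketch only, not code from this patch):

extern bool handle_irq(unsigned irq, struct pt_regs *regs);

static void example_dispatch(unsigned irq, struct pt_regs *regs)
{
	/* per-width code handles the irq; false means no descriptor */
	if (!handle_irq(irq, regs))
		printk(KERN_EMERG "no handler for irq %u\n", irq);
}
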
index 10a09c2f1828c102893d71a5add47ae320e2df55..bc132610544829de0f38a12dd8461d73104b39a2 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/pgtable.h>
 #include <asm/desc.h>
 #include <asm/apic.h>
-#include <asm/arch_hooks.h>
+#include <asm/setup.h>
 #include <asm/i8259.h>
 #include <asm/traps.h>
 
@@ -78,6 +78,15 @@ void __init init_ISA_irqs(void)
        }
 }
 
+/*
+ * IRQ2 is the cascade interrupt to the second interrupt controller
+ */
+static struct irqaction irq2 = {
+       .handler = no_action,
+       .mask = CPU_MASK_NONE,
+       .name = "cascade",
+};
+
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
        [0 ... IRQ0_VECTOR - 1] = -1,
        [IRQ0_VECTOR] = 0,
@@ -118,8 +127,8 @@ void __init native_init_IRQ(void)
 {
        int i;
 
-       /* all the set up before the call gates are initialised */
-       pre_intr_init_hook();
+       /* Execute any quirks before the call gates are initialised: */
+       x86_quirk_pre_intr_init();
 
        /*
         * Cover the whole vector space, no vector can escape
@@ -140,8 +149,15 @@ void __init native_init_IRQ(void)
         */
        alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
 
-       /* IPI for invalidation */
-       alloc_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);
+       /* IPIs for invalidation */
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
+       alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
 
        /* IPI for generic function call */
        alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
@@ -159,6 +175,9 @@ void __init native_init_IRQ(void)
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
+       /* generic IPI for platform specific use */
+       alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
+
        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
@@ -169,10 +188,14 @@ void __init native_init_IRQ(void)
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
 #endif
 
-       /* setup after call gates are initialised (usually add in
-        * the architecture specific gates)
+       if (!acpi_ioapic)
+               setup_irq(2, &irq2);
+
+       /*
+        * Call quirks after call gates are initialised (usually add in
+        * the architecture specific gates):
         */
-       intr_init_hook();
+       x86_quirk_intr_init();
 
        /*
         * External FPU? Set up irq13 if so, for
index da481a1e3f303f8fe3bf10995e8e218cc556f2d4..c7a49e0ffbfbc0b4b2b0cd12b01d5294ca73745c 100644 (file)
@@ -147,6 +147,9 @@ static void __init apic_intr_init(void)
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
+       /* generic IPI for platform specific use */
+       alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);
+
        /* IPI vectors for APIC spurious and error interrupts */
        alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
        alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
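
Both the 32-bit and 64-bit init paths now wire GENERIC_INTERRUPT_VECTOR to smp_generic_interrupt(), which acks the APIC and then dispatches through the generic_interrupt_extension pointer declared in irq.c. A hypothetical platform hook, with illustrative names:

extern void (*generic_interrupt_extension)(void);

static void example_platform_handler(void)
{
	/* handle the platform event; the APIC is already acked here */
}

static void __init example_platform_init(void)
{
	generic_interrupt_extension = example_platform_handler;
}
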
index 10435a120d2227bffd79a3df193f4238049ce2b0..eedfaebe1063d37ed03bc739322eee014eddf8fc 100644 (file)
@@ -46,7 +46,7 @@
 #include <asm/apicdef.h>
 #include <asm/system.h>
 
-#include <mach_ipi.h>
+#include <asm/apic.h>
 
 /*
  * Put the error code here just in case the user cares:
@@ -347,7 +347,7 @@ void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
  */
 void kgdb_roundup_cpus(unsigned long flags)
 {
-       send_IPI_allbutself(APIC_DM_NMI);
+       apic->send_IPI_allbutself(APIC_DM_NMI);
 }
 #endif
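
This is the same mechanical conversion applied throughout the merge: compile-time <mach_ipi.h> helpers become indirect calls through the single 'apic' driver structure, so the sub-architecture is selected at boot rather than at build time. A minimal sketch of the resulting call shape:

#include <asm/apic.h>

static void example_broadcast_nmi(void)
{
	/* dispatches to whichever apic driver was selected at boot */
	apic->send_IPI_allbutself(APIC_DM_NMI);
}
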
 
index 652fce6d2cceb9166f2bcacfa447c75060ede8c3..137f2e8132df1b0f5ac953134a5e0f727573edbe 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/clocksource.h>
 #include <linux/kvm_para.h>
 #include <asm/pvclock.h>
-#include <asm/arch_hooks.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
 #include <linux/percpu.h>
index 37f420018a41c806bf57048d50163d0d28c640aa..e7368c1da01dfa26eeedc563390e8191eb6eb08f 100644 (file)
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/io.h>
 #include <asm/apic.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
@@ -63,7 +63,7 @@ static void load_segments(void)
                "\tmovl %%eax,%%fs\n"
                "\tmovl %%eax,%%gs\n"
                "\tmovl %%eax,%%ss\n"
-               ::: "eax", "memory");
+               : : : "eax", "memory");
 #undef STR
 #undef __STR
 }
@@ -121,7 +121,7 @@ static void machine_kexec_page_table_set_one(
 static void machine_kexec_prepare_page_tables(struct kimage *image)
 {
        void *control_page;
-       pmd_t *pmd = 0;
+       pmd_t *pmd = NULL;
 
        control_page = page_address(image->control_code_page);
 #ifdef CONFIG_X86_PAE
@@ -205,7 +205,8 @@ void machine_kexec(struct kimage *image)
 
        if (image->preserve_context) {
 #ifdef CONFIG_X86_IO_APIC
-               /* We need to put APICs in legacy mode so that we can
+               /*
+                * We need to put APICs in legacy mode so that we can
                 * get timer interrupts in the second kernel. kexec/kdump
                 * paths already have calls to disable_IO_APIC() in
                 * one form or another. The kexec jump path also needs
@@ -227,7 +228,8 @@ void machine_kexec(struct kimage *image)
                page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
                                                << PAGE_SHIFT);
 
-       /* The segment registers are funny things, they have both a
+       /*
+        * The segment registers are funny things, they have both a
         * visible and an invisible part.  Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory.  At no other time is the
@@ -237,11 +239,12 @@ void machine_kexec(struct kimage *image)
         * segments, before I zap the gdt with an invalid value.
         */
        load_segments();
-       /* The gdt & idt are now invalid.
+       /*
+        * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
-       set_gdt(phys_to_virt(0),0);
-       set_idt(phys_to_virt(0),0);
+       set_gdt(phys_to_virt(0), 0);
+       set_idt(phys_to_virt(0), 0);
 
        /* now call it */
        image->start = relocate_kernel_ptr((unsigned long)image->head,
index c43caa3a91f349ecd527da1ec936872785815092..89cea4d44679641146411bbce9e3e94b9b2bff84 100644 (file)
 #include <linux/reboot.h>
 #include <linux/numa.h>
 #include <linux/ftrace.h>
+#include <linux/io.h>
+#include <linux/suspend.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
-#include <asm/io.h>
 
-#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
-static u64 kexec_pgd[512] PAGE_ALIGNED;
-static u64 kexec_pud0[512] PAGE_ALIGNED;
-static u64 kexec_pmd0[512] PAGE_ALIGNED;
-static u64 kexec_pte0[512] PAGE_ALIGNED;
-static u64 kexec_pud1[512] PAGE_ALIGNED;
-static u64 kexec_pmd1[512] PAGE_ALIGNED;
-static u64 kexec_pte1[512] PAGE_ALIGNED;
+static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
+                               unsigned long addr)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       struct page *page;
+       int result = -ENOMEM;
+
+       addr &= PMD_MASK;
+       pgd += pgd_index(addr);
+       if (!pgd_present(*pgd)) {
+               page = kimage_alloc_control_pages(image, 0);
+               if (!page)
+                       goto out;
+               pud = (pud_t *)page_address(page);
+               memset(pud, 0, PAGE_SIZE);
+               set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+       }
+       pud = pud_offset(pgd, addr);
+       if (!pud_present(*pud)) {
+               page = kimage_alloc_control_pages(image, 0);
+               if (!page)
+                       goto out;
+               pmd = (pmd_t *)page_address(page);
+               memset(pmd, 0, PAGE_SIZE);
+               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+       }
+       pmd = pmd_offset(pud, addr);
+       if (!pmd_present(*pmd))
+               set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+       result = 0;
+out:
+       return result;
+}
 
 static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
@@ -92,9 +119,8 @@ static int init_level4_page(struct kimage *image, pgd_t *level4p,
                }
                level3p = (pud_t *)page_address(page);
                result = init_level3_page(image, level3p, addr, last_addr);
-               if (result) {
+               if (result)
                        goto out;
-               }
                set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
                addr += PGDIR_SIZE;
        }
@@ -107,12 +133,72 @@ out:
        return result;
 }
 
+static void free_transition_pgtable(struct kimage *image)
+{
+       free_page((unsigned long)image->arch.pud);
+       free_page((unsigned long)image->arch.pmd);
+       free_page((unsigned long)image->arch.pte);
+}
+
+static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long vaddr, paddr;
+       int result = -ENOMEM;
+
+       vaddr = (unsigned long)relocate_kernel;
+       paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
+       pgd += pgd_index(vaddr);
+       if (!pgd_present(*pgd)) {
+               pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+               if (!pud)
+                       goto err;
+               image->arch.pud = pud;
+               set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+       }
+       pud = pud_offset(pgd, vaddr);
+       if (!pud_present(*pud)) {
+               pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+               if (!pmd)
+                       goto err;
+               image->arch.pmd = pmd;
+               set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+       }
+       pmd = pmd_offset(pud, vaddr);
+       if (!pmd_present(*pmd)) {
+               pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
+               if (!pte)
+                       goto err;
+               image->arch.pte = pte;
+               set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+       }
+       pte = pte_offset_kernel(pmd, vaddr);
+       set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
+       return 0;
+err:
+       free_transition_pgtable(image);
+       return result;
+}
+
 
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
        pgd_t *level4p;
+       int result;
        level4p = (pgd_t *)__va(start_pgtable);
-       return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
+       result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
+       if (result)
+               return result;
+       /*
+        * image->start may be outside 0 ~ max_pfn, for example when
+        * jumping back to the original kernel from the kexeced kernel
+        */
+       result = init_one_level2_page(image, level4p, image->start);
+       if (result)
+               return result;
+       return init_transition_pgtable(image, level4p);
 }
 
 static void set_idt(void *newidt, u16 limit)
@@ -174,7 +260,7 @@ int machine_kexec_prepare(struct kimage *image)
 
 void machine_kexec_cleanup(struct kimage *image)
 {
-       return;
+       free_transition_pgtable(image);
 }
 
 /*
@@ -185,36 +271,45 @@ void machine_kexec(struct kimage *image)
 {
        unsigned long page_list[PAGES_NR];
        void *control_page;
+       int save_ftrace_enabled;
 
-       tracer_disable();
+#ifdef CONFIG_KEXEC_JUMP
+       if (kexec_image->preserve_context)
+               save_processor_state();
+#endif
+
+       save_ftrace_enabled = __ftrace_enabled_save();
 
        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
 
+       if (image->preserve_context) {
+#ifdef CONFIG_X86_IO_APIC
+               /*
+                * We need to put APICs in legacy mode so that we can
+                * get timer interrupts in the second kernel. kexec/kdump
+                * paths already have calls to disable_IO_APIC() in
+                * one form or another. The kexec jump path also needs
+                * one.
+                */
+               disable_IO_APIC();
+#endif
+       }
+
        control_page = page_address(image->control_code_page) + PAGE_SIZE;
-       memcpy(control_page, relocate_kernel, PAGE_SIZE);
+       memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
 
        page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
-       page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
-       page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
-       page_list[VA_PGD] = (unsigned long)kexec_pgd;
-       page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
-       page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
-       page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
-       page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
-       page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
-       page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
-       page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
-       page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
-       page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
-       page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
-       page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
-       page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
-
+       page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
        page_list[PA_TABLE_PAGE] =
          (unsigned long)__pa(page_address(image->control_code_page));
 
-       /* The segment registers are funny things, they have both a
+       if (image->type == KEXEC_TYPE_DEFAULT)
+               page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
+                                               << PAGE_SHIFT);
+
+       /*
+        * The segment registers are funny things, they have both a
         * visible and an invisible part.  Whenever the visible part is
         * set to a specific selector, the invisible part is loaded
         * from a table in memory.  At no other time is the
@@ -224,15 +319,25 @@ void machine_kexec(struct kimage *image)
         * segments, before I zap the gdt with an invalid value.
         */
        load_segments();
-       /* The gdt & idt are now invalid.
+       /*
+        * The gdt & idt are now invalid.
         * If you want to load them you must set up your own idt & gdt.
         */
-       set_gdt(phys_to_virt(0),0);
-       set_idt(phys_to_virt(0),0);
+       set_gdt(phys_to_virt(0), 0);
+       set_idt(phys_to_virt(0), 0);
 
        /* now call it */
-       relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
-                       image->start);
+       image->start = relocate_kernel((unsigned long)image->head,
+                                      (unsigned long)page_list,
+                                      image->start,
+                                      image->preserve_context);
+
+#ifdef CONFIG_KEXEC_JUMP
+       if (kexec_image->preserve_context)
+               restore_processor_state();
+#endif
+
+       __ftrace_enabled_restore(save_ftrace_enabled);
 }
 
 void arch_crash_save_vmcoreinfo(void)
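
init_one_level2_page() and init_transition_pgtable() above both follow the standard walk-and-allocate pattern: at each level, if the entry is absent, allocate a zeroed table, link it with _KERNPG_TABLE, then descend to the next level. Stripped to its skeleton, with alloc_table() as a hypothetical stand-in for kimage_alloc_control_pages()/get_zeroed_page() and error handling omitted:

static void *alloc_table(void);	/* hypothetical: returns a zeroed page */

static void example_map_page(pgd_t *pgd, unsigned long vaddr, unsigned long paddr)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd))
		set_pgd(pgd, __pgd(__pa(alloc_table()) | _KERNPG_TABLE));
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud))
		set_pud(pud, __pud(__pa(alloc_table()) | _KERNPG_TABLE));
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd))
		set_pmd(pmd, __pmd(__pa(alloc_table()) | _KERNPG_TABLE));
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
}
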
index 2dc183758be345cd3b4347427769063fcebc837c..845d80ce1ef1143b591852b7ac16177080d3c07a 100644 (file)
@@ -51,7 +51,6 @@
 #include <linux/ioport.h>
 #include <asm/uaccess.h>
 #include <linux/init.h>
-#include <asm/arch_hooks.h>
 
 static unsigned char which_scsi;
 
@@ -474,6 +473,4 @@ void __kprobes mca_handle_nmi(void)
         * adapter was responsible for the error.
         */
        bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
-
-       mca_nmi_hook();
-} /* mca_handle_nmi */
+}
index b7f4c929e61505d368ac02de9e63e9a917472ca8..5e9f4fc51385280d73c772376dd94878cb38b57a 100644 (file)
@@ -87,9 +87,9 @@
 #include <linux/cpu.h>
 #include <linux/firmware.h>
 #include <linux/platform_device.h>
+#include <linux/uaccess.h>
 
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/microcode.h>
 
@@ -196,7 +196,7 @@ static inline int update_match_cpu(struct cpu_signature *csig, int sig, int pf)
        return (!sigmatch(sig, csig->sig, pf, csig->pf)) ? 0 : 1;
 }
 
-static inline int 
+static inline int
 update_match_revision(struct microcode_header_intel *mc_header,        int rev)
 {
        return (mc_header->rev <= rev) ? 0 : 1;
@@ -442,8 +442,8 @@ static int request_microcode_fw(int cpu, struct device *device)
                return ret;
        }
 
-       ret = generic_load_microcode(cpu, (void*)firmware->data, firmware->size,
-                       &get_ucode_fw);
+       ret = generic_load_microcode(cpu, (void *)firmware->data,
+                                    firmware->size, &get_ucode_fw);
 
        release_firmware(firmware);
 
@@ -460,7 +460,7 @@ static int request_microcode_user(int cpu, const void __user *buf, size_t size)
        /* We should bind the task to the CPU */
        BUG_ON(cpu != raw_smp_processor_id());
 
-       return generic_load_microcode(cpu, (void*)buf, size, &get_ucode_user);
+       return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
 }
 
 static void microcode_fini_cpu(int cpu)
index 666e43df51f99ce58b92c844149e1f8efcfa258f..712d15fdc416f9cd84139208434644ee887400a1 100644 (file)
@@ -226,7 +226,7 @@ static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
         return 0;
 }
 
-static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
+static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
         {
                 .callback = set_check_enable_amd_mmconf,
                 .ident = "Sun Microsystems Machine",
index 3db0a5442eb1bab48d9bbdfa93a0a9ccd5b74fa0..0edd819050e74d7ab7e5c148f64ec12fe610ec91 100644 (file)
@@ -42,7 +42,7 @@ void module_free(struct module *mod, void *module_region)
 {
        vfree(module_region);
        /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
+          table entries. */
 }
 
 /* We don't need anything special. */
@@ -113,13 +113,13 @@ int module_finalize(const Elf_Ehdr *hdr,
                *para = NULL;
        char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
-       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { 
+       for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
                if (!strcmp(".text", secstrings + s->sh_name))
                        text = s;
                if (!strcmp(".altinstructions", secstrings + s->sh_name))
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
-                       locks= s;
+                       locks = s;
                if (!strcmp(".parainstructions", secstrings + s->sh_name))
                        para = s;
        }
index 6ba87830d4b130e6aac27eef6aa5442c2e1d1e40..c23880b90b5c7d9f89ede70564ba741afc729ce5 100644 (file)
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-#define DEBUGP(fmt...) 
+#define DEBUGP(fmt...)
 
 #ifndef CONFIG_UML
 void module_free(struct module *mod, void *module_region)
 {
        vfree(module_region);
        /* FIXME: If module_region == mod->init_region, trim exception
-           table entries. */
+          table entries. */
 }
 
 void *module_alloc(unsigned long size)
@@ -77,7 +77,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
        Elf64_Sym *sym;
        void *loc;
-       u64 val; 
+       u64 val;
 
        DEBUGP("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
@@ -91,11 +91,11 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);
 
-               DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
-                      (int)ELF64_R_TYPE(rel[i].r_info), 
-                      sym->st_value, rel[i].r_addend, (u64)loc);
+               DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
+                       (int)ELF64_R_TYPE(rel[i].r_info),
+                       sym->st_value, rel[i].r_addend, (u64)loc);
 
-               val = sym->st_value + rel[i].r_addend; 
+               val = sym->st_value + rel[i].r_addend;
 
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                case R_X86_64_NONE:
@@ -113,16 +113,16 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                        if ((s64)val != *(s32 *)loc)
                                goto overflow;
                        break;
-               case R_X86_64_PC32: 
+               case R_X86_64_PC32:
                        val -= (u64)loc;
                        *(u32 *)loc = val;
 #if 0
                        if ((s64)val != *(s32 *)loc)
-                               goto overflow; 
+                               goto overflow;
 #endif
                        break;
                default:
-                       printk(KERN_ERR "module %s: Unknown rela relocation: %Lu\n",
+                       printk(KERN_ERR "module %s: Unknown rela relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }
@@ -130,7 +130,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        return 0;
 
 overflow:
-       printk(KERN_ERR "overflow in relocation type %d val %Lx\n", 
+       printk(KERN_ERR "overflow in relocation type %d val %Lx\n",
               (int)ELF64_R_TYPE(rel[i].r_info), val);
        printk(KERN_ERR "`%s' likely not compiled with -mcmodel=kernel\n",
               me->name);
@@ -143,13 +143,13 @@ int apply_relocate(Elf_Shdr *sechdrs,
                   unsigned int relsec,
                   struct module *me)
 {
-       printk("non add relocation not supported\n");
+       printk(KERN_ERR "non add relocation not supported\n");
        return -ENOSYS;
-} 
+}
 
 int module_finalize(const Elf_Ehdr *hdr,
-                    const Elf_Shdr *sechdrs,
-                    struct module *me)
+                   const Elf_Shdr *sechdrs,
+                   struct module *me)
 {
        const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
                *para = NULL;
@@ -161,7 +161,7 @@ int module_finalize(const Elf_Ehdr *hdr,
                if (!strcmp(".altinstructions", secstrings + s->sh_name))
                        alt = s;
                if (!strcmp(".smp_locks", secstrings + s->sh_name))
-                       locks= s;
+                       locks = s;
                if (!strcmp(".parainstructions", secstrings + s->sh_name))
                        para = s;
        }
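
The relocation cases in apply_relocate_add() reduce to two formulas: absolute relocations store S + A, PC-relative ones store S + A - P, where S is the symbol value, A the addend, and P the address being patched. A standalone restatement of the PC32 case, with illustrative types rather than the module loader's own:

#include <stdint.h>

static void example_apply_pc32(uint32_t *loc, uint64_t sym_value, int64_t addend)
{
	uint64_t val = sym_value + addend;	/* S + A */

	val -= (uint64_t)(uintptr_t)loc;	/* - P: make it PC-relative */
	*loc = (uint32_t)val;			/* result must fit in signed 32 bits */
}
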
index a649a4ccad43213ae1bb20cd62b48f65791a6ae7..47673e02ae58110709fce90ef77da2a4d5f63b64 100644 (file)
@@ -3,7 +3,7 @@
  *     compliant MP-table parsing routines.
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *      (c) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  */
 
 #include <asm/setup.h>
 #include <asm/smp.h>
 
-#include <mach_apic.h>
-#ifdef CONFIG_X86_32
-#include <mach_apicdef.h>
-#include <mach_mpparse.h>
-#endif
-
+#include <asm/apic.h>
 /*
  * Checksum an MP configuration block.
  */
@@ -144,11 +139,11 @@ static void __init MP_ioapic_info(struct mpc_ioapic *m)
        if (bad_ioapic(m->apicaddr))
                return;
 
-       mp_ioapics[nr_ioapics].mp_apicaddr = m->apicaddr;
-       mp_ioapics[nr_ioapics].mp_apicid = m->apicid;
-       mp_ioapics[nr_ioapics].mp_type = m->type;
-       mp_ioapics[nr_ioapics].mp_apicver = m->apicver;
-       mp_ioapics[nr_ioapics].mp_flags = m->flags;
+       mp_ioapics[nr_ioapics].apicaddr = m->apicaddr;
+       mp_ioapics[nr_ioapics].apicid = m->apicid;
+       mp_ioapics[nr_ioapics].type = m->type;
+       mp_ioapics[nr_ioapics].apicver = m->apicver;
+       mp_ioapics[nr_ioapics].flags = m->flags;
        nr_ioapics++;
 }
 
@@ -160,55 +155,55 @@ static void print_MP_intsrc_info(struct mpc_intsrc *m)
                m->srcbusirq, m->dstapic, m->dstirq);
 }
 
-static void __init print_mp_irq_info(struct mp_config_intsrc *mp_irq)
+static void __init print_mp_irq_info(struct mpc_intsrc *mp_irq)
 {
        apic_printk(APIC_VERBOSE, "Int: type %d, pol %d, trig %d, bus %02x,"
                " IRQ %02x, APIC ID %x, APIC INT %02x\n",
-               mp_irq->mp_irqtype, mp_irq->mp_irqflag & 3,
-               (mp_irq->mp_irqflag >> 2) & 3, mp_irq->mp_srcbus,
-               mp_irq->mp_srcbusirq, mp_irq->mp_dstapic, mp_irq->mp_dstirq);
+               mp_irq->irqtype, mp_irq->irqflag & 3,
+               (mp_irq->irqflag >> 2) & 3, mp_irq->srcbus,
+               mp_irq->srcbusirq, mp_irq->dstapic, mp_irq->dstirq);
 }
 
 static void __init assign_to_mp_irq(struct mpc_intsrc *m,
-                                   struct mp_config_intsrc *mp_irq)
+                                   struct mpc_intsrc *mp_irq)
 {
-       mp_irq->mp_dstapic = m->dstapic;
-       mp_irq->mp_type = m->type;
-       mp_irq->mp_irqtype = m->irqtype;
-       mp_irq->mp_irqflag = m->irqflag;
-       mp_irq->mp_srcbus = m->srcbus;
-       mp_irq->mp_srcbusirq = m->srcbusirq;
-       mp_irq->mp_dstirq = m->dstirq;
+       mp_irq->dstapic = m->dstapic;
+       mp_irq->type = m->type;
+       mp_irq->irqtype = m->irqtype;
+       mp_irq->irqflag = m->irqflag;
+       mp_irq->srcbus = m->srcbus;
+       mp_irq->srcbusirq = m->srcbusirq;
+       mp_irq->dstirq = m->dstirq;
 }
 
-static void __init assign_to_mpc_intsrc(struct mp_config_intsrc *mp_irq,
+static void __init assign_to_mpc_intsrc(struct mpc_intsrc *mp_irq,
                                        struct mpc_intsrc *m)
 {
-       m->dstapic = mp_irq->mp_dstapic;
-       m->type = mp_irq->mp_type;
-       m->irqtype = mp_irq->mp_irqtype;
-       m->irqflag = mp_irq->mp_irqflag;
-       m->srcbus = mp_irq->mp_srcbus;
-       m->srcbusirq = mp_irq->mp_srcbusirq;
-       m->dstirq = mp_irq->mp_dstirq;
+       m->dstapic = mp_irq->dstapic;
+       m->type = mp_irq->type;
+       m->irqtype = mp_irq->irqtype;
+       m->irqflag = mp_irq->irqflag;
+       m->srcbus = mp_irq->srcbus;
+       m->srcbusirq = mp_irq->srcbusirq;
+       m->dstirq = mp_irq->dstirq;
 }
 
-static int __init mp_irq_mpc_intsrc_cmp(struct mp_config_intsrc *mp_irq,
+static int __init mp_irq_mpc_intsrc_cmp(struct mpc_intsrc *mp_irq,
                                        struct mpc_intsrc *m)
 {
-       if (mp_irq->mp_dstapic != m->dstapic)
+       if (mp_irq->dstapic != m->dstapic)
                return 1;
-       if (mp_irq->mp_type != m->type)
+       if (mp_irq->type != m->type)
                return 2;
-       if (mp_irq->mp_irqtype != m->irqtype)
+       if (mp_irq->irqtype != m->irqtype)
                return 3;
-       if (mp_irq->mp_irqflag != m->irqflag)
+       if (mp_irq->irqflag != m->irqflag)
                return 4;
-       if (mp_irq->mp_srcbus != m->srcbus)
+       if (mp_irq->srcbus != m->srcbus)
                return 5;
-       if (mp_irq->mp_srcbusirq != m->srcbusirq)
+       if (mp_irq->srcbusirq != m->srcbusirq)
                return 6;
-       if (mp_irq->mp_dstirq != m->dstirq)
+       if (mp_irq->dstirq != m->dstirq)
                return 7;
 
        return 0;
@@ -292,16 +287,7 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                return 0;
 
 #ifdef CONFIG_X86_32
-       /*
-        * need to make sure summit and es7000's mps_oem_check is safe to be
-        * called early via genericarch 's mps_oem_check
-        */
-       if (early) {
-#ifdef CONFIG_X86_NUMAQ
-               numaq_mps_oem_check(mpc, oem, str);
-#endif
-       } else
-               mps_oem_check(mpc, oem, str);
+       generic_mps_oem_check(mpc, oem, str);
 #endif
        /* save the local APIC address, it might be non-default */
        if (!acpi_lapic)
@@ -386,13 +372,13 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early)
                        (*x86_quirks->mpc_record)++;
        }
 
-#ifdef CONFIG_X86_GENERICARCH
-       generic_bigsmp_probe();
+#ifdef CONFIG_X86_BIGSMP
+       generic_bigsmp_probe();
 #endif
 
-#ifdef CONFIG_X86_32
-       setup_apic_routing();
-#endif
+       if (apic->setup_apic_routing)
+               apic->setup_apic_routing();
+
        if (!num_processors)
                printk(KERN_ERR "MPTABLE: no processors registered!\n");
        return num_processors;
@@ -417,7 +403,7 @@ static void __init construct_default_ioirq_mptable(int mpc_default_type)
        intsrc.type = MP_INTSRC;
        intsrc.irqflag = 0;     /* conforming */
        intsrc.srcbus = 0;
-       intsrc.dstapic = mp_ioapics[0].mp_apicid;
+       intsrc.dstapic = mp_ioapics[0].apicid;
 
        intsrc.irqtype = mp_INT;
 
@@ -570,14 +556,27 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
        }
 }
 
-static struct intel_mp_floating *mpf_found;
+static struct mpf_intel *mpf_found;
+
+static unsigned long __init get_mpc_size(unsigned long physptr)
+{
+       struct mpc_table *mpc;
+       unsigned long size;
+
+       mpc = early_ioremap(physptr, PAGE_SIZE);
+       size = mpc->length;
+       early_iounmap(mpc, PAGE_SIZE);
+       apic_printk(APIC_VERBOSE, "  mpc: %lx-%lx\n", physptr, physptr + size);
+
+       return size;
+}
 
 /*
  * Scan the memory blocks for an SMP configuration block.
  */
 static void __init __get_smp_config(unsigned int early)
 {
-       struct intel_mp_floating *mpf = mpf_found;
+       struct mpf_intel *mpf = mpf_found;
 
        if (!mpf)
                return;
@@ -598,9 +597,9 @@ static void __init __get_smp_config(unsigned int early)
        }
 
        printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n",
-              mpf->mpf_specification);
+              mpf->specification);
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
-       if (mpf->mpf_feature2 & (1 << 7)) {
+       if (mpf->feature2 & (1 << 7)) {
                printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
                pic_mode = 1;
        } else {
@@ -611,7 +610,7 @@ static void __init __get_smp_config(unsigned int early)
        /*
         * Now see if we need to read further.
         */
-       if (mpf->mpf_feature1 != 0) {
+       if (mpf->feature1 != 0) {
                if (early) {
                        /*
                         * local APIC has default address
@@ -621,16 +620,20 @@ static void __init __get_smp_config(unsigned int early)
                }
 
                printk(KERN_INFO "Default MP configuration #%d\n",
-                      mpf->mpf_feature1);
-               construct_default_ISA_mptable(mpf->mpf_feature1);
+                      mpf->feature1);
+               construct_default_ISA_mptable(mpf->feature1);
 
-       } else if (mpf->mpf_physptr) {
+       } else if (mpf->physptr) {
+               struct mpc_table *mpc;
+               unsigned long size;
 
+               size = get_mpc_size(mpf->physptr);
+               mpc = early_ioremap(mpf->physptr, size);
                /*
                 * Read the physical hardware table.  Anything here will
                 * override the defaults.
                 */
-               if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr), early)) {
+               if (!smp_read_mpc(mpc, early)) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 0;
 #endif
@@ -638,8 +641,10 @@ static void __init __get_smp_config(unsigned int early)
                               "BIOS bug, MP table errors detected!...\n");
                        printk(KERN_ERR "... disabling SMP support. "
                               "(tell your hw vendor)\n");
+                       early_iounmap(mpc, size);
                        return;
                }
+               early_iounmap(mpc, size);
 
                if (early)
                        return;
@@ -688,33 +693,33 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                  unsigned reserve)
 {
        unsigned int *bp = phys_to_virt(base);
-       struct intel_mp_floating *mpf;
+       struct mpf_intel *mpf;
 
        apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n",
                        bp, length);
        BUILD_BUG_ON(sizeof(*mpf) != 16);
 
        while (length > 0) {
-               mpf = (struct intel_mp_floating *)bp;
+               mpf = (struct mpf_intel *)bp;
                if ((*bp == SMP_MAGIC_IDENT) &&
-                   (mpf->mpf_length == 1) &&
+                   (mpf->length == 1) &&
                    !mpf_checksum((unsigned char *)bp, 16) &&
-                   ((mpf->mpf_specification == 1)
-                    || (mpf->mpf_specification == 4))) {
+                   ((mpf->specification == 1)
+                    || (mpf->specification == 4))) {
 #ifdef CONFIG_X86_LOCAL_APIC
                        smp_found_config = 1;
 #endif
                        mpf_found = mpf;
 
-                       printk(KERN_INFO "found SMP MP-table at [%p] %08lx\n",
-                              mpf, virt_to_phys(mpf));
+                       printk(KERN_INFO "found SMP MP-table at [%p] %llx\n",
+                              mpf, (u64)virt_to_phys(mpf));
 
                        if (!reserve)
                                return 1;
-                       reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE,
+                       reserve_bootmem_generic(virt_to_phys(mpf), sizeof(*mpf),
                                        BOOTMEM_DEFAULT);
-                       if (mpf->mpf_physptr) {
-                               unsigned long size = PAGE_SIZE;
+                       if (mpf->physptr) {
+                               unsigned long size = get_mpc_size(mpf->physptr);
 #ifdef CONFIG_X86_32
                                /*
                                 * We cannot access the MPC table to compute
@@ -722,15 +727,24 @@ static int __init smp_scan_config(unsigned long base, unsigned long length,
                                 * the bottom is mapped now.
                                 * PC-9800's MPC table places on the very last
                                 * of physical memory; so that simply reserving
-                                * PAGE_SIZE from mpg->mpf_physptr yields BUG()
+                                * PAGE_SIZE from mpf->physptr yields BUG()
                                 * in reserve_bootmem.
+                                * We also need to make sure physptr is below
+                                * max_low_pfn; we don't need to reserve the
+                                * area above max_low_pfn.
                                 */
                                unsigned long end = max_low_pfn * PAGE_SIZE;
-                               if (mpf->mpf_physptr + size > end)
-                                       size = end - mpf->mpf_physptr;
-#endif
-                               reserve_bootmem_generic(mpf->mpf_physptr, size,
+
+                               if (mpf->physptr < end) {
+                                       if (mpf->physptr + size > end)
+                                               size = end - mpf->physptr;
+                                       reserve_bootmem_generic(mpf->physptr, size,
+                                                       BOOTMEM_DEFAULT);
+                               }
+#else
+                               reserve_bootmem_generic(mpf->physptr, size,
                                                BOOTMEM_DEFAULT);
+#endif
                        }
 
                        return 1;
@@ -809,15 +823,15 @@ static int  __init get_MP_intsrc_index(struct mpc_intsrc *m)
        /* not legacy */
 
        for (i = 0; i < mp_irq_entries; i++) {
-               if (mp_irqs[i].mp_irqtype != mp_INT)
+               if (mp_irqs[i].irqtype != mp_INT)
                        continue;
 
-               if (mp_irqs[i].mp_irqflag != 0x0f)
+               if (mp_irqs[i].irqflag != 0x0f)
                        continue;
 
-               if (mp_irqs[i].mp_srcbus != m->srcbus)
+               if (mp_irqs[i].srcbus != m->srcbus)
                        continue;
-               if (mp_irqs[i].mp_srcbusirq != m->srcbusirq)
+               if (mp_irqs[i].srcbusirq != m->srcbusirq)
                        continue;
                if (irq_used[i]) {
                        /* already claimed */
@@ -876,12 +890,12 @@ static int  __init replace_intsrc_all(struct mpc_table *mpc,
 #ifdef CONFIG_X86_IO_APIC
                                struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
 
-                               printk(KERN_INFO "OLD ");
+                               apic_printk(APIC_VERBOSE, "OLD ");
                                print_MP_intsrc_info(m);
                                i = get_MP_intsrc_index(m);
                                if (i > 0) {
                                        assign_to_mpc_intsrc(&mp_irqs[i], m);
-                                       printk(KERN_INFO "NEW ");
+                                       apic_printk(APIC_VERBOSE, "NEW ");
                                        print_mp_irq_info(&mp_irqs[i]);
                                } else if (!i) {
                                        /* legacy, do nothing */
@@ -922,14 +936,14 @@ static int  __init replace_intsrc_all(struct mpc_table *mpc,
                if (irq_used[i])
                        continue;
 
-               if (mp_irqs[i].mp_irqtype != mp_INT)
+               if (mp_irqs[i].irqtype != mp_INT)
                        continue;
 
-               if (mp_irqs[i].mp_irqflag != 0x0f)
+               if (mp_irqs[i].irqflag != 0x0f)
                        continue;
 
                if (nr_m_spare > 0) {
-                       printk(KERN_INFO "*NEW* found ");
+                       apic_printk(APIC_VERBOSE, "*NEW* found\n");
                        nr_m_spare--;
                        assign_to_mpc_intsrc(&mp_irqs[i], m_spare[nr_m_spare]);
                        m_spare[nr_m_spare] = NULL;
@@ -1001,7 +1015,7 @@ static int __init update_mp_table(void)
 {
        char str[16];
        char oem[10];
-       struct intel_mp_floating *mpf;
+       struct mpf_intel *mpf;
        struct mpc_table *mpc, *mpc_new;
 
        if (!enable_update_mptable)
@@ -1014,19 +1028,19 @@ static int __init update_mp_table(void)
        /*
         * Now see if we need to go further.
         */
-       if (mpf->mpf_feature1 != 0)
+       if (mpf->feature1 != 0)
                return 0;
 
-       if (!mpf->mpf_physptr)
+       if (!mpf->physptr)
                return 0;
 
-       mpc = phys_to_virt(mpf->mpf_physptr);
+       mpc = phys_to_virt(mpf->physptr);
 
        if (!smp_check_mpc(mpc, oem, str))
                return 0;
 
-       printk(KERN_INFO "mpf: %lx\n", virt_to_phys(mpf));
-       printk(KERN_INFO "mpf_physptr: %x\n", mpf->mpf_physptr);
+       printk(KERN_INFO "mpf: %llx\n", (u64)virt_to_phys(mpf));
+       printk(KERN_INFO "physptr: %x\n", mpf->physptr);
 
        if (mpc_new_phys && mpc->length > mpc_new_length) {
                mpc_new_phys = 0;
@@ -1047,23 +1061,23 @@ static int __init update_mp_table(void)
                }
                printk(KERN_INFO "use in-positon replacing\n");
        } else {
-               mpf->mpf_physptr = mpc_new_phys;
+               mpf->physptr = mpc_new_phys;
                mpc_new = phys_to_virt(mpc_new_phys);
                memcpy(mpc_new, mpc, mpc->length);
                mpc = mpc_new;
                /* check if we can modify that */
-               if (mpc_new_phys - mpf->mpf_physptr) {
-                       struct intel_mp_floating *mpf_new;
+               if (mpc_new_phys - mpf->physptr) {
+                       struct mpf_intel *mpf_new;
                        /* steal 16 bytes from [0, 1k) */
                        printk(KERN_INFO "mpf new: %x\n", 0x400 - 16);
                        mpf_new = phys_to_virt(0x400 - 16);
                        memcpy(mpf_new, mpf, 16);
                        mpf = mpf_new;
-                       mpf->mpf_physptr = mpc_new_phys;
+                       mpf->physptr = mpc_new_phys;
                }
-               mpf->mpf_checksum = 0;
-               mpf->mpf_checksum -= mpf_checksum((unsigned char *)mpf, 16);
-               printk(KERN_INFO "mpf_physptr new: %x\n", mpf->mpf_physptr);
+               mpf->checksum = 0;
+               mpf->checksum -= mpf_checksum((unsigned char *)mpf, 16);
+               printk(KERN_INFO "physptr new: %x\n", mpf->physptr);
        }
 
        /*
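
The checksum fixup in this hunk works because the MP floating pointer is defined to sum to zero, byte-wise, over its 16 bytes. A minimal sketch; the checksum byte offset of 10 is an assumption based on the mpf_intel layout (signature, physptr, length, specification, checksum):

#include <stdio.h>

static int mpf_checksum(unsigned char *mp, int len)
{
        int sum = 0;

        while (len--)
                sum += *mp++;
        return sum & 0xFF;
}

int main(void)
{
        unsigned char mpf[16] = { '_', 'M', 'P', '_', 0x34, 0x12 };

        mpf[10] = 0;                                    /* checksum byte */
        mpf[10] -= mpf_checksum(mpf, sizeof(mpf));      /* force sum == 0 */
        printf("%d\n", mpf_checksum(mpf, sizeof(mpf))); /* prints 0 */
        return 0;
}
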
index 726266695b2cb4304221989744fb29fbb3e1bb78..3cf3413ec626936f334c309f9a20f2a32a36de96 100644 (file)
 #include <linux/device.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/uaccess.h>
 
 #include <asm/processor.h>
 #include <asm/msr.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
 static struct class *msr_class;
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
deleted file mode 100644 (file)
index 7228979..0000000
+++ /dev/null
@@ -1,572 +0,0 @@
-/*
- *  NMI watchdog support on APIC systems
- *
- *  Started by Ingo Molnar <mingo@redhat.com>
- *
- *  Fixes:
- *  Mikael Pettersson  : AMD K7 support for local APIC NMI watchdog.
- *  Mikael Pettersson  : Power Management for local APIC NMI watchdog.
- *  Mikael Pettersson  : Pentium 4 support for local APIC NMI watchdog.
- *  Pavel Machek and
- *  Mikael Pettersson  : PM converted to driver model. Disable/enable API.
- */
-
-#include <asm/apic.h>
-
-#include <linux/nmi.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/sysdev.h>
-#include <linux/sysctl.h>
-#include <linux/percpu.h>
-#include <linux/kprobes.h>
-#include <linux/cpumask.h>
-#include <linux/kernel_stat.h>
-#include <linux/kdebug.h>
-#include <linux/smp.h>
-
-#include <asm/i8259.h>
-#include <asm/io_apic.h>
-#include <asm/proto.h>
-#include <asm/timer.h>
-
-#include <asm/mce.h>
-
-#include <mach_traps.h>
-
-int unknown_nmi_panic;
-int nmi_watchdog_enabled;
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
-
-/* nmi_active:
- * >0: the lapic NMI watchdog is active, but can be disabled
- * <0: the lapic NMI watchdog has not been set up, and cannot
- *     be enabled
- *  0: the lapic NMI watchdog is disabled, but can be enabled
- */
-atomic_t nmi_active = ATOMIC_INIT(0);          /* oprofile uses this */
-EXPORT_SYMBOL(nmi_active);
-
-unsigned int nmi_watchdog = NMI_NONE;
-EXPORT_SYMBOL(nmi_watchdog);
-
-static int panic_on_timeout;
-
-static unsigned int nmi_hz = HZ;
-static DEFINE_PER_CPU(short, wd_enabled);
-static int endflag __initdata;
-
-static inline unsigned int get_nmi_count(int cpu)
-{
-#ifdef CONFIG_X86_64
-       return cpu_pda(cpu)->__nmi_count;
-#else
-       return nmi_count(cpu);
-#endif
-}
-
-static inline int mce_in_progress(void)
-{
-#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
-       return atomic_read(&mce_entry) > 0;
-#endif
-       return 0;
-}
-
-/*
- * Take the local apic timer and PIT/HPET into account. We don't
- * know which one is active, when we have highres/dyntick on
- */
-static inline unsigned int get_timer_irqs(int cpu)
-{
-#ifdef CONFIG_X86_64
-       return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
-#else
-       return per_cpu(irq_stat, cpu).apic_timer_irqs +
-               per_cpu(irq_stat, cpu).irq0_irqs;
-#endif
-}
-
-#ifdef CONFIG_SMP
-/*
- * The performance counters used by NMI_LOCAL_APIC don't trigger when
- * the CPU is idle. To make sure the NMI watchdog really ticks on all
- * CPUs during the test make them busy.
- */
-static __init void nmi_cpu_busy(void *data)
-{
-       local_irq_enable_in_hardirq();
-       /*
-        * Intentionally don't use cpu_relax here. This is
-        * to make sure that the performance counter really ticks,
-        * even if there is a simulator or similar that catches the
-        * pause instruction. On a real HT machine this is fine because
-        * all other CPUs are busy with "useless" delay loops and don't
-        * care if they get somewhat fewer cycles.
-        */
-       while (endflag == 0)
-               mb();
-}
-#endif
-
-static void report_broken_nmi(int cpu, int *prev_nmi_count)
-{
-       printk(KERN_CONT "\n");
-
-       printk(KERN_WARNING
-               "WARNING: CPU#%d: NMI appears to be stuck (%d->%d)!\n",
-                       cpu, prev_nmi_count[cpu], get_nmi_count(cpu));
-
-       printk(KERN_WARNING
-               "Please report this to bugzilla.kernel.org,\n");
-       printk(KERN_WARNING
-               "and attach the output of the 'dmesg' command.\n");
-
-       per_cpu(wd_enabled, cpu) = 0;
-       atomic_dec(&nmi_active);
-}
-
-static void __acpi_nmi_disable(void *__unused)
-{
-       apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
-int __init check_nmi_watchdog(void)
-{
-       unsigned int *prev_nmi_count;
-       int cpu;
-
-       if (!nmi_watchdog_active() || !atomic_read(&nmi_active))
-               return 0;
-
-       prev_nmi_count = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
-       if (!prev_nmi_count)
-               goto error;
-
-       printk(KERN_INFO "Testing NMI watchdog ... ");
-
-#ifdef CONFIG_SMP
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               smp_call_function(nmi_cpu_busy, (void *)&endflag, 0);
-#endif
-
-       for_each_possible_cpu(cpu)
-               prev_nmi_count[cpu] = get_nmi_count(cpu);
-       local_irq_enable();
-       mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
-
-       for_each_online_cpu(cpu) {
-               if (!per_cpu(wd_enabled, cpu))
-                       continue;
-               if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5)
-                       report_broken_nmi(cpu, prev_nmi_count);
-       }
-       endflag = 1;
-       if (!atomic_read(&nmi_active)) {
-               kfree(prev_nmi_count);
-               atomic_set(&nmi_active, -1);
-               goto error;
-       }
-       printk("OK.\n");
-
-       /*
-        * now that we know it works we can reduce NMI frequency to
-        * something more reasonable; makes a difference in some configs
-        */
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               nmi_hz = lapic_adjust_nmi_hz(1);
-
-       kfree(prev_nmi_count);
-       return 0;
-error:
-       if (nmi_watchdog == NMI_IO_APIC) {
-               if (!timer_through_8259)
-                       disable_8259A_irq(0);
-               on_each_cpu(__acpi_nmi_disable, NULL, 1);
-       }
-
-#ifdef CONFIG_X86_32
-       timer_ack = 0;
-#endif
-       return -1;
-}
-
-static int __init setup_nmi_watchdog(char *str)
-{
-       unsigned int nmi;
-
-       if (!strncmp(str, "panic", 5)) {
-               panic_on_timeout = 1;
-               str = strchr(str, ',');
-               if (!str)
-                       return 1;
-               ++str;
-       }
-
-       if (!strncmp(str, "lapic", 5))
-               nmi_watchdog = NMI_LOCAL_APIC;
-       else if (!strncmp(str, "ioapic", 6))
-               nmi_watchdog = NMI_IO_APIC;
-       else {
-               get_option(&str, &nmi);
-               if (nmi >= NMI_INVALID)
-                       return 0;
-               nmi_watchdog = nmi;
-       }
-
-       return 1;
-}
-__setup("nmi_watchdog=", setup_nmi_watchdog);
-
-/*
- * Suspend/resume support
- */
-#ifdef CONFIG_PM
-
-static int nmi_pm_active; /* nmi_active before suspend */
-
-static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-       /* only CPU0 goes here, other CPUs should be offline */
-       nmi_pm_active = atomic_read(&nmi_active);
-       stop_apic_nmi_watchdog(NULL);
-       BUG_ON(atomic_read(&nmi_active) != 0);
-       return 0;
-}
-
-static int lapic_nmi_resume(struct sys_device *dev)
-{
-       /* only CPU0 goes here, other CPUs should be offline */
-       if (nmi_pm_active > 0) {
-               setup_apic_nmi_watchdog(NULL);
-               touch_nmi_watchdog();
-       }
-       return 0;
-}
-
-static struct sysdev_class nmi_sysclass = {
-       .name           = "lapic_nmi",
-       .resume         = lapic_nmi_resume,
-       .suspend        = lapic_nmi_suspend,
-};
-
-static struct sys_device device_lapic_nmi = {
-       .id     = 0,
-       .cls    = &nmi_sysclass,
-};
-
-static int __init init_lapic_nmi_sysfs(void)
-{
-       int error;
-
-       /*
-        * should really be a BUG_ON but because this is an
-        * init call, it just doesn't work.  -dcz
-        */
-       if (nmi_watchdog != NMI_LOCAL_APIC)
-               return 0;
-
-       if (atomic_read(&nmi_active) < 0)
-               return 0;
-
-       error = sysdev_class_register(&nmi_sysclass);
-       if (!error)
-               error = sysdev_register(&device_lapic_nmi);
-       return error;
-}
-
-/* must come after the local APIC's device_initcall() */
-late_initcall(init_lapic_nmi_sysfs);
-
-#endif /* CONFIG_PM */
-
-static void __acpi_nmi_enable(void *__unused)
-{
-       apic_write(APIC_LVT0, APIC_DM_NMI);
-}
-
-/*
- * Enable timer based NMIs on all CPUs:
- */
-void acpi_nmi_enable(void)
-{
-       if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-               on_each_cpu(__acpi_nmi_enable, NULL, 1);
-}
-
-/*
- * Disable timer based NMIs on all CPUs:
- */
-void acpi_nmi_disable(void)
-{
-       if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-               on_each_cpu(__acpi_nmi_disable, NULL, 1);
-}
-
-/*
- * This function is called as soon the LAPIC NMI watchdog driver has everything
- * in place and it's ready to check if the NMIs belong to the NMI watchdog
- */
-void cpu_nmi_set_wd_enabled(void)
-{
-       __get_cpu_var(wd_enabled) = 1;
-}
-
-void setup_apic_nmi_watchdog(void *unused)
-{
-       if (__get_cpu_var(wd_enabled))
-               return;
-
-       /* cheap hack to support suspend/resume */
-       /* if cpu0 is not active neither should the other cpus */
-       if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
-               return;
-
-       switch (nmi_watchdog) {
-       case NMI_LOCAL_APIC:
-               if (lapic_watchdog_init(nmi_hz) < 0) {
-                       __get_cpu_var(wd_enabled) = 0;
-                       return;
-               }
-               /* FALL THROUGH */
-       case NMI_IO_APIC:
-               __get_cpu_var(wd_enabled) = 1;
-               atomic_inc(&nmi_active);
-       }
-}
-
-void stop_apic_nmi_watchdog(void *unused)
-{
-       /* only support LOCAL and IO APICs for now */
-       if (!nmi_watchdog_active())
-               return;
-       if (__get_cpu_var(wd_enabled) == 0)
-               return;
-       if (nmi_watchdog == NMI_LOCAL_APIC)
-               lapic_watchdog_stop();
-       else
-               __acpi_nmi_disable(NULL);
-       __get_cpu_var(wd_enabled) = 0;
-       atomic_dec(&nmi_active);
-}
-
-/*
- * the best way to detect whether a CPU has a 'hard lockup' problem
- * is to check its local APIC timer IRQ counts. If they are not
- * changing then that CPU has some problem.
- *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
- * since NMIs don't listen to _any_ locks, we have to be extremely
- * careful not to rely on unsafe variables. The printk might lock
- * up though, so we have to break up any console locks first ...
- * [when there will be more tty-related locks, break them up here too!]
- */
-
-static DEFINE_PER_CPU(unsigned, last_irq_sum);
-static DEFINE_PER_CPU(local_t, alert_counter);
-static DEFINE_PER_CPU(int, nmi_touch);
-
-void touch_nmi_watchdog(void)
-{
-       if (nmi_watchdog_active()) {
-               unsigned cpu;
-
-               /*
-                * Tell other CPUs to reset their alert counters. We cannot
-                * do it ourselves because the alert count increase is not
-                * atomic.
-                */
-               for_each_present_cpu(cpu) {
-                       if (per_cpu(nmi_touch, cpu) != 1)
-                               per_cpu(nmi_touch, cpu) = 1;
-               }
-       }
-
-       /*
-        * Tickle the softlockup detector too:
-        */
-       touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-notrace __kprobes int
-nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
-{
-       /*
-        * Since current_thread_info()-> is always on the stack, and we
-        * always switch the stack NMI-atomically, it's safe to use
-        * smp_processor_id().
-        */
-       unsigned int sum;
-       int touched = 0;
-       int cpu = smp_processor_id();
-       int rc = 0;
-
-       /* check for other users first */
-       if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-                       == NOTIFY_STOP) {
-               rc = 1;
-               touched = 1;
-       }
-
-       sum = get_timer_irqs(cpu);
-
-       if (__get_cpu_var(nmi_touch)) {
-               __get_cpu_var(nmi_touch) = 0;
-               touched = 1;
-       }
-
-       if (cpu_isset(cpu, backtrace_mask)) {
-               static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
-
-               spin_lock(&lock);
-               printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
-               dump_stack();
-               spin_unlock(&lock);
-               cpu_clear(cpu, backtrace_mask);
-       }
-
-       /* Could check oops_in_progress here too, but it's safer not to */
-       if (mce_in_progress())
-               touched = 1;
-
-       /* if none of the timers is firing, this cpu isn't doing much */
-       if (!touched && __get_cpu_var(last_irq_sum) == sum) {
-               /*
-                * Ayiee, looks like this CPU is stuck ...
-                * wait a few IRQs (5 seconds) before doing the oops ...
-                */
-               local_inc(&__get_cpu_var(alert_counter));
-               if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
-                       /*
-                        * die_nmi will return ONLY if NOTIFY_STOP happens..
-                        */
-                       die_nmi("BUG: NMI Watchdog detected LOCKUP",
-                               regs, panic_on_timeout);
-       } else {
-               __get_cpu_var(last_irq_sum) = sum;
-               local_set(&__get_cpu_var(alert_counter), 0);
-       }
-
-       /* see if the nmi watchdog went off */
-       if (!__get_cpu_var(wd_enabled))
-               return rc;
-       switch (nmi_watchdog) {
-       case NMI_LOCAL_APIC:
-               rc |= lapic_wd_event(nmi_hz);
-               break;
-       case NMI_IO_APIC:
-               /*
-                * don't know how to accurately check for this.
-                * just assume it was a watchdog timer interrupt.
-                * This matches the old behaviour.
-                */
-               rc = 1;
-               break;
-       }
-       return rc;
-}
-
-#ifdef CONFIG_SYSCTL
-
-static void enable_ioapic_nmi_watchdog_single(void *unused)
-{
-       __get_cpu_var(wd_enabled) = 1;
-       atomic_inc(&nmi_active);
-       __acpi_nmi_enable(NULL);
-}
-
-static void enable_ioapic_nmi_watchdog(void)
-{
-       on_each_cpu(enable_ioapic_nmi_watchdog_single, NULL, 1);
-       touch_nmi_watchdog();
-}
-
-static void disable_ioapic_nmi_watchdog(void)
-{
-       on_each_cpu(stop_apic_nmi_watchdog, NULL, 1);
-}
-
-static int __init setup_unknown_nmi_panic(char *str)
-{
-       unknown_nmi_panic = 1;
-       return 1;
-}
-__setup("unknown_nmi_panic", setup_unknown_nmi_panic);
-
-static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
-{
-       unsigned char reason = get_nmi_reason();
-       char buf[64];
-
-       sprintf(buf, "NMI received for unknown reason %02x\n", reason);
-       die_nmi(buf, regs, 1); /* Always panic here */
-       return 0;
-}
-
-/*
- * proc handler for /proc/sys/kernel/nmi
- */
-int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
-                       void __user *buffer, size_t *length, loff_t *ppos)
-{
-       int old_state;
-
-       nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
-       old_state = nmi_watchdog_enabled;
-       proc_dointvec(table, write, file, buffer, length, ppos);
-       if (!!old_state == !!nmi_watchdog_enabled)
-               return 0;
-
-       if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
-               printk(KERN_WARNING
-                       "NMI watchdog is permanently disabled\n");
-               return -EIO;
-       }
-
-       if (nmi_watchdog == NMI_LOCAL_APIC) {
-               if (nmi_watchdog_enabled)
-                       enable_lapic_nmi_watchdog();
-               else
-                       disable_lapic_nmi_watchdog();
-       } else if (nmi_watchdog == NMI_IO_APIC) {
-               if (nmi_watchdog_enabled)
-                       enable_ioapic_nmi_watchdog();
-               else
-                       disable_ioapic_nmi_watchdog();
-       } else {
-               printk(KERN_WARNING
-                       "NMI watchdog doesn't know what hardware to touch\n");
-               return -EIO;
-       }
-       return 0;
-}
-
-#endif /* CONFIG_SYSCTL */
-
-int do_nmi_callback(struct pt_regs *regs, int cpu)
-{
-#ifdef CONFIG_SYSCTL
-       if (unknown_nmi_panic)
-               return unknown_nmi_panic_callback(regs, cpu);
-#endif
-       return 0;
-}
-
-void __trigger_all_cpu_backtrace(void)
-{
-       int i;
-
-       backtrace_mask = cpu_online_map;
-       /* Wait for up to 10 seconds for all CPUs to do the backtrace */
-       for (i = 0; i < 10 * 1000; i++) {
-               if (cpus_empty(backtrace_mask))
-                       break;
-               mdelay(1);
-       }
-}
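
The heart of the watchdog file deleted above is the comparison in nmi_watchdog_tick(): if the per-CPU timer-interrupt count has not moved between successive watchdog NMIs for roughly five seconds, the CPU is declared locked up. A condensed, single-CPU sketch of that heuristic, with plain variables standing in for the per-CPU state and an illustrative nmi_hz:

/* Sketch: NMI-tick lockup check, called from each watchdog NMI. */
static unsigned last_irq_sum;
static unsigned alert_counter;
static const unsigned nmi_hz = 1000;    /* illustrative NMI rate */

static int check_lockup(unsigned timer_irq_sum, int touched)
{
        if (!touched && timer_irq_sum == last_irq_sum) {
                /* No timer interrupts since the last NMI: suspicious. */
                if (++alert_counter == 5 * nmi_hz)
                        return 1;       /* stuck for ~5s: report lockup */
        } else {
                last_irq_sum = timer_irq_sum;
                alert_counter = 0;
        }
        return 0;
}
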
diff --git a/arch/x86/kernel/numaq_32.c b/arch/x86/kernel/numaq_32.c
deleted file mode 100644 (file)
index f2191d4..0000000
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * Written by: Patricia Gaughen, IBM Corporation
- *
- * Copyright (C) 2002, IBM Corp.
- *
- * All rights reserved.          
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <gone@us.ibm.com>
- */
-
-#include <linux/mm.h>
-#include <linux/bootmem.h>
-#include <linux/mmzone.h>
-#include <linux/module.h>
-#include <linux/nodemask.h>
-#include <asm/numaq.h>
-#include <asm/topology.h>
-#include <asm/processor.h>
-#include <asm/genapic.h>
-#include <asm/e820.h>
-#include <asm/setup.h>
-
-#define        MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))
-
-/*
- * Function: smp_dump_qct()
- *
- * Description: gets memory layout from the quad config table.  This
- * function also updates node_online_map with the nodes (quads) present.
- */
-static void __init smp_dump_qct(void)
-{
-       int node;
-       struct eachquadmem *eq;
-       struct sys_cfg_data *scd =
-               (struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);
-
-       nodes_clear(node_online_map);
-       for_each_node(node) {
-               if (scd->quads_present31_0 & (1 << node)) {
-                       node_set_online(node);
-                       eq = &scd->eq[node];
-                       /* Convert to pages */
-                       node_start_pfn[node] = MB_TO_PAGES(
-                               eq->hi_shrd_mem_start - eq->priv_mem_size);
-                       node_end_pfn[node] = MB_TO_PAGES(
-                               eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);
-
-                       e820_register_active_regions(node, node_start_pfn[node],
-                                                       node_end_pfn[node]);
-                       memory_present(node,
-                               node_start_pfn[node], node_end_pfn[node]);
-                       node_remap_size[node] = node_memmap_size_bytes(node,
-                                                       node_start_pfn[node],
-                                                       node_end_pfn[node]);
-               }
-       }
-}
-
-
-void __cpuinit numaq_tsc_disable(void)
-{
-       if (!found_numaq)
-               return;
-
-       if (num_online_nodes() > 1) {
-               printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
-               setup_clear_cpu_cap(X86_FEATURE_TSC);
-       }
-}
-
-static int __init numaq_pre_time_init(void)
-{
-       numaq_tsc_disable();
-       return 0;
-}
-
-int found_numaq;
-/*
- * Have to match translation table entries to main table entries by counter
- * hence the mpc_record variable .... can't see a less disgusting way of
- * doing this ....
- */
-struct mpc_config_translation {
-       unsigned char mpc_type;
-       unsigned char trans_len;
-       unsigned char trans_type;
-       unsigned char trans_quad;
-       unsigned char trans_global;
-       unsigned char trans_local;
-       unsigned short trans_reserved;
-};
-
-/* x86_quirks member */
-static int mpc_record;
-static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY]
-    __cpuinitdata;
-
-static inline int generate_logical_apicid(int quad, int phys_apicid)
-{
-       return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
-}
-
-/* x86_quirks member */
-static int mpc_apic_id(struct mpc_cpu *m)
-{
-       int quad = translation_table[mpc_record]->trans_quad;
-       int logical_apicid = generate_logical_apicid(quad, m->apicid);
-
-       printk(KERN_DEBUG "Processor #%d %u:%u APIC version %d (quad %d, apic %d)\n",
-              m->apicid, (m->cpufeature & CPU_FAMILY_MASK) >> 8,
-              (m->cpufeature & CPU_MODEL_MASK) >> 4,
-              m->apicver, quad, logical_apicid);
-       return logical_apicid;
-}
-
-int mp_bus_id_to_node[MAX_MP_BUSSES];
-
-int mp_bus_id_to_local[MAX_MP_BUSSES];
-
-/* x86_quirks member */
-static void mpc_oem_bus_info(struct mpc_bus *m, char *name)
-{
-       int quad = translation_table[mpc_record]->trans_quad;
-       int local = translation_table[mpc_record]->trans_local;
-
-       mp_bus_id_to_node[m->busid] = quad;
-       mp_bus_id_to_local[m->busid] = local;
-       printk(KERN_INFO "Bus #%d is %s (node %d)\n",
-              m->busid, name, quad);
-}
-
-int quad_local_to_mp_bus_id [NR_CPUS/4][4];
-
-/* x86_quirks member */
-static void mpc_oem_pci_bus(struct mpc_bus *m)
-{
-       int quad = translation_table[mpc_record]->trans_quad;
-       int local = translation_table[mpc_record]->trans_local;
-
-       quad_local_to_mp_bus_id[quad][local] = m->busid;
-}
-
-static void __init MP_translation_info(struct mpc_config_translation *m)
-{
-       printk(KERN_INFO
-              "Translation: record %d, type %d, quad %d, global %d, local %d\n",
-              mpc_record, m->trans_type, m->trans_quad, m->trans_global,
-              m->trans_local);
-
-       if (mpc_record >= MAX_MPC_ENTRY)
-               printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
-       else
-               translation_table[mpc_record] = m;      /* stash this for later */
-       if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
-               node_set_online(m->trans_quad);
-}
-
-static int __init mpf_checksum(unsigned char *mp, int len)
-{
-       int sum = 0;
-
-       while (len--)
-               sum += *mp++;
-
-       return sum & 0xFF;
-}
-
-/*
- * Read/parse the MPC oem tables
- */
-
-static void __init smp_read_mpc_oem(struct mpc_oemtable *oemtable,
-                                   unsigned short oemsize)
-{
-       int count = sizeof(*oemtable);  /* the header size */
-       unsigned char *oemptr = ((unsigned char *)oemtable) + count;
-
-       mpc_record = 0;
-       printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n",
-              oemtable);
-       if (memcmp(oemtable->signature, MPC_OEM_SIGNATURE, 4)) {
-               printk(KERN_WARNING
-                      "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
-                      oemtable->signature[0], oemtable->signature[1],
-                      oemtable->signature[2], oemtable->signature[3]);
-               return;
-       }
-       if (mpf_checksum((unsigned char *)oemtable, oemtable->length)) {
-               printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
-               return;
-       }
-       while (count < oemtable->length) {
-               switch (*oemptr) {
-               case MP_TRANSLATION:
-                       {
-                               struct mpc_config_translation *m =
-                                   (struct mpc_config_translation *)oemptr;
-                               MP_translation_info(m);
-                               oemptr += sizeof(*m);
-                               count += sizeof(*m);
-                               ++mpc_record;
-                               break;
-                       }
-               default:
-                       {
-                               printk(KERN_WARNING
-                                      "Unrecognised OEM table entry type! - %d\n",
-                                      (int)*oemptr);
-                               return;
-                       }
-               }
-       }
-}
-
-static int __init numaq_setup_ioapic_ids(void)
-{
-       /* so can skip it */
-       return 1;
-}
-
-static int __init numaq_update_genapic(void)
-{
-       genapic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
-
-       return 0;
-}
-
-static struct x86_quirks numaq_x86_quirks __initdata = {
-       .arch_pre_time_init     = numaq_pre_time_init,
-       .arch_time_init         = NULL,
-       .arch_pre_intr_init     = NULL,
-       .arch_memory_setup      = NULL,
-       .arch_intr_init         = NULL,
-       .arch_trap_init         = NULL,
-       .mach_get_smp_config    = NULL,
-       .mach_find_smp_config   = NULL,
-       .mpc_record             = &mpc_record,
-       .mpc_apic_id            = mpc_apic_id,
-       .mpc_oem_bus_info       = mpc_oem_bus_info,
-       .mpc_oem_pci_bus        = mpc_oem_pci_bus,
-       .smp_read_mpc_oem       = smp_read_mpc_oem,
-       .setup_ioapic_ids       = numaq_setup_ioapic_ids,
-       .update_genapic         = numaq_update_genapic,
-};
-
-void numaq_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       if (strncmp(oem, "IBM NUMA", 8))
-               printk("Warning!  Not a NUMA-Q system!\n");
-       else
-               found_numaq = 1;
-}
-
-static __init void early_check_numaq(void)
-{
-       /*
-        * Find possible boot-time SMP configuration:
-        */
-       early_find_smp_config();
-       /*
-        * get boot-time SMP configuration:
-        */
-       if (smp_found_config)
-               early_get_smp_config();
-
-       if (found_numaq)
-               x86_quirks = &numaq_x86_quirks;
-}
-
-int __init get_memcfg_numaq(void)
-{
-       early_check_numaq();
-       if (!found_numaq)
-               return 0;
-       smp_dump_qct();
-       return 1;
-}
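
Two pieces of arithmetic from the NUMAQ file deleted above, worked through in a small standalone program; a PAGE_SHIFT of 12 (4 KiB pages) is assumed:

#include <stdio.h>

#define PAGE_SHIFT 12
#define MB_TO_PAGES(addr) ((addr) << (20 - PAGE_SHIFT))

static int generate_logical_apicid(int quad, int phys_apicid)
{
        return (quad << 4) + (phys_apicid ? phys_apicid << 1 : 1);
}

int main(void)
{
        /* 64 MB is 64 << 8 == 16384 4 KiB pages. */
        printf("%d\n", MB_TO_PAGES(64));
        /* Quad 2, physical APIC id 3 -> (2 << 4) + (3 << 1) == 0x26. */
        printf("%#x\n", generate_logical_apicid(2, 3));
        return 0;
}
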
index 95777b0faa7388f9c236bb093fae1a4934bb91c0..3a7c5a44082eee1639fa695e70d8fa55bea4389c 100644 (file)
@@ -26,13 +26,3 @@ struct pv_lock_ops pv_lock_ops = {
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
-void __init paravirt_use_bytelocks(void)
-{
-#ifdef CONFIG_SMP
-       pv_lock_ops.spin_is_locked = __byte_spin_is_locked;
-       pv_lock_ops.spin_is_contended = __byte_spin_is_contended;
-       pv_lock_ops.spin_lock = __byte_spin_lock;
-       pv_lock_ops.spin_trylock = __byte_spin_trylock;
-       pv_lock_ops.spin_unlock = __byte_spin_unlock;
-#endif
-}
index c6520a4e85d4fe3479070d03867bfd65e50dd081..63dd358d8ee13be0708c0139bf348d5d237aecfb 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/paravirt.h>
 #include <asm/desc.h>
 #include <asm/setup.h>
-#include <asm/arch_hooks.h>
 #include <asm/pgtable.h>
 #include <asm/time.h>
 #include <asm/pgalloc.h>
@@ -44,6 +43,17 @@ void _paravirt_nop(void)
 {
 }
 
+/* identity functions, which can be inlined */
+u32 _paravirt_ident_32(u32 x)
+{
+       return x;
+}
+
+u64 _paravirt_ident_64(u64 x)
+{
+       return x;
+}
+
 static void __init default_banner(void)
 {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@ -138,9 +148,16 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
-       else if (opfunc == paravirt_nop)
+       else if (opfunc == _paravirt_nop)
                /* If the operation is a nop, then nop the callsite */
                ret = paravirt_patch_nop();
+
+       /* identity functions just return their single argument */
+       else if (opfunc == _paravirt_ident_32)
+               ret = paravirt_patch_ident_32(insnbuf, len);
+       else if (opfunc == _paravirt_ident_64)
+               ret = paravirt_patch_ident_64(insnbuf, len);
+
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
@@ -318,10 +335,10 @@ struct pv_time_ops pv_time_ops = {
 
 struct pv_irq_ops pv_irq_ops = {
        .init_IRQ = native_init_IRQ,
-       .save_fl = native_save_fl,
-       .restore_fl = native_restore_fl,
-       .irq_disable = native_irq_disable,
-       .irq_enable = native_irq_enable,
+       .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
+       .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
+       .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
+       .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
 #ifdef CONFIG_X86_64
@@ -399,6 +416,14 @@ struct pv_apic_ops pv_apic_ops = {
 #endif
 };
 
+#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
+/* 32-bit pagetable entries */
+#define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
+#else
+/* 64-bit pagetable entries */
+#define PTE_IDENT      __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
+#endif
+
 struct pv_mmu_ops pv_mmu_ops = {
 #ifndef CONFIG_X86_64
        .pagetable_setup_start = native_pagetable_setup_start,
@@ -450,22 +475,23 @@ struct pv_mmu_ops pv_mmu_ops = {
        .pmd_clear = native_pmd_clear,
 #endif
        .set_pud = native_set_pud,
-       .pmd_val = native_pmd_val,
-       .make_pmd = native_make_pmd,
+
+       .pmd_val = PTE_IDENT,
+       .make_pmd = PTE_IDENT,
 
 #if PAGETABLE_LEVELS == 4
-       .pud_val = native_pud_val,
-       .make_pud = native_make_pud,
+       .pud_val = PTE_IDENT,
+       .make_pud = PTE_IDENT,
+
        .set_pgd = native_set_pgd,
 #endif
 #endif /* PAGETABLE_LEVELS >= 3 */
 
-       .pte_val = native_pte_val,
-       .pte_flags = native_pte_flags,
-       .pgd_val = native_pgd_val,
+       .pte_val = PTE_IDENT,
+       .pgd_val = PTE_IDENT,
 
-       .make_pte = native_make_pte,
-       .make_pgd = native_make_pgd,
+       .make_pte = PTE_IDENT,
+       .make_pgd = PTE_IDENT,
 
        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
index 9fe644f4861d4f675206ce16f2c8f7c799a6aae5..d9f32e6d6ab65476be60c34d06c522215b83a26b 100644 (file)
@@ -12,6 +12,18 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+       /* arg in %eax, return in %eax */
+       return 0;
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+       /* arg in %edx:%eax, return in %edx:%eax */
+       return 0;
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
index 061d01df9ae6aa0ff09ff17d43eab28e61d21b10..3f08f34f93ebc480041d0b448e3b12869450a91d 100644 (file)
@@ -19,6 +19,21 @@ DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
 DEF_NATIVE(pv_cpu_ops, usergs_sysret32, "swapgs; sysretl");
 DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 
+DEF_NATIVE(, mov32, "mov %edi, %eax");
+DEF_NATIVE(, mov64, "mov %rdi, %rax");
+
+unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
+{
+       return paravirt_patch_insns(insnbuf, len,
+                                   start__mov32, end__mov32);
+}
+
+unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
+{
+       return paravirt_patch_insns(insnbuf, len,
+                                   start__mov64, end__mov64);
+}
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
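
The asymmetry between the 32-bit and 64-bit implementations above is purely calling convention: per the comments in the hunks, a 32-bit argument already sits in %eax (a u64 in %edx:%eax) where the result is expected, so the 32-bit patcher emits zero bytes and the call site degenerates to padding; on 64-bit the argument arrives in %rdi but must be returned in %rax, so a single mov is patched in. A sketch of the emitted bytes (illustrative, not the kernel's patching machinery):

/* Sketch: byte sequences a patcher would drop at an identity call site. */
#include <stddef.h>

/* 64-bit: mov %rdi,%rax (REX.W + 89 /r) -- three bytes. */
static const unsigned char ident64[] = { 0x48, 0x89, 0xf8 };
static const size_t ident64_len = sizeof(ident64);

/* 32-bit: nothing to emit; the value is already in %eax (%edx:%eax),
 * so the patcher reports 0 bytes and the site is padded out. */
static const size_t ident32_len = 0;
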
index 675a48c404a5abde75e35a655ea8ed56b938bc89..071e7fea42e5b3fd07bb16a7a953f4d4145f9110 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/io.h>
-#include <setup_arch.h>
+#include <asm/setup_arch.h>
 
 static struct resource system_rom_resource = {
        .name   = "System ROM",
index 6d12f7e37f8c1da5a465b110ff75ecae8c01f0c8..6afa5232dbb739f99c07ff0641cc265428db74eb 100644 (file)
@@ -1,8 +1,8 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/idle.h>
 #include <linux/smp.h>
+#include <linux/prctl.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/module.h>
@@ -11,6 +11,9 @@
 #include <linux/ftrace.h>
 #include <asm/system.h>
 #include <asm/apic.h>
+#include <asm/idle.h>
+#include <asm/uaccess.h>
+#include <asm/i387.h>
 
 unsigned long idle_halt;
 EXPORT_SYMBOL(idle_halt);
@@ -55,6 +58,192 @@ void arch_task_cache_init(void)
                                  SLAB_PANIC, NULL);
 }
 
+/*
+ * Free current thread data structures etc..
+ */
+void exit_thread(void)
+{
+       struct task_struct *me = current;
+       struct thread_struct *t = &me->thread;
+
+       if (me->thread.io_bitmap_ptr) {
+               struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+               kfree(t->io_bitmap_ptr);
+               t->io_bitmap_ptr = NULL;
+               clear_thread_flag(TIF_IO_BITMAP);
+               /*
+                * Careful, clear this in the TSS too:
+                */
+               memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+               t->io_bitmap_max = 0;
+               put_cpu();
+       }
+
+       ds_exit_thread(current);
+}
+
+void flush_thread(void)
+{
+       struct task_struct *tsk = current;
+
+#ifdef CONFIG_X86_64
+       if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
+               clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
+               if (test_tsk_thread_flag(tsk, TIF_IA32)) {
+                       clear_tsk_thread_flag(tsk, TIF_IA32);
+               } else {
+                       set_tsk_thread_flag(tsk, TIF_IA32);
+                       current_thread_info()->status |= TS_COMPAT;
+               }
+       }
+#endif
+
+       clear_tsk_thread_flag(tsk, TIF_DEBUG);
+
+       tsk->thread.debugreg0 = 0;
+       tsk->thread.debugreg1 = 0;
+       tsk->thread.debugreg2 = 0;
+       tsk->thread.debugreg3 = 0;
+       tsk->thread.debugreg6 = 0;
+       tsk->thread.debugreg7 = 0;
+       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
+       /*
+        * Forget coprocessor state..
+        */
+       tsk->fpu_counter = 0;
+       clear_fpu(tsk);
+       clear_used_math();
+}
+
+static void hard_disable_TSC(void)
+{
+       write_cr4(read_cr4() | X86_CR4_TSD);
+}
+
+void disable_TSC(void)
+{
+       preempt_disable();
+       if (!test_and_set_thread_flag(TIF_NOTSC))
+               /*
+                * Must flip the CPU state synchronously with
+                * TIF_NOTSC in the current running context.
+                */
+               hard_disable_TSC();
+       preempt_enable();
+}
+
+static void hard_enable_TSC(void)
+{
+       write_cr4(read_cr4() & ~X86_CR4_TSD);
+}
+
+static void enable_TSC(void)
+{
+       preempt_disable();
+       if (test_and_clear_thread_flag(TIF_NOTSC))
+               /*
+                * Must flip the CPU state synchronously with
+                * TIF_NOTSC in the current running context.
+                */
+               hard_enable_TSC();
+       preempt_enable();
+}
+
+int get_tsc_mode(unsigned long adr)
+{
+       unsigned int val;
+
+       if (test_thread_flag(TIF_NOTSC))
+               val = PR_TSC_SIGSEGV;
+       else
+               val = PR_TSC_ENABLE;
+
+       return put_user(val, (unsigned int __user *)adr);
+}
+
+int set_tsc_mode(unsigned int val)
+{
+       if (val == PR_TSC_SIGSEGV)
+               disable_TSC();
+       else if (val == PR_TSC_ENABLE)
+               enable_TSC();
+       else
+               return -EINVAL;
+
+       return 0;
+}
+
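
get_tsc_mode()/set_tsc_mode() above back the PR_GET_TSC/PR_SET_TSC prctl interface (note the <linux/prctl.h> include added at the top of this file). A user-space sketch of the fault-on-rdtsc mode:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        int mode = 0;

        /* Ask the kernel to deliver SIGSEGV on any rdtsc in this task. */
        if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0) != 0)
                perror("PR_SET_TSC");

        if (prctl(PR_GET_TSC, &mode, 0, 0, 0) == 0)
                printf("tsc mode: %s\n",
                       mode == PR_TSC_SIGSEGV ? "sigsegv" : "enabled");
        return 0;
}
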
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
+                     struct tss_struct *tss)
+{
+       struct thread_struct *prev, *next;
+
+       prev = &prev_p->thread;
+       next = &next_p->thread;
+
+       if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
+           test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
+               ds_switch_to(prev_p, next_p);
+       else if (next->debugctlmsr != prev->debugctlmsr)
+               update_debugctlmsr(next->debugctlmsr);
+
+       if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
+               set_debugreg(next->debugreg0, 0);
+               set_debugreg(next->debugreg1, 1);
+               set_debugreg(next->debugreg2, 2);
+               set_debugreg(next->debugreg3, 3);
+               /* no 4 and 5 */
+               set_debugreg(next->debugreg6, 6);
+               set_debugreg(next->debugreg7, 7);
+       }
+
+       if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
+           test_tsk_thread_flag(next_p, TIF_NOTSC)) {
+               /* prev and next are different */
+               if (test_tsk_thread_flag(next_p, TIF_NOTSC))
+                       hard_disable_TSC();
+               else
+                       hard_enable_TSC();
+       }
+
+       if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+               /*
+                * Copy the relevant range of the IO bitmap.
+                * Normally this is 128 bytes or less:
+                */
+               memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+                      max(prev->io_bitmap_max, next->io_bitmap_max));
+       } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+               /*
+                * Clear any possible leftover bits:
+                */
+               memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+       }
+}
+
+int sys_fork(struct pt_regs *regs)
+{
+       return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
+}
+
+/*
+ * This is trivial, and on the face of it looks like it
+ * could equally well be done in user mode.
+ *
+ * Not so, for quite unobvious reasons - register pressure.
+ * In user mode vfork() cannot have a stack frame, and if
+ * done by calling the "clone()" system call directly, you
+ * do not have enough call-clobbered registers to hold all
+ * the information you need.
+ */
+int sys_vfork(struct pt_regs *regs)
+{
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
+                      NULL, NULL);
+}
+
+
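
A user-space illustration of the constraint described in the comment above: because parent and child share the stack until the child execs or exits, the only safe pattern is an immediate execve() or _exit() (the path here is illustrative):

#include <sys/types.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = vfork();

        if (pid == 0) {
                /* Child: only execve()/_exit() are safe here; modifying
                 * locals would scribble on the parent's stack. */
                execl("/bin/true", "true", (char *)NULL);
                _exit(127);
        }
        /* Parent resumes only after the child has exec'd or exited. */
        return pid < 0 ? 1 : 0;
}
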
 /*
  * Idle related variables and functions
  */
@@ -350,7 +539,7 @@ static void c1e_idle(void)
 
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        if (pm_idle == poll_idle && smp_num_siblings > 1) {
                printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
                        " performance may degrade.\n");
index bd4da2af08aec72ff1933eea7164a2b3d8f73139..14014d766cadba461c27705a0626560dc308bb70 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <stdarg.h>
 
+#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -66,9 +67,6 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 
-DEFINE_PER_CPU(int, cpu_number);
-EXPORT_PER_CPU_SYMBOL(cpu_number);
-
 /*
  * Return saved PC of a blocked thread.
  */
@@ -94,6 +92,15 @@ void cpu_idle(void)
 {
        int cpu = smp_processor_id();
 
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+
        current_thread_info()->status |= TS_POLLING;
 
        /* endless idle loop with no priority at all */
@@ -108,7 +115,6 @@ void cpu_idle(void)
                                play_dead();
 
                        local_irq_disable();
-                       __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
                        pm_idle();
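
What the boot_init_stack_canary() call in the hunk above feeds: a simplified sketch of the guard-value scheme that CONFIG_CC_STACKPROTECTOR relies on. In the real kernel the canary lives in per-CPU storage (%gs-relative on x86) and the check below is emitted by the compiler, not written by hand:

/* Sketch: compiler-style stack guard, spelled out manually. */
extern unsigned long __stack_chk_guard;         /* the canary value */
extern void __stack_chk_fail(void);             /* called on corruption */

void guarded_function(void)
{
        unsigned long canary = __stack_chk_guard;       /* prologue copy */
        char buf[64];

        /* ... function body that might overflow buf ... */
        buf[0] = 0;

        if (canary != __stack_chk_guard)                /* epilogue check */
                __stack_chk_fail();
}
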
@@ -132,7 +138,7 @@ void __show_regs(struct pt_regs *regs, int all)
        if (user_mode_vm(regs)) {
                sp = regs->sp;
                ss = regs->ss & 0xffff;
-               savesegment(gs, gs);
+               gs = get_user_gs(regs);
        } else {
                sp = (unsigned long) (&regs->sp);
                savesegment(ss, ss);
@@ -213,6 +219,7 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
        regs.ds = __USER_DS;
        regs.es = __USER_DS;
        regs.fs = __KERNEL_PERCPU;
+       regs.gs = __KERNEL_STACK_CANARY;
        regs.orig_ax = -1;
        regs.ip = (unsigned long) kernel_thread_helper;
        regs.cs = __KERNEL_CS | get_kernel_rpl();
@@ -223,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 }
 EXPORT_SYMBOL(kernel_thread);
 
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-       /* The process may have allocated an io port bitmap... nuke it. */
-       if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
-               struct task_struct *tsk = current;
-               struct thread_struct *t = &tsk->thread;
-               int cpu = get_cpu();
-               struct tss_struct *tss = &per_cpu(init_tss, cpu);
-
-               kfree(t->io_bitmap_ptr);
-               t->io_bitmap_ptr = NULL;
-               clear_thread_flag(TIF_IO_BITMAP);
-               /*
-                * Careful, clear this in the TSS too:
-                */
-               memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
-               t->io_bitmap_max = 0;
-               tss->io_bitmap_owner = NULL;
-               tss->io_bitmap_max = 0;
-               tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-               put_cpu();
-       }
-
-       ds_exit_thread(current);
-}
-
-void flush_thread(void)
-{
-       struct task_struct *tsk = current;
-
-       tsk->thread.debugreg0 = 0;
-       tsk->thread.debugreg1 = 0;
-       tsk->thread.debugreg2 = 0;
-       tsk->thread.debugreg3 = 0;
-       tsk->thread.debugreg6 = 0;
-       tsk->thread.debugreg7 = 0;
-       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-       clear_tsk_thread_flag(tsk, TIF_DEBUG);
-       /*
-        * Forget coprocessor state..
-        */
-       tsk->fpu_counter = 0;
-       clear_fpu(tsk);
-       clear_used_math();
-}
-
 void release_thread(struct task_struct *dead_task)
 {
        BUG_ON(dead_task->mm);
@@ -305,7 +263,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 
        p->thread.ip = (unsigned long) ret_from_fork;
 
-       savesegment(gs, p->thread.gs);
+       task_user_gs(p) = get_user_gs(regs);
 
        tsk = current;
        if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
@@ -343,7 +301,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
 void
 start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 {
-       __asm__("movl %0, %%gs" : : "r"(0));
+       set_user_gs(regs, 0);
        regs->fs                = 0;
        set_fs(USER_DS);
        regs->ds                = __USER_DS;
@@ -359,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
-static void hard_disable_TSC(void)
-{
-       write_cr4(read_cr4() | X86_CR4_TSD);
-}
-
-void disable_TSC(void)
-{
-       preempt_disable();
-       if (!test_and_set_thread_flag(TIF_NOTSC))
-               /*
-                * Must flip the CPU state synchronously with
-                * TIF_NOTSC in the current running context.
-                */
-               hard_disable_TSC();
-       preempt_enable();
-}
-
-static void hard_enable_TSC(void)
-{
-       write_cr4(read_cr4() & ~X86_CR4_TSD);
-}
-
-static void enable_TSC(void)
-{
-       preempt_disable();
-       if (test_and_clear_thread_flag(TIF_NOTSC))
-               /*
-                * Must flip the CPU state synchronously with
-                * TIF_NOTSC in the current running context.
-                */
-               hard_enable_TSC();
-       preempt_enable();
-}
-
-int get_tsc_mode(unsigned long adr)
-{
-       unsigned int val;
-
-       if (test_thread_flag(TIF_NOTSC))
-               val = PR_TSC_SIGSEGV;
-       else
-               val = PR_TSC_ENABLE;
-
-       return put_user(val, (unsigned int __user *)adr);
-}
-
-int set_tsc_mode(unsigned int val)
-{
-       if (val == PR_TSC_SIGSEGV)
-               disable_TSC();
-       else if (val == PR_TSC_ENABLE)
-               enable_TSC();
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-static noinline void
-__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-                struct tss_struct *tss)
-{
-       struct thread_struct *prev, *next;
-
-       prev = &prev_p->thread;
-       next = &next_p->thread;
-
-       if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-           test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-               ds_switch_to(prev_p, next_p);
-       else if (next->debugctlmsr != prev->debugctlmsr)
-               update_debugctlmsr(next->debugctlmsr);
-
-       if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-               set_debugreg(next->debugreg0, 0);
-               set_debugreg(next->debugreg1, 1);
-               set_debugreg(next->debugreg2, 2);
-               set_debugreg(next->debugreg3, 3);
-               /* no 4 and 5 */
-               set_debugreg(next->debugreg6, 6);
-               set_debugreg(next->debugreg7, 7);
-       }
-
-       if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-           test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-               /* prev and next are different */
-               if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-                       hard_disable_TSC();
-               else
-                       hard_enable_TSC();
-       }
-
-       if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-               /*
-                * Disable the bitmap via an invalid offset. We still cache
-                * the previous bitmap owner and the IO bitmap contents:
-                */
-               tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
-               return;
-       }
-
-       if (likely(next == tss->io_bitmap_owner)) {
-               /*
-                * Previous owner of the bitmap (hence the bitmap content)
-                * matches the next task, we don't have to do anything but
-                * to set a valid offset in the TSS:
-                */
-               tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
-               return;
-       }
-       /*
-        * Lazy TSS's I/O bitmap copy. We set an invalid offset here
-        * and let the task get a GPF in case an I/O instruction
-        * is performed.  The handler of the GPF will verify that the
-        * faulting task has a valid I/O bitmap and, if true, does the
-        * real copy and restarts the instruction.  This will save us
-        * redundant copies when the currently switched task does not
-        * perform any I/O during its timeslice.
-        */
-       tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
-}
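
For contrast with the lazy scheme whose removal ends here: the consolidated __switch_to_xtra() added earlier in this commit copies the bitmap eagerly on every switch to an I/O-bitmap task. A condensed sketch of the two strategies; all names prefixed fake_ are illustrative, as is the sentinel value:

#include <string.h>

#define IO_BITMAP_BYTES         (65536 / 8)
#define INVALID_OFFSET_LAZY     0x9000          /* illustrative sentinel */

struct fake_tss {
        unsigned io_bitmap_base;
        unsigned char io_bitmap[IO_BITMAP_BYTES];
};
struct fake_task {
        unsigned char *io_bitmap_ptr;
        unsigned io_bitmap_max;
};

/* Eager (the replacement added earlier): pay the copy on every switch-in. */
static void switch_in_eager(struct fake_tss *tss, struct fake_task *next,
                            struct fake_task *prev)
{
        if (next->io_bitmap_ptr)
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       next->io_bitmap_max > prev->io_bitmap_max ?
                       next->io_bitmap_max : prev->io_bitmap_max);
}

/* Lazy (the code removed above): just poison the offset; the #GP handler
 * performs the copy only if the task actually executes an I/O instruction. */
static void switch_in_lazy(struct fake_tss *tss)
{
        tss->io_bitmap_base = INVALID_OFFSET_LAZY;
}
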
 
 /*
 *     switch_to(x,y) should switch tasks from x to y.
@@ -540,7 +377,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * used %fs or %gs (it does not today), or if the kernel is
         * running inside of a hypervisor layer.
         */
-       savesegment(gs, prev->gs);
+       lazy_save_gs(prev->gs);
 
        /*
         * Load the per-thread Thread-Local Storage descriptor.
@@ -586,64 +423,44 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Restore %gs if needed (which is common)
         */
        if (prev->gs | next->gs)
-               loadsegment(gs, next->gs);
+               lazy_load_gs(next->gs);
 
-       x86_write_percpu(current_task, next_p);
+       percpu_write(current_task, next_p);
 
        return prev_p;
 }
 
-asmlinkage int sys_fork(struct pt_regs regs)
-{
-       return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
-}
-
-asmlinkage int sys_clone(struct pt_regs regs)
+int sys_clone(struct pt_regs *regs)
 {
        unsigned long clone_flags;
        unsigned long newsp;
        int __user *parent_tidptr, *child_tidptr;
 
-       clone_flags = regs.bx;
-       newsp = regs.cx;
-       parent_tidptr = (int __user *)regs.dx;
-       child_tidptr = (int __user *)regs.di;
+       clone_flags = regs->bx;
+       newsp = regs->cx;
+       parent_tidptr = (int __user *)regs->dx;
+       child_tidptr = (int __user *)regs->di;
        if (!newsp)
-               newsp = regs.sp;
-       return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
-}
-
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage int sys_vfork(struct pt_regs regs)
-{
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
+               newsp = regs->sp;
+       return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
 }
 
 /*
  * sys_execve() executes a new program.
  */
-asmlinkage int sys_execve(struct pt_regs regs)
+int sys_execve(struct pt_regs *regs)
 {
        int error;
        char *filename;
 
-       filename = getname((char __user *) regs.bx);
+       filename = getname((char __user *) regs->bx);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                goto out;
        error = do_execve(filename,
-                       (char __user * __user *) regs.cx,
-                       (char __user * __user *) regs.dx,
-                       &regs);
+                       (char __user * __user *) regs->cx,
+                       (char __user * __user *) regs->dx,
+                       regs);
        if (error == 0) {
                /* Make sure we don't return using sysenter.. */
                set_thread_flag(TIF_IRET);
index 85b4cb5c1980f7e089f1f731fb51c6a680ed7623..abb7e6a7f0c62c813de2331349c5aa4a348d14b1 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <stdarg.h>
 
+#include <linux/stackprotector.h>
 #include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
@@ -47,7 +48,6 @@
 #include <asm/processor.h>
 #include <asm/i387.h>
 #include <asm/mmu_context.h>
-#include <asm/pda.h>
 #include <asm/prctl.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 
 asmlinkage extern void ret_from_fork(void);
 
+DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
+EXPORT_PER_CPU_SYMBOL(current_task);
+
+DEFINE_PER_CPU(unsigned long, old_rsp);
+static DEFINE_PER_CPU(unsigned char, is_idle);
+
 unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
 
 static ATOMIC_NOTIFIER_HEAD(idle_notifier);
@@ -76,13 +82,13 @@ EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 
 void enter_idle(void)
 {
-       write_pda(isidle, 1);
+       percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 
 static void __exit_idle(void)
 {
-       if (test_and_clear_bit_pda(0, isidle) == 0)
+       if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
@@ -112,6 +118,16 @@ static inline void play_dead(void)
 void cpu_idle(void)
 {
        current_thread_info()->status |= TS_POLLING;
+
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us.  CPU0 already has it initialized but no harm in
+        * doing it again.  This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+
        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);
@@ -221,61 +237,6 @@ void show_regs(struct pt_regs *regs)
        show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
 }
 
-/*
- * Free current thread data structures etc..
- */
-void exit_thread(void)
-{
-       struct task_struct *me = current;
-       struct thread_struct *t = &me->thread;
-
-       if (me->thread.io_bitmap_ptr) {
-               struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-
-               kfree(t->io_bitmap_ptr);
-               t->io_bitmap_ptr = NULL;
-               clear_thread_flag(TIF_IO_BITMAP);
-               /*
-                * Careful, clear this in the TSS too:
-                */
-               memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-               t->io_bitmap_max = 0;
-               put_cpu();
-       }
-
-       ds_exit_thread(current);
-}
-
-void flush_thread(void)
-{
-       struct task_struct *tsk = current;
-
-       if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
-               clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
-               if (test_tsk_thread_flag(tsk, TIF_IA32)) {
-                       clear_tsk_thread_flag(tsk, TIF_IA32);
-               } else {
-                       set_tsk_thread_flag(tsk, TIF_IA32);
-                       current_thread_info()->status |= TS_COMPAT;
-               }
-       }
-       clear_tsk_thread_flag(tsk, TIF_DEBUG);
-
-       tsk->thread.debugreg0 = 0;
-       tsk->thread.debugreg1 = 0;
-       tsk->thread.debugreg2 = 0;
-       tsk->thread.debugreg3 = 0;
-       tsk->thread.debugreg6 = 0;
-       tsk->thread.debugreg7 = 0;
-       memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
-       /*
-        * Forget coprocessor state..
-        */
-       tsk->fpu_counter = 0;
-       clear_fpu(tsk);
-       clear_used_math();
-}
-
 void release_thread(struct task_struct *dead_task)
 {
        if (dead_task->mm) {
@@ -397,7 +358,7 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
-       write_pda(oldrsp, new_sp);
+       percpu_write(old_rsp, new_sp);
        regs->cs                = __USER_CS;
        regs->ss                = __USER_DS;
        regs->flags             = 0x200;
@@ -409,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
-static void hard_disable_TSC(void)
-{
-       write_cr4(read_cr4() | X86_CR4_TSD);
-}
-
-void disable_TSC(void)
-{
-       preempt_disable();
-       if (!test_and_set_thread_flag(TIF_NOTSC))
-               /*
-                * Must flip the CPU state synchronously with
-                * TIF_NOTSC in the current running context.
-                */
-               hard_disable_TSC();
-       preempt_enable();
-}
-
-static void hard_enable_TSC(void)
-{
-       write_cr4(read_cr4() & ~X86_CR4_TSD);
-}
-
-static void enable_TSC(void)
-{
-       preempt_disable();
-       if (test_and_clear_thread_flag(TIF_NOTSC))
-               /*
-                * Must flip the CPU state synchronously with
-                * TIF_NOTSC in the current running context.
-                */
-               hard_enable_TSC();
-       preempt_enable();
-}
-
-int get_tsc_mode(unsigned long adr)
-{
-       unsigned int val;
-
-       if (test_thread_flag(TIF_NOTSC))
-               val = PR_TSC_SIGSEGV;
-       else
-               val = PR_TSC_ENABLE;
-
-       return put_user(val, (unsigned int __user *)adr);
-}
-
-int set_tsc_mode(unsigned int val)
-{
-       if (val == PR_TSC_SIGSEGV)
-               disable_TSC();
-       else if (val == PR_TSC_ENABLE)
-               enable_TSC();
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-/*
- * This special macro can be used to load a debugging register
- */
-#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
-
-static inline void __switch_to_xtra(struct task_struct *prev_p,
-                                   struct task_struct *next_p,
-                                   struct tss_struct *tss)
-{
-       struct thread_struct *prev, *next;
-
-       prev = &prev_p->thread,
-       next = &next_p->thread;
-
-       if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
-           test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
-               ds_switch_to(prev_p, next_p);
-       else if (next->debugctlmsr != prev->debugctlmsr)
-               update_debugctlmsr(next->debugctlmsr);
-
-       if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
-               loaddebug(next, 0);
-               loaddebug(next, 1);
-               loaddebug(next, 2);
-               loaddebug(next, 3);
-               /* no 4 and 5 */
-               loaddebug(next, 6);
-               loaddebug(next, 7);
-       }
-
-       if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
-           test_tsk_thread_flag(next_p, TIF_NOTSC)) {
-               /* prev and next are different */
-               if (test_tsk_thread_flag(next_p, TIF_NOTSC))
-                       hard_disable_TSC();
-               else
-                       hard_enable_TSC();
-       }
-
-       if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-               /*
-                * Copy the relevant range of the IO bitmap.
-                * Normally this is 128 bytes or less:
-                */
-               memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-                      max(prev->io_bitmap_max, next->io_bitmap_max));
-       } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
-               /*
-                * Clear any possible leftover bits:
-                */
-               memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
-       }
-}
-
 /*
  *     switch_to(x,y) should switch tasks from x to y.
  *
@@ -618,21 +467,13 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /*
         * Switch the PDA and FPU contexts.
         */
-       prev->usersp = read_pda(oldrsp);
-       write_pda(oldrsp, next->usersp);
-       write_pda(pcurrent, next_p);
+       prev->usersp = percpu_read(old_rsp);
+       percpu_write(old_rsp, next->usersp);
+       percpu_write(current_task, next_p);
 
-       write_pda(kernelstack,
+       percpu_write(kernel_stack,
                  (unsigned long)task_stack_page(next_p) +
-                 THREAD_SIZE - PDA_STACKOFFSET);
-#ifdef CONFIG_CC_STACKPROTECTOR
-       write_pda(stack_canary, next_p->stack_canary);
-       /*
-        * Build time only check to make sure the stack_canary is at
-        * offset 40 in the pda; this is a gcc ABI requirement
-        */
-       BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
-#endif
+                 THREAD_SIZE - KERNEL_STACK_OFFSET);
 
        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
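
The deleted BUILD_BUG_ON documented a hard gcc ABI fact, also noted in the old comment: the stack-protector code reads the canary from %gs:40, so the field must sit at byte offset 40. The same compile-time layout check, written with C11 _Static_assert around an invented stand-in struct:

    #include <stddef.h>

    struct pda_model {
            char            pad[40];
            unsigned long   stack_canary;   /* gcc expects %gs:40 */
    };

    _Static_assert(offsetof(struct pda_model, stack_canary) == 40,
                   "stack canary must sit at offset 40");
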
@@ -686,11 +527,6 @@ void set_personality_64bit(void)
        current->personality &= ~READ_IMPLIES_EXEC;
 }
 
-asmlinkage long sys_fork(struct pt_regs *regs)
-{
-       return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
-}
-
 asmlinkage long
 sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
@@ -700,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
 }
 
-/*
- * This is trivial, and on the face of it looks like it
- * could equally well be done in user mode.
- *
- * Not so, for quite unobvious reasons - register pressure.
- * In user mode vfork() cannot have a stack frame, and if
- * done by calling the "clone()" system call directly, you
- * do not have enough call-clobbered registers to hold all
- * the information you need.
- */
-asmlinkage long sys_vfork(struct pt_regs *regs)
-{
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
-                   NULL, NULL);
-}
-
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long stack;
index 06ca07f6ad86243884c7852c13334d9da9604acf..19378715f4157b5202d823f9d751dd1f07c08b38 100644 (file)
@@ -75,10 +75,7 @@ static inline bool invalid_selector(u16 value)
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
-       regno >>= 2;
-       if (regno > FS)
-               --regno;
-       return &regs->bx + regno;
+       return &regs->bx + (regno >> 2);
 }
 
 static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
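
The simplification works because pt_regs now carries a slot for every user_regs_struct word, %gs included, so a byte offset into user_regs_struct divides directly into a word index. A self-contained model of the arithmetic (fields abbreviated, 4-byte registers as on 32-bit):

    #include <assert.h>
    #include <stddef.h>

    struct regs_model { unsigned int bx, cx, dx, si, di; };

    static unsigned int *reg_at(struct regs_model *r, unsigned long byte_off)
    {
            return &r->bx + (byte_off >> 2);        /* 4-byte slots */
    }

    int main(void)
    {
            struct regs_model r = { .dx = 7 };
            assert(*reg_at(&r, offsetof(struct regs_model, dx)) == 7);
            return 0;
    }
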
@@ -90,9 +87,10 @@ static u16 get_segment_reg(struct task_struct *task, unsigned long offset)
        if (offset != offsetof(struct user_regs_struct, gs))
                retval = *pt_regs_access(task_pt_regs(task), offset);
        else {
-               retval = task->thread.gs;
                if (task == current)
-                       savesegment(gs, retval);
+                       retval = get_user_gs(task_pt_regs(task));
+               else
+                       retval = task_user_gs(task);
        }
        return retval;
 }
@@ -126,13 +124,10 @@ static int set_segment_reg(struct task_struct *task,
                break;
 
        case offsetof(struct user_regs_struct, gs):
-               task->thread.gs = value;
                if (task == current)
-                       /*
-                        * The user-mode %gs is not affected by
-                        * kernel entry, so we must update the CPU.
-                        */
-                       loadsegment(gs, value);
+                       set_user_gs(task_pt_regs(task), value);
+               else
+                       task_user_gs(task) = value;
        }
 
        return 0;
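
get_user_gs()/set_user_gs()/task_user_gs() hide where the user %gs value lives: the current task is accessed through its saved pt_regs, any other task through a per-thread copy (the exact split depends on the lazy-%gs configuration). A very rough model of that accessor pattern, with invented field names:

    struct task_model {
            unsigned short  regs_gs;        /* saved on kernel entry */
            unsigned short  thread_gs;      /* copy for sleeping tasks */
            int             is_current;
    };

    static unsigned short get_user_gs_model(struct task_model *t)
    {
            return t->is_current ? t->regs_gs : t->thread_gs;
    }

    static void set_user_gs_model(struct task_model *t, unsigned short v)
    {
            if (t->is_current)
                    t->regs_gs = v;
            else
                    t->thread_gs = v;
    }
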
@@ -273,7 +268,7 @@ static unsigned long debugreg_addr_limit(struct task_struct *task)
        if (test_tsk_thread_flag(task, TIF_IA32))
                return IA32_PAGE_OFFSET - 3;
 #endif
-       return TASK_SIZE64 - 7;
+       return TASK_SIZE_MAX - 7;
 }
 
 #endif /* CONFIG_X86_32 */
@@ -690,9 +685,8 @@ static int ptrace_bts_config(struct task_struct *child,
                if (!cfg.signal)
                        return -EINVAL;
 
-               return -EOPNOTSUPP;
-
                child->thread.bts_ovfl_signal = cfg.signal;
+               return -EOPNOTSUPP;
        }
 
        if ((cfg.flags & PTRACE_BTS_O_ALLOC) &&
index 309949e9e1c1a9e65969507cb7dea1097a2410d9..6a5a2970f4c558182669ab9119e874eed5bf9702 100644 (file)
@@ -74,8 +74,7 @@ static void ich_force_hpet_resume(void)
        if (!force_hpet_address)
                return;
 
-       if (rcba_base == NULL)
-               BUG();
+       BUG_ON(rcba_base == NULL);
 
        /* read the Function Disable register, dword mode only */
        val = readl(rcba_base + 0x3404);
index 4526b3a75ed2aef73579212c7fde9ca6ed15e946..2aef36d8aca2783a2cf9cb04f74fe9a72c564734 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/reboot.h>
 #include <asm/pci_x86.h>
 #include <asm/virtext.h>
+#include <asm/cpu.h>
 
 #ifdef CONFIG_X86_32
 # include <linux/dmi.h>
@@ -23,8 +24,6 @@
 # include <asm/iommu.h>
 #endif
 
-#include <mach_ipi.h>
-
 /*
  * Power off function, if any
  */
@@ -658,7 +657,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-       send_IPI_allbutself(NMI_VECTOR);
+       apic->send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
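
Dropping <mach_ipi.h> is part of the apic driver consolidation: rather than a compile-time mach-* header selecting send_IPI_allbutself(), each subarchitecture fills one ops structure and callers indirect through the global apic pointer. The pattern, reduced to a runnable sketch:

    #include <stdio.h>

    struct apic_model {
            void (*send_IPI_allbutself)(int vector);
    };

    static void flat_send_IPI_allbutself_model(int vector)
    {
            printf("IPI vector %d to all CPUs but self\n", vector);
    }

    static struct apic_model flat_apic_model = {
            .send_IPI_allbutself = flat_send_IPI_allbutself_model,
    };

    static struct apic_model *apic_model = &flat_apic_model;

    int main(void)
    {
            apic_model->send_IPI_allbutself(2);     /* NMI_VECTOR == 2 */
            return 0;
    }
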
index a160f31197256680c32b835b7c32d62e9dbd184b..41235531b11ca894c809f9ae97f4d3c4f14791a4 100644 (file)
@@ -7,7 +7,7 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/kexec.h>
 #include <asm/processor-flags.h>
 
@@ -17,7 +17,8 @@
 
 #define PTR(x) (x << 2)
 
-/* control_page + KEXEC_CONTROL_CODE_MAX_SIZE
+/*
+ * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
  * ~ control_page + PAGE_SIZE are used as data storage and stack for
  * jumping back
  */
@@ -76,8 +77,10 @@ relocate_kernel:
        movl    %eax, CP_PA_SWAP_PAGE(%edi)
        movl    %ebx, CP_PA_BACKUP_PAGES_MAP(%edi)
 
-       /* get physical address of control page now */
-       /* this is impossible after page table switch */
+       /*
+        * get physical address of control page now
+        * this is impossible after page table switch
+        */
        movl    PTR(PA_CONTROL_PAGE)(%ebp), %edi
 
        /* switch to new set of page tables */
@@ -97,7 +100,8 @@ identity_mapped:
        /* store the start address on the stack */
        pushl   %edx
 
-       /* Set cr0 to a known state:
+       /*
+        * Set cr0 to a known state:
         *  - Paging disabled
         *  - Alignment check disabled
         *  - Write protect disabled
@@ -113,7 +117,8 @@ identity_mapped:
        /* clear cr4 if applicable */
        testl   %ecx, %ecx
        jz      1f
-       /* Set cr4 to a known state:
+       /*
+        * Set cr4 to a known state:
         * Setting everything to zero seems safe.
         */
        xorl    %eax, %eax
@@ -132,15 +137,18 @@ identity_mapped:
        call    swap_pages
        addl    $8, %esp
 
-       /* To be certain of avoiding problems with self-modifying code
+       /*
+        * To be certain of avoiding problems with self-modifying code
         * I need to execute a serializing instruction here.
         * So I flush the TLB, it's handy, and not processor dependent.
         */
        xorl    %eax, %eax
        movl    %eax, %cr3
 
-       /* set all of the registers to known values */
-       /* leave %esp alone */
+       /*
+        * set all of the registers to known values
+        * leave %esp alone
+        */
 
        testl   %esi, %esi
        jnz 1f
index f5afe665a82b27f4aed8503b2ae4478b834a805a..4de8f5b3d476ca0aa544e025568cf00cb6c25ebb 100644 (file)
@@ -7,10 +7,10 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/kexec.h>
 #include <asm/processor-flags.h>
-#include <asm/pgtable.h>
+#include <asm/pgtable_types.h>
 
 /*
  * Must be relocatable PIC code callable as a C function
 #define PTR(x) (x << 3)
 #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
 
+/*
+ * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
+ * ~ control_page + PAGE_SIZE are used as data storage and stack for
+ * jumping back
+ */
+#define DATA(offset)           (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
+
+/* Minimal CPU state */
+#define RSP                    DATA(0x0)
+#define CR0                    DATA(0x8)
+#define CR3                    DATA(0x10)
+#define CR4                    DATA(0x18)
+
+/* other data */
+#define CP_PA_TABLE_PAGE       DATA(0x20)
+#define CP_PA_SWAP_PAGE                DATA(0x28)
+#define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
+
        .text
        .align PAGE_SIZE
        .code64
        .globl relocate_kernel
 relocate_kernel:
-       /* %rdi indirection_page
+       /*
+        * %rdi indirection_page
         * %rsi page_list
         * %rdx start address
+        * %rcx preserve_context
         */
 
-       /* map the control page at its virtual address */
-
-       movq    $0x0000ff8000000000, %r10        /* mask */
-       mov     $(39 - 3), %cl                   /* bits to shift */
-       movq    PTR(VA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PGD)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PUD_0)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PUD_0)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PMD_0)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PMD_0)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PTE_0)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PTE_0)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_CONTROL_PAGE)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       /* identity map the control page at its physical address */
-
-       movq    $0x0000ff8000000000, %r10        /* mask */
-       mov     $(39 - 3), %cl                   /* bits to shift */
-       movq    PTR(PA_CONTROL_PAGE)(%rsi), %r11 /* address to map */
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PGD)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PUD_1)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PUD_1)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PMD_1)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PMD_1)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_PTE_1)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-       shrq    $9, %r10
-       sub     $9, %cl
-
-       movq    %r11, %r9
-       andq    %r10, %r9
-       shrq    %cl, %r9
-
-       movq    PTR(VA_PTE_1)(%rsi), %r8
-       addq    %r8, %r9
-       movq    PTR(PA_CONTROL_PAGE)(%rsi), %r8
-       orq     $PAGE_ATTR, %r8
-       movq    %r8, (%r9)
-
-relocate_new_kernel:
-       /* %rdi indirection_page
-        * %rsi page_list
-        * %rdx start address
-        */
+       /* Save the CPU context, used for jumping back */
+       pushq %rbx
+       pushq %rbp
+       pushq %r12
+       pushq %r13
+       pushq %r14
+       pushq %r15
+       pushf
+
+       movq    PTR(VA_CONTROL_PAGE)(%rsi), %r11
+       movq    %rsp, RSP(%r11)
+       movq    %cr0, %rax
+       movq    %rax, CR0(%r11)
+       movq    %cr3, %rax
+       movq    %rax, CR3(%r11)
+       movq    %cr4, %rax
+       movq    %rax, CR4(%r11)
 
        /* zero out flags, and disable interrupts */
        pushq $0
        popfq
 
-       /* get physical address of control page now */
-       /* this is impossible after page table switch */
+       /*
+        * get physical address of control page now
+        * this is impossible after page table switch
+        */
        movq    PTR(PA_CONTROL_PAGE)(%rsi), %r8
 
        /* get physical address of page table now too */
-       movq    PTR(PA_TABLE_PAGE)(%rsi), %rcx
+       movq    PTR(PA_TABLE_PAGE)(%rsi), %r9
+
+       /* get physical address of swap page now */
+       movq    PTR(PA_SWAP_PAGE)(%rsi), %r10
 
-       /* switch to new set of page tables */
-       movq    PTR(PA_PGD)(%rsi), %r9
+       /* save some information for jumping back */
+       movq    %r9, CP_PA_TABLE_PAGE(%r11)
+       movq    %r10, CP_PA_SWAP_PAGE(%r11)
+       movq    %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
+
+       /* Switch to the identity mapped page tables */
        movq    %r9, %cr3
 
        /* setup a new stack at the end of the physical control page */
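
For preserve_context kexec, relocate_kernel stashes just enough state in the control page to come back: the stack pointer, three control registers, and the physical addresses needed to undo the page swap. The DATA() offsets above amount to this structure (a hedged C rendering, not kernel source):

    #include <stdint.h>

    struct kexec_jump_state_model {
            uint64_t rsp;                   /* DATA(0x00) */
            uint64_t cr0;                   /* DATA(0x08) */
            uint64_t cr3;                   /* DATA(0x10) */
            uint64_t cr4;                   /* DATA(0x18) */
            uint64_t pa_table_page;         /* DATA(0x20) */
            uint64_t pa_swap_page;          /* DATA(0x28) */
            uint64_t pa_backup_pages_map;   /* DATA(0x30) */
    };

    _Static_assert(sizeof(struct kexec_jump_state_model) == 0x38,
                   "must match the DATA() offsets");
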
@@ -172,7 +103,8 @@ identity_mapped:
        /* store the start address on the stack */
        pushq   %rdx
 
-       /* Set cr0 to a known state:
+       /*
+        * Set cr0 to a known state:
         *  - Paging enabled
         *  - Alignment check disabled
         *  - Write protect disabled
@@ -185,7 +117,8 @@ identity_mapped:
        orl     $(X86_CR0_PG | X86_CR0_PE), %eax
        movq    %rax, %cr0
 
-       /* Set cr4 to a known state:
+       /*
+        * Set cr4 to a known state:
         *  - physical address extension enabled
         */
        movq    $X86_CR4_PAE, %rax
@@ -194,12 +127,88 @@ identity_mapped:
        jmp 1f
 1:
 
-       /* Switch to the identity mapped page tables,
-        * and flush the TLB.
-       */
-       movq    %rcx, %cr3
+       /* Flush the TLB (needed?) */
+       movq    %r9, %cr3
+
+       movq    %rcx, %r11
+       call    swap_pages
+
+       /*
+        * To be certain of avoiding problems with self-modifying code
+        * I need to execute a serializing instruction here.
+        * So I flush the TLB by reloading %cr3 here, it's handy,
+        * and not processor dependent.
+        */
+       movq    %cr3, %rax
+       movq    %rax, %cr3
+
+       /*
+        * set all of the registers to known values
+        * leave %rsp alone
+        */
+
+       testq   %r11, %r11
+       jnz 1f
+       xorq    %rax, %rax
+       xorq    %rbx, %rbx
+       xorq    %rcx, %rcx
+       xorq    %rdx, %rdx
+       xorq    %rsi, %rsi
+       xorq    %rdi, %rdi
+       xorq    %rbp, %rbp
+       xorq    %r8,  %r8
+       xorq    %r9,  %r9
+       xorq    %r10, %r10
+       xorq    %r11, %r11
+       xorq    %r12, %r12
+       xorq    %r13, %r13
+       xorq    %r14, %r14
+       xorq    %r15, %r15
+
+       ret
+
+1:
+       popq    %rdx
+       leaq    PAGE_SIZE(%r10), %rsp
+       call    *%rdx
+
+       /* get the re-entry point of the peer system */
+       movq    0(%rsp), %rbp
+       call    1f
+1:
+       popq    %r8
+       subq    $(1b - relocate_kernel), %r8
+       movq    CP_PA_SWAP_PAGE(%r8), %r10
+       movq    CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
+       movq    CP_PA_TABLE_PAGE(%r8), %rax
+       movq    %rax, %cr3
+       lea     PAGE_SIZE(%r8), %rsp
+       call    swap_pages
+       movq    $virtual_mapped, %rax
+       pushq   %rax
+       ret
+
+virtual_mapped:
+       movq    RSP(%r8), %rsp
+       movq    CR4(%r8), %rax
+       movq    %rax, %cr4
+       movq    CR3(%r8), %rax
+       movq    CR0(%r8), %r8
+       movq    %rax, %cr3
+       movq    %r8, %cr0
+       movq    %rbp, %rax
+
+       popf
+       popq    %r15
+       popq    %r14
+       popq    %r13
+       popq    %r12
+       popq    %rbp
+       popq    %rbx
+       ret
 
        /* Do the copies */
+swap_pages:
        movq    %rdi, %rcx      /* Put the page_list in %rcx */
        xorq    %rdi, %rdi
        xorq    %rsi, %rsi
@@ -231,36 +240,27 @@ identity_mapped:
        movq    %rcx,   %rsi  /* For ever source page do a copy */
        andq    $0xfffffffffffff000, %rsi
 
+       movq    %rdi, %rdx
+       movq    %rsi, %rax
+
+       movq    %r10, %rdi
        movq    $512,   %rcx
        rep ; movsq
-       jmp     0b
-3:
-
-       /* To be certain of avoiding problems with self-modifying code
-        * I need to execute a serializing instruction here.
-        * So I flush the TLB by reloading %cr3 here, it's handy,
-        * and not processor dependent.
-        */
-       movq    %cr3, %rax
-       movq    %rax, %cr3
 
-       /* set all of the registers to known values */
-       /* leave %rsp alone */
+       movq    %rax, %rdi
+       movq    %rdx, %rsi
+       movq    $512,   %rcx
+       rep ; movsq
 
-       xorq    %rax, %rax
-       xorq    %rbx, %rbx
-       xorq    %rcx, %rcx
-       xorq    %rdx, %rdx
-       xorq    %rsi, %rsi
-       xorq    %rdi, %rdi
-       xorq    %rbp, %rbp
-       xorq    %r8,  %r8
-       xorq    %r9,  %r9
-       xorq    %r10, %r9
-       xorq    %r11, %r11
-       xorq    %r12, %r12
-       xorq    %r13, %r13
-       xorq    %r14, %r14
-       xorq    %r15, %r15
+       movq    %rdx, %rdi
+       movq    %r10, %rsi
+       movq    $512,   %rcx
+       rep ; movsq
 
+       lea     PAGE_SIZE(%rax), %rsi
+       jmp     0b
+3:
        ret
+
+       .globl kexec_control_code_size
+.set kexec_control_code_size, . - relocate_kernel
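
swap_pages exchanges each destination page with its source through the dedicated swap page, one 4096-byte page (512 quadwords per rep movsq) at a time, so the image being overwritten survives for the jump back. The same three-copy exchange in plain C, as a model rather than the kernel routine:

    #include <string.h>

    #define PAGE_SZ 4096

    static void swap_one_page(void *dst, void *src, void *swap)
    {
            memcpy(swap, src, PAGE_SZ);     /* source      -> swap page   */
            memcpy(src, dst, PAGE_SZ);      /* destination -> source      */
            memcpy(dst, swap, PAGE_SZ);     /* swap page   -> destination */
    }
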
index 6a8811a693245e2b96f8815df4debdc5c9111749..f28c56e6bf94a4cc30617f6b39d0349f13f9ebff 100644 (file)
 #include <asm/e820.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
-#include <asm/arch_hooks.h>
 #include <asm/efi.h>
+#include <asm/timer.h>
+#include <asm/i8259.h>
 #include <asm/sections.h>
 #include <asm/dmi.h>
 #include <asm/io_apic.h>
 #include <asm/ist.h>
 #include <asm/vmi.h>
-#include <setup_arch.h>
+#include <asm/setup_arch.h>
 #include <asm/bios_ebda.h>
 #include <asm/cacheflush.h>
 #include <asm/processor.h>
@@ -89,7 +90,7 @@
 
 #include <asm/system.h>
 #include <asm/vsyscall.h>
-#include <asm/smp.h>
+#include <asm/cpu.h>
 #include <asm/desc.h>
 #include <asm/dma.h>
 #include <asm/iommu.h>
@@ -97,7 +98,6 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 
-#include <mach_apic.h>
 #include <asm/paravirt.h>
 #include <asm/hypervisor.h>
 
 #define ARCH_SETUP
 #endif
 
+unsigned int boot_cpu_id __read_mostly;
+
+#ifdef CONFIG_X86_64
+int default_cpu_present_to_apicid(int mps_cpu)
+{
+       return __default_cpu_present_to_apicid(mps_cpu);
+}
+
+int default_check_phys_apicid_present(int boot_cpu_physical_apicid)
+{
+       return __default_check_phys_apicid_present(boot_cpu_physical_apicid);
+}
+#endif
+
 #ifndef CONFIG_DEBUG_BOOT_PARAMS
 struct boot_params __initdata boot_params;
 #else
@@ -188,7 +202,9 @@ struct ist_info ist_info;
 #endif
 
 #else
-struct cpuinfo_x86 boot_cpu_data __read_mostly;
+struct cpuinfo_x86 boot_cpu_data __read_mostly = {
+       .x86_phys_bits = MAX_PHYSMEM_BITS,
+};
 EXPORT_SYMBOL(boot_cpu_data);
 #endif
 
@@ -586,20 +602,7 @@ static int __init setup_elfcorehdr(char *arg)
 early_param("elfcorehdr", setup_elfcorehdr);
 #endif
 
-static int __init default_update_genapic(void)
-{
-#ifdef CONFIG_X86_SMP
-# if defined(CONFIG_X86_GENERICARCH) || defined(CONFIG_X86_64)
-       genapic->wakeup_cpu = wakeup_secondary_cpu_via_init;
-# endif
-#endif
-
-       return 0;
-}
-
-static struct x86_quirks default_x86_quirks __initdata = {
-       .update_genapic         = default_update_genapic,
-};
+static struct x86_quirks default_x86_quirks __initdata;
 
 struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
 
@@ -656,7 +659,6 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_X86_32
        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
        visws_early_detect();
-       pre_setup_arch_hook();
 #else
        printk(KERN_INFO "Command line: %s\n", boot_command_line);
 #endif
@@ -824,8 +826,7 @@ void __init setup_arch(char **cmdline_p)
 #else
        num_physpages = max_pfn;
 
-       if (cpu_has_x2apic)
-               check_x2apic();
+       check_x2apic();
 
        /* How many end-of-memory variables you have, grandma! */
        /* need this before calling reserve_initrd */
@@ -865,9 +866,7 @@ void __init setup_arch(char **cmdline_p)
 
        reserve_initrd();
 
-#ifdef CONFIG_X86_64
        vsmp_init();
-#endif
 
        io_delay_init();
 
@@ -893,12 +892,11 @@ void __init setup_arch(char **cmdline_p)
         */
        acpi_reserve_bootmem();
 #endif
-#ifdef CONFIG_X86_FIND_SMP_CONFIG
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
-#endif
+
        reserve_crashkernel();
 
 #ifdef CONFIG_X86_64
@@ -925,9 +923,7 @@ void __init setup_arch(char **cmdline_p)
        map_vsyscall();
 #endif
 
-#ifdef CONFIG_X86_GENERICARCH
        generic_apic_probe();
-#endif
 
        early_quirks();
 
@@ -978,4 +974,95 @@ void __init setup_arch(char **cmdline_p)
 #endif
 }
 
+#ifdef CONFIG_X86_32
+
+/**
+ * x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
+ *
+ * Description:
+ *     Perform any necessary interrupt initialisation prior to setting up
+ *     the "ordinary" interrupt call gates.  For legacy reasons, the ISA
+ *     interrupts should be initialised here if the machine emulates a PC
+ *     in any way.
+ **/
+void __init x86_quirk_pre_intr_init(void)
+{
+       if (x86_quirks->arch_pre_intr_init) {
+               if (x86_quirks->arch_pre_intr_init())
+                       return;
+       }
+       init_ISA_irqs();
+}
+
+/**
+ * x86_quirk_intr_init - post gate setup interrupt initialisation
+ *
+ * Description:
+ *     Fill in any interrupts that may have been left out by the general
+ *     init_IRQ() routine.  Interrupts having to do with the machine rather
+ *     than the devices on the I/O bus (like APIC interrupts in Intel MP
+ *     systems) are started here.
+ **/
+void __init x86_quirk_intr_init(void)
+{
+       if (x86_quirks->arch_intr_init) {
+               if (x86_quirks->arch_intr_init())
+                       return;
+       }
+}
+
+/**
+ * x86_quirk_trap_init - initialise system specific traps
+ *
+ * Description:
+ *     Called as the final act of trap_init().  Used in VISWS to initialise
+ *     the various board specific APIC traps.
+ **/
+void __init x86_quirk_trap_init(void)
+{
+       if (x86_quirks->arch_trap_init) {
+               if (x86_quirks->arch_trap_init())
+                       return;
+       }
+}
+
+static struct irqaction irq0  = {
+       .handler = timer_interrupt,
+       .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
+       .mask = CPU_MASK_NONE,
+       .name = "timer"
+};
+
+/**
+ * x86_quirk_pre_time_init - do any specific initialisations before the system timer is set up.
+ *
+ **/
+void __init x86_quirk_pre_time_init(void)
+{
+       if (x86_quirks->arch_pre_time_init)
+               x86_quirks->arch_pre_time_init();
+}
 
+/**
+ * x86_quirk_time_init - do any specific initialisations for the system timer.
+ *
+ * Description:
+ *     Must plug the system timer interrupt source at HZ into the IRQ listed
+ *     in irq_vectors.h:TIMER_IRQ
+ **/
+void __init x86_quirk_time_init(void)
+{
+       if (x86_quirks->arch_time_init) {
+               /*
+                * A nonzero return code does not mean failure, it means
+                * that the architecture quirk does not want any
+                * generic (timer) setup to be performed after this:
+                */
+               if (x86_quirks->arch_time_init())
+                       return;
+       }
+
+       irq0.mask = cpumask_of_cpu(0);
+       setup_irq(0, &irq0);
+}
+#endif /* CONFIG_X86_32 */
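
All the x86_quirk_* wrappers follow one convention: an optional hook runs first, and a nonzero return suppresses the generic setup that would otherwise follow, exactly as the comment in x86_quirk_time_init() spells out. The shape of that contract, sketched:

    struct quirks_model {
            int (*arch_time_init)(void);    /* optional; nonzero == handled */
    };

    static void generic_timer_setup_model(void) { /* ... */ }

    static void time_init_model(struct quirks_model *q)
    {
            if (q->arch_time_init && q->arch_time_init())
                    return;         /* quirk replaced the generic path */
            generic_timer_setup_model();
    }
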
index 01161077a49c32cb6b2adac120c91176105875f4..400331b50a53f0838c407a8b688bcb8d6f686087 100644 (file)
 #include <linux/crash_dump.h>
 #include <linux/smp.h>
 #include <linux/topology.h>
+#include <linux/pfn.h>
 #include <asm/sections.h>
 #include <asm/processor.h>
 #include <asm/setup.h>
 #include <asm/mpspec.h>
 #include <asm/apicdef.h>
 #include <asm/highmem.h>
+#include <asm/proto.h>
+#include <asm/cpumask.h>
+#include <asm/cpu.h>
+#include <asm/stackprotector.h>
 
-#ifdef CONFIG_X86_LOCAL_APIC
-unsigned int num_processors;
-unsigned disabled_cpus __cpuinitdata;
-/* Processor that is doing the boot up */
-unsigned int boot_cpu_physical_apicid = -1U;
-EXPORT_SYMBOL(boot_cpu_physical_apicid);
-unsigned int max_physical_apicid;
-
-/* Bitmask of physically existing CPUs */
-physid_mask_t phys_cpu_present_map;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
 #endif
 
-/* map cpu index to physical APIC ID */
-DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
-DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
-
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define        X86_64_NUMA     1
+DEFINE_PER_CPU(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
 
-/* map cpu index to node index */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+#ifdef CONFIG_X86_64
+#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
+#else
+#define BOOT_PERCPU_OFFSET 0
+#endif
 
-/* which logical CPUs are on which nodes */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
+DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
+EXPORT_PER_CPU_SYMBOL(this_cpu_off);
 
-/* setup node_to_cpumask_map */
-static void __init setup_node_to_cpumask_map(void);
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
+};
+EXPORT_SYMBOL(__per_cpu_offset);
 
+/*
+ * On x86_64 symbols referenced from code should be reachable using
+ * 32bit relocations.  Reserve space for static percpu variables in
+ * modules so that they are always served from the first chunk which
+ * is located at the percpu segment base.  On x86_32, anything can
+ * address anywhere.  No need to reserve space in the first chunk.
+ */
+#ifdef CONFIG_X86_64
+#define PERCPU_FIRST_CHUNK_RESERVE     PERCPU_MODULE_RESERVE
 #else
-static inline void setup_node_to_cpumask_map(void) { }
+#define PERCPU_FIRST_CHUNK_RESERVE     0
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
-/*
- * Copy data used in early init routines from the initial arrays to the
- * per cpu data areas.  These arrays then become expendable and the
- * *_early_ptr's are zeroed indicating that the static arrays are gone.
+/**
+ * pcpu_need_numa - determine percpu allocation needs to consider NUMA
+ *
+ * If NUMA is not configured or there is only one NUMA node available,
+ * there is no reason to consider NUMA.  This function determines
+ * whether percpu allocation should consider NUMA or not.
+ *
+ * RETURNS:
+ * true if NUMA should be considered; otherwise, false.
  */
-static void __init setup_per_cpu_maps(void)
+static bool __init pcpu_need_numa(void)
 {
-       int cpu;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       pg_data_t *last = NULL;
+       unsigned int cpu;
 
        for_each_possible_cpu(cpu) {
-               per_cpu(x86_cpu_to_apicid, cpu) =
-                               early_per_cpu_map(x86_cpu_to_apicid, cpu);
-               per_cpu(x86_bios_cpu_apicid, cpu) =
-                               early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-               per_cpu(x86_cpu_to_node_map, cpu) =
-                               early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
-       }
+               int node = early_cpu_to_node(cpu);
 
-       /* indicate the early static arrays will soon be gone */
-       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
-       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
-       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+               if (node_online(node) && NODE_DATA(node) &&
+                   last && last != NODE_DATA(node))
+                       return true;
+
+               last = NODE_DATA(node);
+       }
 #endif
+       return false;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Great future not-so-futuristic plan: make i386 and x86_64 do it
- * the same way
- */
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
-static inline void setup_cpu_pda_map(void) { }
-
-#elif !defined(CONFIG_SMP)
-static inline void setup_cpu_pda_map(void) { }
-
-#else /* CONFIG_SMP && CONFIG_X86_64 */
-
-/*
- * Allocate cpu_pda pointer table and array via alloc_bootmem.
+/**
+ * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
+ * @cpu: cpu to allocate for
+ * @size: size allocation in bytes
+ * @align: alignment
+ *
+ * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
+ * does the right thing for NUMA regardless of the current
+ * configuration.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
  */
-static void __init setup_cpu_pda_map(void)
+static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
+                                       unsigned long align)
 {
-       char *pda;
-       struct x8664_pda **new_cpu_pda;
-       unsigned long size;
-       int cpu;
-
-       size = roundup(sizeof(struct x8664_pda), cache_line_size());
-
-       /* allocate cpu_pda array and pointer table */
-       {
-               unsigned long tsize = nr_cpu_ids * sizeof(void *);
-               unsigned long asize = size * (nr_cpu_ids - 1);
-
-               tsize = roundup(tsize, cache_line_size());
-               new_cpu_pda = alloc_bootmem(tsize + asize);
-               pda = (char *)new_cpu_pda + tsize;
-       }
-
-       /* initialize pointer table to static pda's */
-       for_each_possible_cpu(cpu) {
-               if (cpu == 0) {
-                       /* leave boot cpu pda in place */
-                       new_cpu_pda[0] = cpu_pda(0);
-                       continue;
-               }
-               new_cpu_pda[cpu] = (struct x8664_pda *)pda;
-               new_cpu_pda[cpu]->in_bootmem = 1;
-               pda += size;
+       const unsigned long goal = __pa(MAX_DMA_ADDRESS);
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+       int node = early_cpu_to_node(cpu);
+       void *ptr;
+
+       if (!node_online(node) || !NODE_DATA(node)) {
+               ptr = __alloc_bootmem_nopanic(size, align, goal);
+               pr_info("cpu %d has no node %d or node-local memory\n",
+                       cpu, node);
+               pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
+                        cpu, size, __pa(ptr));
+       } else {
+               ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
+                                                  size, align, goal);
+               pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
+                        "%016lx\n", cpu, size, node, __pa(ptr));
        }
-
-       /* point to new pointer table */
-       _cpu_pda = new_cpu_pda;
+       return ptr;
+#else
+       return __alloc_bootmem_nopanic(size, align, goal);
+#endif
 }
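
pcpu_alloc_bootmem()'s policy is simply "node-local if the cpu's node has memory, otherwise anywhere below MAX_DMA_ADDRESS". Reduced to a hedged sketch, with malloc standing in for both bootmem allocators:

    #include <stdlib.h>

    static void *node_pool_alloc(size_t size)   { return malloc(size); }
    static void *global_pool_alloc(size_t size) { return malloc(size); }

    static void *pcpu_alloc_model(int node_online, size_t size)
    {
            return node_online ? node_pool_alloc(size)      /* node-local */
                               : global_pool_alloc(size);   /* fallback   */
    }
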
 
-#endif /* CONFIG_SMP && CONFIG_X86_64 */
-
-#ifdef CONFIG_X86_64
+/*
+ * Remap allocator
+ *
+ * This allocator uses PMD page as unit.  A PMD page is allocated for
+ * each cpu and each is remapped into vmalloc area using PMD mapping.
+ * As PMD page is quite large, only part of it is used for the first
+ * chunk.  Unused part is returned to the bootmem allocator.
+ *
+ * So, the PMD pages are mapped twice - once to the physical mapping
+ * and to the vmalloc area for the first percpu chunk.  The double
+ * mapping does add one more PMD TLB entry pressure but still is much
+ * better than only using 4k mappings while still being NUMA friendly.
+ */
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+static size_t pcpur_size __initdata;
+static void **pcpur_ptrs __initdata;
 
-/* correctly size the local cpu masks */
-static void __init setup_cpu_local_masks(void)
+static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
 {
-       alloc_bootmem_cpumask_var(&cpu_initialized_mask);
-       alloc_bootmem_cpumask_var(&cpu_callin_mask);
-       alloc_bootmem_cpumask_var(&cpu_callout_mask);
-       alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
-}
+       size_t off = (size_t)pageno << PAGE_SHIFT;
 
-#else /* CONFIG_X86_32 */
+       if (off >= pcpur_size)
+               return NULL;
 
-static inline void setup_cpu_local_masks(void)
-{
+       return virt_to_page(pcpur_ptrs[cpu] + off);
 }
 
-#endif /* CONFIG_X86_32 */
-
-/*
- * Great future plan:
- * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- * Always point %gs to its beginning
- */
-void __init setup_per_cpu_areas(void)
+static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
-       ssize_t size, old_size;
-       char *ptr;
-       int cpu;
-       unsigned long align = 1;
-
-       /* Setup cpu_pda map */
-       setup_cpu_pda_map();
+       static struct vm_struct vm;
+       pg_data_t *last;
+       size_t ptrs_size, dyn_size;
+       unsigned int cpu;
+       ssize_t ret;
+
+       /*
+        * If large page isn't supported, there's no benefit in doing
+        * this.  Also, on non-NUMA, embedding is better.
+        */
+       if (!cpu_has_pse || pcpu_need_numa())
+               return -EINVAL;
+
+       last = NULL;
+       for_each_possible_cpu(cpu) {
+               int node = early_cpu_to_node(cpu);
 
-       /* Copy section for each CPU (we discard the original) */
-       old_size = PERCPU_ENOUGH_ROOM;
-       align = max_t(unsigned long, PAGE_SIZE, align);
-       size = roundup(old_size, align);
+               if (node_online(node) && NODE_DATA(node) &&
+                   last && last != NODE_DATA(node))
+                       goto proceed;
 
-       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
-               NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
+               last = NODE_DATA(node);
+       }
+       return -EINVAL;
+
+proceed:
+       /*
+        * Currently supports only single page.  Supporting multiple
+        * pages won't be too difficult if it ever becomes necessary.
+        */
+       pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+                              PERCPU_DYNAMIC_RESERVE);
+       if (pcpur_size > PMD_SIZE) {
+               pr_warning("PERCPU: static data is larger than large page, "
+                          "can't use large page\n");
+               return -EINVAL;
+       }
+       dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
-       pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
+       /* allocate pointer array and alloc large pages */
+       ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
+       pcpur_ptrs = alloc_bootmem(ptrs_size);
 
        for_each_possible_cpu(cpu) {
-#ifndef CONFIG_NEED_MULTIPLE_NODES
-               ptr = __alloc_bootmem(size, align,
-                                __pa(MAX_DMA_ADDRESS));
-#else
-               int node = early_cpu_to_node(cpu);
-               if (!node_online(node) || !NODE_DATA(node)) {
-                       ptr = __alloc_bootmem(size, align,
-                                        __pa(MAX_DMA_ADDRESS));
-                       pr_info("cpu %d has no node %d or node-local memory\n",
-                               cpu, node);
-                       pr_debug("per cpu data for cpu%d at %016lx\n",
-                                cpu, __pa(ptr));
-               } else {
-                       ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
-                                                       __pa(MAX_DMA_ADDRESS));
-                       pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
-                               cpu, node, __pa(ptr));
-               }
-#endif
-               per_cpu_offset(cpu) = ptr - __per_cpu_start;
-               memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+               pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
+               if (!pcpur_ptrs[cpu])
+                       goto enomem;
+
+               /*
+                * Only use pcpur_size bytes and give back the rest.
+                *
+                * Ingo: The 2MB up-rounding bootmem is needed to make
+                * sure the partial 2MB page is still fully RAM - it's
+                * not well-specified to have a PAT-incompatible area
+                * (unmapped RAM, device memory, etc.) in that hole.
+                */
+               free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
+                            PMD_SIZE - pcpur_size);
+
+               memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
        }
 
-       /* Setup percpu data maps */
-       setup_per_cpu_maps();
-
-       /* Setup node to cpumask map */
-       setup_node_to_cpumask_map();
-
-       /* Setup cpu initialized, callin, callout masks */
-       setup_cpu_local_masks();
-}
-
-#endif
+       /* allocate address and map */
+       vm.flags = VM_ALLOC;
+       vm.size = num_possible_cpus() * PMD_SIZE;
+       vm_area_register_early(&vm, PMD_SIZE);
 
-#ifdef X86_64_NUMA
+       for_each_possible_cpu(cpu) {
+               pmd_t *pmd;
 
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: node_to_cpumask() is not valid until after this is done.
- */
-static void __init setup_node_to_cpumask_map(void)
-{
-       unsigned int node, num = 0;
-       cpumask_t *map;
-
-       /* setup nr_node_ids if not done yet */
-       if (nr_node_ids == MAX_NUMNODES) {
-               for_each_node_mask(node, node_possible_map)
-                       num = node;
-               nr_node_ids = num + 1;
+               pmd = populate_extra_pmd((unsigned long)vm.addr
+                                        + cpu * PMD_SIZE);
+               set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
+                                    PAGE_KERNEL_LARGE));
        }
 
-       /* allocate the map */
-       map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-
-       pr_debug("Node to cpumask map at %p for %d nodes\n",
-                map, nr_node_ids);
-
-       /* node_to_cpumask() will now work */
-       node_to_cpumask_map = map;
+       /* we're ready, commit */
+       pr_info("PERCPU: Remapped at %p with large pages, static data "
+               "%zu bytes\n", vm.addr, static_size);
+
+       ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+                                    PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
+                                    PMD_SIZE, vm.addr, NULL);
+       goto out_free_ar;
+
+enomem:
+       for_each_possible_cpu(cpu)
+               if (pcpur_ptrs[cpu])
+                       free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
+       ret = -ENOMEM;
+out_free_ar:
+       free_bootmem(__pa(pcpur_ptrs), ptrs_size);
+       return ret;
 }
-
-void __cpuinit numa_set_node(int cpu, int node)
+#else
+static ssize_t __init setup_pcpu_remap(size_t static_size)
 {
-       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-       if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-               cpu_pda(cpu)->nodenumber = node;
-
-       if (cpu_to_node_map)
-               cpu_to_node_map[cpu] = node;
-
-       else if (per_cpu_offset(cpu))
-               per_cpu(x86_cpu_to_node_map, cpu) = node;
-
-       else
-               pr_debug("Setting node for non-present cpu %d\n", cpu);
+       return -EINVAL;
 }
+#endif
 
-void __cpuinit numa_clear_node(int cpu)
+/*
+ * Embedding allocator
+ *
+ * The first chunk is sized to just contain the static area plus
+ * module and dynamic reserves and embedded into linear physical
+ * mapping so that it can use PMD mapping without additional TLB
+ * pressure.
+ */
+static ssize_t __init setup_pcpu_embed(size_t static_size)
 {
-       numa_set_node(cpu, NUMA_NO_NODE);
+       size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
+
+       /*
+        * If large page isn't supported, there's no benefit in doing
+        * this.  Also, embedding allocation doesn't play well with
+        * NUMA.
+        */
+       if (!cpu_has_pse || pcpu_need_numa())
+               return -EINVAL;
+
+       return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
+                                     reserve - PERCPU_FIRST_CHUNK_RESERVE, -1);
 }
 
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+/*
+ * 4k page allocator
+ *
+ * This is the basic allocator.  Static percpu area is allocated
+ * page-by-page and most of initialization is done by the generic
+ * setup function.
+ */
+static struct page **pcpu4k_pages __initdata;
+static int pcpu4k_nr_static_pages __initdata;
 
-void __cpuinit numa_add_cpu(int cpu)
+static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
 {
-       cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+       if (pageno < pcpu4k_nr_static_pages)
+               return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
+       return NULL;
 }
 
-void __cpuinit numa_remove_cpu(int cpu)
+static void __init pcpu4k_populate_pte(unsigned long addr)
 {
-       cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+       populate_extra_pte(addr);
 }
 
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
+static ssize_t __init setup_pcpu_4k(size_t static_size)
 {
-       int node = cpu_to_node(cpu);
-       cpumask_t *mask;
-       char buf[64];
-
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_ERR "node_to_cpumask_map NULL\n");
-               dump_stack();
-               return;
-       }
-
-       mask = &node_to_cpumask_map[node];
-       if (enable)
-               cpu_set(cpu, *mask);
-       else
-               cpu_clear(cpu, *mask);
+       size_t pages_size;
+       unsigned int cpu;
+       int i, j;
+       ssize_t ret;
+
+       pcpu4k_nr_static_pages = PFN_UP(static_size);
+
+       /* unaligned allocations can't be freed, round up to page size */
+       pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
+                              * sizeof(pcpu4k_pages[0]));
+       pcpu4k_pages = alloc_bootmem(pages_size);
+
+       /* allocate and copy */
+       j = 0;
+       for_each_possible_cpu(cpu)
+               for (i = 0; i < pcpu4k_nr_static_pages; i++) {
+                       void *ptr;
+
+                       ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
+                       if (!ptr)
+                               goto enomem;
+
+                       memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
+                       pcpu4k_pages[j++] = virt_to_page(ptr);
+               }
 
-       cpulist_scnprintf(buf, sizeof(buf), mask);
-       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+       /* we're ready, commit */
+       pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
+               pcpu4k_nr_static_pages, static_size);
+
+       ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size,
+                                    PERCPU_FIRST_CHUNK_RESERVE, -1,
+                                    -1, NULL, pcpu4k_populate_pte);
+       goto out_free_ar;
+
+enomem:
+       while (--j >= 0)
+               free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
+       ret = -ENOMEM;
+out_free_ar:
+       free_bootmem(__pa(pcpu4k_pages), pages_size);
+       return ret;
 }
 
-void __cpuinit numa_add_cpu(int cpu)
+static inline void setup_percpu_segment(int cpu)
 {
-       numa_set_cpumask(cpu, 1);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-       numa_set_cpumask(cpu, 0);
-}
+#ifdef CONFIG_X86_32
+       struct desc_struct gdt;
 
-int cpu_to_node(int cpu)
-{
-       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-               printk(KERN_WARNING
-                       "cpu_to_node(%d): usage too early!\n", cpu);
-               dump_stack();
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
+       pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
+                       0x2 | DESCTYPE_S, 0x8);
+       gdt.s = 1;
+       write_gdt_entry(get_cpu_gdt_table(cpu),
+                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
+#endif
 }
-EXPORT_SYMBOL(cpu_to_node);
 
 /*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
+ * Great future plan:
+ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
+ * Always point %gs to its beginning
  */
-int early_cpu_to_node(int cpu)
+void __init setup_per_cpu_areas(void)
 {
-       if (early_per_cpu_ptr(x86_cpu_to_node_map))
-               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-       if (!per_cpu_offset(cpu)) {
-               printk(KERN_WARNING
-                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-               dump_stack();
-               return NUMA_NO_NODE;
-       }
-       return per_cpu(x86_cpu_to_node_map, cpu);
-}
+       size_t static_size = __per_cpu_end - __per_cpu_start;
+       unsigned int cpu;
+       unsigned long delta;
+       size_t pcpu_unit_size;
+       ssize_t ret;
 
+       pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+               NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_WARNING
-                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
-                       node);
-               dump_stack();
-               return (const cpumask_t *)&cpu_online_map;
-       }
-       if (node >= nr_node_ids) {
-               printk(KERN_WARNING
-                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-                       node, nr_node_ids);
-               dump_stack();
-               return &cpu_mask_none;
-       }
-       return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used.  The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-       if (node_to_cpumask_map == NULL) {
-               printk(KERN_WARNING
-                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-               dump_stack();
-               return cpu_online_map;
-       }
-       if (node >= nr_node_ids) {
-               printk(KERN_WARNING
-                       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-                       node, nr_node_ids);
-               dump_stack();
-               return cpu_mask_none;
+       /*
+        * Allocate percpu area.  If PSE is supported, try to make use
+        * of large page mappings.  Please read comments on top of
+        * each allocator for details.
+        */
+       ret = setup_pcpu_remap(static_size);
+       if (ret < 0)
+               ret = setup_pcpu_embed(static_size);
+       if (ret < 0)
+               ret = setup_pcpu_4k(static_size);
+       if (ret < 0)
+               panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
+                     static_size, ret);
+
+       pcpu_unit_size = ret;
+
+       /* alrighty, percpu areas up and running */
+       delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+       for_each_possible_cpu(cpu) {
+               per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
+               per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
+               per_cpu(cpu_number, cpu) = cpu;
+               setup_percpu_segment(cpu);
+               setup_stack_canary_segment(cpu);
+               /*
+                * Copy data used in early init routines from the
+                * initial arrays to the per cpu data areas.  These
+                * arrays then become expendable and the *_early_ptr's
+                * are zeroed indicating that the static arrays are
+                * gone.
+                */
+#ifdef CONFIG_X86_LOCAL_APIC
+               per_cpu(x86_cpu_to_apicid, cpu) =
+                       early_per_cpu_map(x86_cpu_to_apicid, cpu);
+               per_cpu(x86_bios_cpu_apicid, cpu) =
+                       early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#endif
+#ifdef CONFIG_X86_64
+               per_cpu(irq_stack_ptr, cpu) =
+                       per_cpu(irq_stack_union.irq_stack, cpu) +
+                       IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+               per_cpu(x86_cpu_to_node_map, cpu) =
+                       early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
+#endif
+               /*
+                * Up to this point, the boot CPU has been using .data.init
+                * area.  Reload any changed state for the boot CPU.
+                */
+               if (cpu == boot_cpu_id)
+                       switch_to_new_gdt(cpu);
        }
-       return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
 
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+       /* indicate the early static arrays will soon be gone */
+#ifdef CONFIG_X86_LOCAL_APIC
+       early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+       early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#endif
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+       early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
+#endif
 
-#endif /* X86_64_NUMA */
+       /* Setup node to cpumask map */
+       setup_node_to_cpumask_map();
 
+       /* Setup cpu initialized, callin, callout masks */
+       setup_cpu_local_masks();
+}
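
setup_per_cpu_areas() now tries the three first-chunk allocators in order of decreasing ambition, large-page remap, then embedding, then plain 4k pages, and panics only if all of them fail. The control flow as a runnable sketch (sizes and return values invented):

    #include <stdio.h>
    #include <stdlib.h>

    static long try_remap_model(size_t sz) { (void)sz; return -1; }  /* no PSE */
    static long try_embed_model(size_t sz) { (void)sz; return -1; }  /* NUMA   */
    static long try_4k_model(size_t sz)    { (void)sz; return 8192; }

    int main(void)
    {
            size_t static_size = 4096;
            long ret = try_remap_model(static_size);

            if (ret < 0)
                    ret = try_embed_model(static_size);
            if (ret < 0)
                    ret = try_4k_model(static_size);
            if (ret < 0) {
                    fprintf(stderr, "cannot allocate static percpu area\n");
                    exit(1);
            }
            printf("percpu unit size: %ld bytes\n", ret);
            return 0;
    }
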
index df0587f24c547eac4a45a0aa64ea73a5dd49c61e..d2cc6428c5875a6103a00757eeeec9ff0831508f 100644 (file)
 # define FIX_EFLAGS    __FIX_EFLAGS
 #endif
 
-#define COPY(x)                        {               \
-       err |= __get_user(regs->x, &sc->x);     \
-}
+#define COPY(x)                        do {                    \
+       get_user_ex(regs->x, &sc->x);                   \
+} while (0)
 
-#define COPY_SEG(seg)          {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               regs->seg = tmp;                        \
-}
+#define GET_SEG(seg)           ({                      \
+       unsigned short tmp;                             \
+       get_user_ex(tmp, &sc->seg);                     \
+       tmp;                                            \
+})
 
-#define COPY_SEG_CPL3(seg)     {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               regs->seg = tmp | 3;                    \
-}
+#define COPY_SEG(seg)          do {                    \
+       regs->seg = GET_SEG(seg);                       \
+} while (0)
 
-#define GET_SEG(seg)           {                       \
-               unsigned short tmp;                     \
-               err |= __get_user(tmp, &sc->seg);       \
-               loadsegment(seg, tmp);                  \
-}
+#define COPY_SEG_CPL3(seg)     do {                    \
+       regs->seg = GET_SEG(seg) | 3;                   \
+} while (0)
 
 static int
 restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
@@ -83,45 +79,49 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;
 
+       get_user_try {
+
 #ifdef CONFIG_X86_32
-       GET_SEG(gs);
-       COPY_SEG(fs);
-       COPY_SEG(es);
-       COPY_SEG(ds);
+               set_user_gs(regs, GET_SEG(gs));
+               COPY_SEG(fs);
+               COPY_SEG(es);
+               COPY_SEG(ds);
 #endif /* CONFIG_X86_32 */
 
-       COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
-       COPY(dx); COPY(cx); COPY(ip);
+               COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
+               COPY(dx); COPY(cx); COPY(ip);
 
 #ifdef CONFIG_X86_64
-       COPY(r8);
-       COPY(r9);
-       COPY(r10);
-       COPY(r11);
-       COPY(r12);
-       COPY(r13);
-       COPY(r14);
-       COPY(r15);
+               COPY(r8);
+               COPY(r9);
+               COPY(r10);
+               COPY(r11);
+               COPY(r12);
+               COPY(r13);
+               COPY(r14);
+               COPY(r15);
 #endif /* CONFIG_X86_64 */
 
 #ifdef CONFIG_X86_32
-       COPY_SEG_CPL3(cs);
-       COPY_SEG_CPL3(ss);
+               COPY_SEG_CPL3(cs);
+               COPY_SEG_CPL3(ss);
 #else /* !CONFIG_X86_32 */
-       /* Kernel saves and restores only the CS segment register on signals,
-        * which is the bare minimum needed to allow mixed 32/64-bit code.
-        * App's signal handler can save/restore other segments if needed. */
-       COPY_SEG_CPL3(cs);
+               /* Kernel saves and restores only the CS segment register on signals,
+                * which is the bare minimum needed to allow mixed 32/64-bit code.
+                * App's signal handler can save/restore other segments if needed. */
+               COPY_SEG_CPL3(cs);
 #endif /* CONFIG_X86_32 */
 
-       err |= __get_user(tmpflags, &sc->flags);
-       regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
-       regs->orig_ax = -1;             /* disable syscall checks */
+               get_user_ex(tmpflags, &sc->flags);
+               regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
+               regs->orig_ax = -1;             /* disable syscall checks */
 
-       err |= __get_user(buf, &sc->fpstate);
-       err |= restore_i387_xstate(buf);
+               get_user_ex(buf, &sc->fpstate);
+               err |= restore_i387_xstate(buf);
+
+               get_user_ex(*pax, &sc->ax);
+       } get_user_catch(err);
 
-       err |= __get_user(*pax, &sc->ax);
        return err;
 }
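
A note on the pattern introduced above: get_user_ex() performs the access
without an individual error check, and a fault anywhere inside the
get_user_try block is delivered once at get_user_catch(). The real macros
rely on CPU exception tables; the user-space stand-ins below only mimic the
assumed latch-one-error semantics and are not the kernel implementation:

#include <errno.h>
#include <stdio.h>

static int uaccess_err;

#define get_user_try            do { uaccess_err = 0;
#define get_user_ex(dst, src)                           \
        do {                                            \
                if (src)                                \
                        (dst) = *(src);                 \
                else                                    \
                        uaccess_err = -EFAULT;          \
        } while (0)
#define get_user_catch(err)     (err) |= uaccess_err; } while (0)

int main(void)
{
        int a = 1, b = 2, x = 0, y = 0, err = 0;
        int *pa = &a, *pb = &b, *bad = NULL;

        get_user_try {
                get_user_ex(x, pa);
                get_user_ex(y, pb);
                get_user_ex(x, bad);    /* "faults" and latches -EFAULT */
        } get_user_catch(err);

        printf("x=%d y=%d err=%d\n", x, y, err);        /* x=1 y=2 err=-14 */
        return 0;
}
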
 
@@ -131,57 +131,55 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 {
        int err = 0;
 
-#ifdef CONFIG_X86_32
-       {
-               unsigned int tmp;
+       put_user_try {
 
-               savesegment(gs, tmp);
-               err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
-       }
-       err |= __put_user(regs->fs, (unsigned int __user *)&sc->fs);
-       err |= __put_user(regs->es, (unsigned int __user *)&sc->es);
-       err |= __put_user(regs->ds, (unsigned int __user *)&sc->ds);
+#ifdef CONFIG_X86_32
+               put_user_ex(get_user_gs(regs), (unsigned int __user *)&sc->gs);
+               put_user_ex(regs->fs, (unsigned int __user *)&sc->fs);
+               put_user_ex(regs->es, (unsigned int __user *)&sc->es);
+               put_user_ex(regs->ds, (unsigned int __user *)&sc->ds);
 #endif /* CONFIG_X86_32 */
 
-       err |= __put_user(regs->di, &sc->di);
-       err |= __put_user(regs->si, &sc->si);
-       err |= __put_user(regs->bp, &sc->bp);
-       err |= __put_user(regs->sp, &sc->sp);
-       err |= __put_user(regs->bx, &sc->bx);
-       err |= __put_user(regs->dx, &sc->dx);
-       err |= __put_user(regs->cx, &sc->cx);
-       err |= __put_user(regs->ax, &sc->ax);
+               put_user_ex(regs->di, &sc->di);
+               put_user_ex(regs->si, &sc->si);
+               put_user_ex(regs->bp, &sc->bp);
+               put_user_ex(regs->sp, &sc->sp);
+               put_user_ex(regs->bx, &sc->bx);
+               put_user_ex(regs->dx, &sc->dx);
+               put_user_ex(regs->cx, &sc->cx);
+               put_user_ex(regs->ax, &sc->ax);
 #ifdef CONFIG_X86_64
-       err |= __put_user(regs->r8, &sc->r8);
-       err |= __put_user(regs->r9, &sc->r9);
-       err |= __put_user(regs->r10, &sc->r10);
-       err |= __put_user(regs->r11, &sc->r11);
-       err |= __put_user(regs->r12, &sc->r12);
-       err |= __put_user(regs->r13, &sc->r13);
-       err |= __put_user(regs->r14, &sc->r14);
-       err |= __put_user(regs->r15, &sc->r15);
+               put_user_ex(regs->r8, &sc->r8);
+               put_user_ex(regs->r9, &sc->r9);
+               put_user_ex(regs->r10, &sc->r10);
+               put_user_ex(regs->r11, &sc->r11);
+               put_user_ex(regs->r12, &sc->r12);
+               put_user_ex(regs->r13, &sc->r13);
+               put_user_ex(regs->r14, &sc->r14);
+               put_user_ex(regs->r15, &sc->r15);
 #endif /* CONFIG_X86_64 */
 
-       err |= __put_user(current->thread.trap_no, &sc->trapno);
-       err |= __put_user(current->thread.error_code, &sc->err);
-       err |= __put_user(regs->ip, &sc->ip);
+               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.error_code, &sc->err);
+               put_user_ex(regs->ip, &sc->ip);
 #ifdef CONFIG_X86_32
-       err |= __put_user(regs->cs, (unsigned int __user *)&sc->cs);
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->sp, &sc->sp_at_signal);
-       err |= __put_user(regs->ss, (unsigned int __user *)&sc->ss);
+               put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->sp, &sc->sp_at_signal);
+               put_user_ex(regs->ss, (unsigned int __user *)&sc->ss);
 #else /* !CONFIG_X86_32 */
-       err |= __put_user(regs->flags, &sc->flags);
-       err |= __put_user(regs->cs, &sc->cs);
-       err |= __put_user(0, &sc->gs);
-       err |= __put_user(0, &sc->fs);
+               put_user_ex(regs->flags, &sc->flags);
+               put_user_ex(regs->cs, &sc->cs);
+               put_user_ex(0, &sc->gs);
+               put_user_ex(0, &sc->fs);
 #endif /* CONFIG_X86_32 */
 
-       err |= __put_user(fpstate, &sc->fpstate);
+               put_user_ex(fpstate, &sc->fpstate);
 
-       /* non-iBCS2 extensions.. */
-       err |= __put_user(mask, &sc->oldmask);
-       err |= __put_user(current->thread.cr2, &sc->cr2);
+               /* non-iBCS2 extensions.. */
+               put_user_ex(mask, &sc->oldmask);
+               put_user_ex(current->thread.cr2, &sc->cr2);
+       } put_user_catch(err);
 
        return err;
 }
@@ -189,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
 /*
  * Set up a signal frame.
  */
-#ifdef CONFIG_X86_32
-static const struct {
-       u16 poplmovl;
-       u32 val;
-       u16 int80;
-} __attribute__((packed)) retcode = {
-       0xb858,         /* popl %eax; movl $..., %eax */
-       __NR_sigreturn,
-       0x80cd,         /* int $0x80 */
-};
-
-static const struct {
-       u8  movl;
-       u32 val;
-       u16 int80;
-       u8  pad;
-} __attribute__((packed)) rt_retcode = {
-       0xb8,           /* movl $..., %eax */
-       __NR_rt_sigreturn,
-       0x80cd,         /* int $0x80 */
-       0
-};
 
 /*
  * Determine which stack to use..
  */
+static unsigned long align_sigframe(unsigned long sp)
+{
+#ifdef CONFIG_X86_32
+       /*
+        * Align the stack pointer according to the i386 ABI,
+        * i.e. so that on function entry ((sp + 4) & 15) == 0.
+        */
+       sp = ((sp + 4) & -16ul) - 4;
+#else /* !CONFIG_X86_32 */
+       sp = round_down(sp, 16) - 8;
+#endif
+       return sp;
+}
+
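
A quick standalone check of the two rules above; it only verifies the
arithmetic the function performs (32-bit: (sp + 4) ends up 16-byte aligned;
64-bit: sp ends up at 8 mod 16), not the ABI reasoning behind them:

#include <assert.h>
#include <stdio.h>

static unsigned long align32(unsigned long sp)
{
        return ((sp + 4) & -16ul) - 4;          /* i386 rule above */
}

static unsigned long align64(unsigned long sp)
{
        return (sp & ~15ul) - 8;                /* round_down(sp, 16) - 8 */
}

int main(void)
{
        unsigned long sp;

        for (sp = 0x1000; sp < 0x1100; sp += 7) {
                assert(((align32(sp) + 4) & 15) == 0);
                assert((align64(sp) & 15) == 8);
                assert(align32(sp) <= sp && align64(sp) < sp);
        }
        printf("alignment invariants hold\n");
        return 0;
}
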
 static inline void __user *
 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
-            void **fpstate)
+            void __user **fpstate)
 {
-       unsigned long sp;
-
        /* Default to using normal stack */
-       sp = regs->sp;
+       unsigned long sp = regs->sp;
+
+#ifdef CONFIG_X86_64
+       /* redzone */
+       sp -= 128;
+#endif /* CONFIG_X86_64 */
 
        /*
         * If we are on the alternate signal stack and would overflow it, don't.
@@ -236,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                if (sas_ss_flags(sp) == 0)
                        sp = current->sas_ss_sp + current->sas_ss_size;
        } else {
+#ifdef CONFIG_X86_32
                /* This is the legacy signal stack switching. */
                if ((regs->ss & 0xffff) != __USER_DS &&
                        !(ka->sa.sa_flags & SA_RESTORER) &&
                                ka->sa.sa_restorer)
                        sp = (unsigned long) ka->sa.sa_restorer;
+#endif /* CONFIG_X86_32 */
        }
 
        if (used_math()) {
-               sp = sp - sig_xstate_size;
-               *fpstate = (struct _fpstate *) sp;
+               sp -= sig_xstate_size;
+#ifdef CONFIG_X86_64
+               sp = round_down(sp, 64);
+#endif /* CONFIG_X86_64 */
+               *fpstate = (void __user *)sp;
+
                if (save_i387_xstate(*fpstate) < 0)
                        return (void __user *)-1L;
        }
 
-       sp -= frame_size;
-       /*
-        * Align the stack pointer according to the i386 ABI,
-        * i.e. so that on function entry ((sp + 4) & 15) == 0.
-        */
-       sp = ((sp + 4) & -16ul) - 4;
-
-       return (void __user *) sp;
+       return (void __user *)align_sigframe(sp - frame_size);
 }
 
+#ifdef CONFIG_X86_32
+static const struct {
+       u16 poplmovl;
+       u32 val;
+       u16 int80;
+} __attribute__((packed)) retcode = {
+       0xb858,         /* popl %eax; movl $..., %eax */
+       __NR_sigreturn,
+       0x80cd,         /* int $0x80 */
+};
+
+static const struct {
+       u8  movl;
+       u32 val;
+       u16 int80;
+       u8  pad;
+} __attribute__((packed)) rt_retcode = {
+       0xb8,           /* movl $..., %eax */
+       __NR_rt_sigreturn,
+       0x80cd,         /* int $0x80 */
+       0
+};
+
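
These packed structs encode real i386 machine code byte-for-byte. The
user-space dump below shows the same layout; it assumes a little-endian host
(true on x86) and uses 119 as an assumed value for the 32-bit __NR_sigreturn:

#include <stdio.h>
#include <stdint.h>

static const struct {
        uint16_t poplmovl;
        uint32_t val;
        uint16_t int80;
} __attribute__((packed)) retcode = {
        0xb858,         /* bytes 0x58 0xb8: popl %eax; movl $...,%eax */
        119,            /* assumed __NR_sigreturn on 32-bit x86 */
        0x80cd,         /* bytes 0xcd 0x80: int $0x80 */
};

int main(void)
{
        const unsigned char *p = (const unsigned char *)&retcode;
        size_t i;

        for (i = 0; i < sizeof(retcode); i++)
                printf("%02x ", p[i]);
        printf("\n");           /* expect: 58 b8 77 00 00 00 cd 80 */
        return 0;
}
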
 static int
 __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
              struct pt_regs *regs)
@@ -336,43 +351,41 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                return -EFAULT;
 
-       err |= __put_user(sig, &frame->sig);
-       err |= __put_user(&frame->info, &frame->pinfo);
-       err |= __put_user(&frame->uc, &frame->puc);
-       err |= copy_siginfo_to_user(&frame->info, info);
-       if (err)
-               return -EFAULT;
+       put_user_try {
+               put_user_ex(sig, &frame->sig);
+               put_user_ex(&frame->info, &frame->pinfo);
+               put_user_ex(&frame->uc, &frame->puc);
+               err |= copy_siginfo_to_user(&frame->info, info);
 
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
-                               regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-       if (err)
-               return -EFAULT;
-
-       /* Set up to return from userspace.  */
-       restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
-       if (ka->sa.sa_flags & SA_RESTORER)
-               restorer = ka->sa.sa_restorer;
-       err |= __put_user(restorer, &frame->pretcode);
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
+                                       regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+               /* Set up to return from userspace.  */
+               restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
+               if (ka->sa.sa_flags & SA_RESTORER)
+                       restorer = ka->sa.sa_restorer;
+               put_user_ex(restorer, &frame->pretcode);
 
-       /*
-        * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
-        *
-        * WE DO NOT USE IT ANY MORE! It's only left here for historical
-        * reasons and because gdb uses it as a signature to notice
-        * signal handler stack frames.
-        */
-       err |= __put_user(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+               /*
+                * This is movl $__NR_rt_sigreturn, %ax ; int $0x80
+                *
+                * WE DO NOT USE IT ANY MORE! It's only left here for historical
+                * reasons and because gdb uses it as a signature to notice
+                * signal handler stack frames.
+                */
+               put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
+       } put_user_catch(err);
 
        if (err)
                return -EFAULT;
@@ -392,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        return 0;
 }
 #else /* !CONFIG_X86_32 */
-/*
- * Determine which stack to use..
- */
-static void __user *
-get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
-{
-       /* Default to using normal stack - redzone*/
-       sp -= 128;
-
-       /* This is the X/Open sanctioned signal stack switching.  */
-       if (ka->sa.sa_flags & SA_ONSTACK) {
-               if (sas_ss_flags(sp) == 0)
-                       sp = current->sas_ss_sp + current->sas_ss_size;
-       }
-
-       return (void __user *)round_down(sp - size, 64);
-}
-
 static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                            sigset_t *set, struct pt_regs *regs)
 {
@@ -418,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        int err = 0;
        struct task_struct *me = current;
 
-       if (used_math()) {
-               fp = get_stack(ka, regs->sp, sig_xstate_size);
-               frame = (void __user *)round_down(
-                       (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
-
-               if (save_i387_xstate(fp) < 0)
-                       return -EFAULT;
-       } else
-               frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
+       frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);
 
        if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
                return -EFAULT;
@@ -436,28 +423,30 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                        return -EFAULT;
        }
 
-       /* Create the ucontext.  */
-       if (cpu_has_xsave)
-               err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
-       else
-               err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
-       err |= __put_user(sas_ss_flags(regs->sp),
-                         &frame->uc.uc_stack.ss_flags);
-       err |= __put_user(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
-       err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
-       err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
-
-       /* Set up to return from userspace.  If provided, use a stub
-          already in userspace.  */
-       /* x86-64 should always use SA_RESTORER. */
-       if (ka->sa.sa_flags & SA_RESTORER) {
-               err |= __put_user(ka->sa.sa_restorer, &frame->pretcode);
-       } else {
-               /* could use a vstub here */
-               return -EFAULT;
-       }
+       put_user_try {
+               /* Create the ucontext.  */
+               if (cpu_has_xsave)
+                       put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
+               else
+                       put_user_ex(0, &frame->uc.uc_flags);
+               put_user_ex(0, &frame->uc.uc_link);
+               put_user_ex(me->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
+               put_user_ex(sas_ss_flags(regs->sp),
+                           &frame->uc.uc_stack.ss_flags);
+               put_user_ex(me->sas_ss_size, &frame->uc.uc_stack.ss_size);
+               err |= setup_sigcontext(&frame->uc.uc_mcontext, fp, regs, set->sig[0]);
+               err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+               /* Set up to return from userspace.  If provided, use a stub
+                  already in userspace.  */
+               /* x86-64 should always use SA_RESTORER. */
+               if (ka->sa.sa_flags & SA_RESTORER) {
+                       put_user_ex(ka->sa.sa_restorer, &frame->pretcode);
+               } else {
+                       /* could use a vstub here */
+                       err |= -EFAULT;
+               }
+       } put_user_catch(err);
 
        if (err)
                return -EFAULT;
@@ -509,31 +498,41 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
              struct old_sigaction __user *oact)
 {
        struct k_sigaction new_ka, old_ka;
-       int ret;
+       int ret = 0;
 
        if (act) {
                old_sigset_t mask;
 
-               if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
-                   __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                   __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+               if (!access_ok(VERIFY_READ, act, sizeof(*act)))
                        return -EFAULT;
 
-               __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-               __get_user(mask, &act->sa_mask);
+               get_user_try {
+                       get_user_ex(new_ka.sa.sa_handler, &act->sa_handler);
+                       get_user_ex(new_ka.sa.sa_flags, &act->sa_flags);
+                       get_user_ex(mask, &act->sa_mask);
+                       get_user_ex(new_ka.sa.sa_restorer, &act->sa_restorer);
+               } get_user_catch(ret);
+
+               if (ret)
+                       return -EFAULT;
                siginitset(&new_ka.sa.sa_mask, mask);
        }
 
        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
 
        if (!ret && oact) {
-               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
-                   __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                   __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+               if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)))
                        return -EFAULT;
 
-               __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-               __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+               put_user_try {
+                       put_user_ex(old_ka.sa.sa_handler, &oact->sa_handler);
+                       put_user_ex(old_ka.sa.sa_flags, &oact->sa_flags);
+                       put_user_ex(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+                       put_user_ex(old_ka.sa.sa_restorer, &oact->sa_restorer);
+               } put_user_catch(ret);
+
+               if (ret)
+                       return -EFAULT;
        }
 
        return ret;
@@ -541,14 +540,9 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
 #endif /* CONFIG_X86_32 */
 
 #ifdef CONFIG_X86_32
-asmlinkage int sys_sigaltstack(unsigned long bx)
+int sys_sigaltstack(struct pt_regs *regs)
 {
-       /*
-        * This is needed to make gcc realize it doesn't own the
-        * "struct pt_regs"
-        */
-       struct pt_regs *regs = (struct pt_regs *)&bx;
-       const stack_t __user *uss = (const stack_t __user *)bx;
+       const stack_t __user *uss = (const stack_t __user *)regs->bx;
        stack_t __user *uoss = (stack_t __user *)regs->cx;
 
        return do_sigaltstack(uss, uoss, regs->sp);
@@ -566,14 +560,12 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
  * Do a signal return; undo the signal stack.
  */
 #ifdef CONFIG_X86_32
-asmlinkage unsigned long sys_sigreturn(unsigned long __unused)
+unsigned long sys_sigreturn(struct pt_regs *regs)
 {
        struct sigframe __user *frame;
-       struct pt_regs *regs;
        unsigned long ax;
        sigset_t set;
 
-       regs = (struct pt_regs *) &__unused;
        frame = (struct sigframe __user *)(regs->sp - 8);
 
        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
@@ -600,7 +592,7 @@ badframe:
 }
 #endif /* CONFIG_X86_32 */
 
-static long do_rt_sigreturn(struct pt_regs *regs)
+long sys_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_sigframe __user *frame;
        unsigned long ax;
@@ -631,25 +623,6 @@ badframe:
        return 0;
 }
 
-#ifdef CONFIG_X86_32
-/*
- * Note: do not pass in pt_regs directly as with tail-call optimization
- * GCC will incorrectly stomp on the caller's frame and corrupt user-space
- * register state:
- */
-asmlinkage int sys_rt_sigreturn(unsigned long __unused)
-{
-       struct pt_regs *regs = (struct pt_regs *)&__unused;
-
-       return do_rt_sigreturn(regs);
-}
-#else /* !CONFIG_X86_32 */
-asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
-{
-       return do_rt_sigreturn(regs);
-}
-#endif /* CONFIG_X86_32 */
-
 /*
  * OK, we're invoking a handler:
  */
index e6faa3316bd2e04311313ce4c7c68ef3438ef274..13f33ea8ccaaf1ae8e23cc2a39f57d84d6d3c910 100644 (file)
@@ -2,7 +2,7 @@
  *     Intel SMP support routines.
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *      (c) 2002,2003 Andi Kleen, SuSE Labs.
  *
  *     i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
@@ -26,8 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
-#include <mach_ipi.h>
-#include <mach_apic.h>
+#include <asm/apic.h>
 /*
  *     Some notes on x86 processor bugs affecting SMP operation:
  *
@@ -118,12 +117,12 @@ static void native_smp_send_reschedule(int cpu)
                WARN_ON(1);
                return;
        }
-       send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
+       apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
 }
 
 void native_send_call_func_single_ipi(int cpu)
 {
-       send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
+       apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
 }
 
 void native_send_call_func_ipi(const struct cpumask *mask)
@@ -131,7 +130,7 @@ void native_send_call_func_ipi(const struct cpumask *mask)
        cpumask_var_t allbutself;
 
        if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
-               send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+               apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
                return;
        }
 
@@ -140,9 +139,9 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 
        if (cpumask_equal(mask, allbutself) &&
            cpumask_equal(cpu_online_mask, cpu_callout_mask))
-               send_IPI_allbutself(CALL_FUNCTION_VECTOR);
+               apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
-               send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
+               apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
 
        free_cpumask_var(allbutself);
 }
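
Context for the conversion in this hunk: instead of a compile-time-selected
send_IPI_mask(), call sites now go through a single 'apic' pointer to the
active driver's function table. A minimal sketch of that dispatch pattern,
with invented driver and field names:

#include <stdio.h>

struct apic_driver {
        const char *name;
        void (*send_IPI_mask)(unsigned int mask, unsigned int vector);
        void (*send_IPI_allbutself)(unsigned int vector);
};

static void flat_send_IPI_mask(unsigned int mask, unsigned int vector)
{
        printf("flat: vector %#x to cpu mask %#x\n", vector, mask);
}

static void flat_send_IPI_allbutself(unsigned int vector)
{
        printf("flat: vector %#x to all but self\n", vector);
}

static struct apic_driver apic_flat = {
        .name                   = "flat",
        .send_IPI_mask          = flat_send_IPI_mask,
        .send_IPI_allbutself    = flat_send_IPI_allbutself,
};

/* set once at boot to whichever driver matches the hardware */
static struct apic_driver *apic = &apic_flat;

int main(void)
{
        apic->send_IPI_mask(0x2, 0xfd);         /* reschedule one CPU */
        apic->send_IPI_allbutself(0xfb);        /* broadcast */
        return 0;
}
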
index bb1a3b1fc87faeb43baa876f725a3454673abcdc..ef7d10170c30d1e98291cfdb0b4d0f17eda49b28 100644 (file)
@@ -2,7 +2,7 @@
  *     x86 SMP booting functions
  *
  *     (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
- *     (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
+ *     (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
  *     Copyright 2001 Andi Kleen, SuSE Labs.
  *
  *     Much of the core SMP work is based on previous work by Thomas Radke, to
@@ -53,7 +53,6 @@
 #include <asm/nmi.h>
 #include <asm/irq.h>
 #include <asm/idle.h>
-#include <asm/smp.h>
 #include <asm/trampoline.h>
 #include <asm/cpu.h>
 #include <asm/numa.h>
 #include <asm/tlbflush.h>
 #include <asm/mtrr.h>
 #include <asm/vmi.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 #include <asm/setup.h>
+#include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
 
-#include <mach_apic.h>
-#include <mach_wakecpu.h>
-#include <smpboot_hooks.h>
+#include <asm/smpboot_hooks.h>
 
 #ifdef CONFIG_X86_32
 u8 apicid_2_node[MAX_APICID];
@@ -114,11 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static atomic_t init_deasserted;
-
-
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
+atomic_t init_deasserted;
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
 
@@ -163,7 +157,7 @@ static void map_cpu_to_logical_apicid(void)
 {
        int cpu = smp_processor_id();
        int apicid = logical_smp_processor_id();
-       int node = apicid_to_node(apicid);
+       int node = apic->apicid_to_node(apicid);
 
        if (!node_online(node))
                node = first_online_node;
@@ -196,7 +190,8 @@ static void __cpuinit smp_callin(void)
         * our local APIC.  We have to wait for the IPI or we'll
         * lock up on an APIC access.
         */
-       wait_for_init_deassert(&init_deasserted);
+       if (apic->wait_for_init_deassert)
+               apic->wait_for_init_deassert(&init_deasserted);
 
        /*
         * (This works even if the APIC is not enabled.)
@@ -243,7 +238,8 @@ static void __cpuinit smp_callin(void)
         */
 
        pr_debug("CALLIN, before setup_local_APIC().\n");
-       smp_callin_clear_local_apic();
+       if (apic->smp_callin_clear_local_apic)
+               apic->smp_callin_clear_local_apic();
        setup_local_APIC();
        end_local_APIC_setup();
        map_cpu_to_logical_apicid();
@@ -271,8 +267,6 @@ static void __cpuinit smp_callin(void)
        cpumask_set_cpu(cpuid, cpu_callin_mask);
 }
 
-static int __cpuinitdata unsafe_smp;
-
 /*
  * Activate a secondary processor.
  */
@@ -340,76 +334,6 @@ notrace static void __cpuinit start_secondary(void *unused)
        cpu_idle();
 }
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
-{
-       /*
-        * Mask B, Pentium, but not Pentium MMX
-        */
-       if (c->x86_vendor == X86_VENDOR_INTEL &&
-           c->x86 == 5 &&
-           c->x86_mask >= 1 && c->x86_mask <= 4 &&
-           c->x86_model <= 3)
-               /*
-                * Remember we have B step Pentia with bugs
-                */
-               smp_b_stepping = 1;
-
-       /*
-        * Certain Athlons might work (for various values of 'work') in SMP
-        * but they are not certified as MP capable.
-        */
-       if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-               if (num_possible_cpus() == 1)
-                       goto valid_k7;
-
-               /* Athlon 660/661 is valid. */
-               if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
-                   (c->x86_mask == 1)))
-                       goto valid_k7;
-
-               /* Duron 670 is valid */
-               if ((c->x86_model == 7) && (c->x86_mask == 0))
-                       goto valid_k7;
-
-               /*
-                * Athlon 662, Duron 671, and Athlon >model 7 have capability
-                * bit. It's worth noting that the A5 stepping (662) of some
-                * Athlon XP's have the MP bit set.
-                * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
-                * more.
-                */
-               if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
-                   ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
-                    (c->x86_model > 7))
-                       if (cpu_has_mp)
-                               goto valid_k7;
-
-               /* If we get here, not a certified SMP capable AMD system. */
-               unsafe_smp = 1;
-       }
-
-valid_k7:
-       ;
-}
-
-static void __cpuinit smp_checks(void)
-{
-       if (smp_b_stepping)
-               printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
-                                   "with B stepping processors.\n");
-
-       /*
-        * Don't taint if we are running SMP kernel on a single non-MP
-        * approved Athlon
-        */
-       if (unsafe_smp && num_online_cpus() > 1) {
-               printk(KERN_INFO "WARNING: This combination of AMD"
-                       "processors is not suitable for SMP.\n");
-               add_taint(TAINT_UNSAFE_SMP);
-       }
-}
-
 /*
  * The bootstrap kernel entry code has set these up. Save them for
  * a given CPU
@@ -423,7 +347,6 @@ void __cpuinit smp_store_cpu_info(int id)
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
-       smp_apply_quirks(c);
 }
 
 
@@ -583,7 +506,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
        /* Target chip */
        /* Boot on the stack */
        /* Kick the second */
-       apic_icr_write(APIC_DM_NMI | APIC_DEST_LOGICAL, logical_apicid);
+       apic_icr_write(APIC_DM_NMI | apic->dest_logical, logical_apicid);
 
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
@@ -614,12 +537,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
        unsigned long send_status, accept_status = 0;
        int maxlvt, num_starts, j;
 
-       if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
-               send_status = uv_wakeup_secondary(phys_apicid, start_eip);
-               atomic_set(&init_deasserted, 1);
-               return send_status;
-       }
-
        maxlvt = lapic_get_maxlvt();
 
        /*
@@ -745,78 +662,23 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
        complete(&c_idle->done);
 }
 
-#ifdef CONFIG_X86_64
-
-/* __ref because it's safe to call free_bootmem when after_bootmem == 0. */
-static void __ref free_bootmem_pda(struct x8664_pda *oldpda)
-{
-       if (!after_bootmem)
-               free_bootmem((unsigned long)oldpda, sizeof(*oldpda));
-}
-
-/*
- * Allocate node local memory for the AP pda.
- *
- * Must be called after the _cpu_pda pointer table is initialized.
- */
-int __cpuinit get_local_pda(int cpu)
-{
-       struct x8664_pda *oldpda, *newpda;
-       unsigned long size = sizeof(struct x8664_pda);
-       int node = cpu_to_node(cpu);
-
-       if (cpu_pda(cpu) && !cpu_pda(cpu)->in_bootmem)
-               return 0;
-
-       oldpda = cpu_pda(cpu);
-       newpda = kmalloc_node(size, GFP_ATOMIC, node);
-       if (!newpda) {
-               printk(KERN_ERR "Could not allocate node local PDA "
-                       "for CPU %d on node %d\n", cpu, node);
-
-               if (oldpda)
-                       return 0;       /* have a usable pda */
-               else
-                       return -1;
-       }
-
-       if (oldpda) {
-               memcpy(newpda, oldpda, size);
-               free_bootmem_pda(oldpda);
-       }
-
-       newpda->in_bootmem = 0;
-       cpu_pda(cpu) = newpda;
-       return 0;
-}
-#endif /* CONFIG_X86_64 */
-
-static int __cpuinit do_boot_cpu(int apicid, int cpu)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
+ * Returns zero if CPU booted OK, else error code from
+ * ->wakeup_secondary_cpu.
  */
+static int __cpuinit do_boot_cpu(int apicid, int cpu)
 {
        unsigned long boot_error = 0;
-       int timeout;
        unsigned long start_ip;
-       unsigned short nmi_high = 0, nmi_low = 0;
+       int timeout;
        struct create_idle c_idle = {
-               .cpu = cpu,
-               .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
+               .cpu    = cpu,
+               .done   = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
        };
-       INIT_WORK(&c_idle.work, do_fork_idle);
 
-#ifdef CONFIG_X86_64
-       /* Allocate node local memory for AP pdas */
-       if (cpu > 0) {
-               boot_error = get_local_pda(cpu);
-               if (boot_error)
-                       goto restore_state;
-                       /* if can't get pda memory, can't start cpu */
-       }
-#endif
+       INIT_WORK(&c_idle.work, do_fork_idle);
 
        alternatives_smp_switch(1);
 
@@ -847,14 +709,16 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
        set_idle_for_cpu(cpu, c_idle.idle);
 do_rest:
-#ifdef CONFIG_X86_32
        per_cpu(current_task, cpu) = c_idle.idle;
-       init_gdt(cpu);
+#ifdef CONFIG_X86_32
        /* Stack for startup_32 can be just as for start_secondary onwards */
        irq_ctx_init(cpu);
 #else
-       cpu_pda(cpu)->pcurrent = c_idle.idle;
        clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
+       initial_gs = per_cpu_offset(cpu);
+       per_cpu(kernel_stack, cpu) =
+               (unsigned long)task_stack_page(c_idle.idle) -
+               KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
        early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
        initial_code = (unsigned long)start_secondary;
@@ -878,8 +742,6 @@ do_rest:
 
                pr_debug("Setting warm reset code and vector.\n");
 
-               store_NMI_vector(&nmi_high, &nmi_low);
-
                smpboot_setup_warm_reset_vector(start_ip);
                /*
                 * Be paranoid about clearing APIC errors.
@@ -891,9 +753,13 @@ do_rest:
        }
 
        /*
-        * Starting actual IPI sequence...
+        * Kick the secondary CPU. Use the method in the APIC driver
+        * if it's defined - or use an INIT boot APIC message otherwise:
         */
-       boot_error = wakeup_secondary_cpu(apicid, start_ip);
+       if (apic->wakeup_secondary_cpu)
+               boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
+       else
+               boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 
        if (!boot_error) {
                /*
@@ -927,13 +793,11 @@ do_rest:
                        else
                                /* trampoline code not run */
                                printk(KERN_ERR "Not responding.\n");
-                       if (get_uv_system_type() != UV_NON_UNIQUE_APIC)
-                               inquire_remote_apic(apicid);
+                       if (apic->inquire_remote_apic)
+                               apic->inquire_remote_apic(apicid);
                }
        }
-#ifdef CONFIG_X86_64
-restore_state:
-#endif
+
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                numa_remove_cpu(cpu); /* was set by numa_add_cpu */
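
Several of the hooks touched in this hunk (wakeup_secondary_cpu,
inquire_remote_apic, and earlier wait_for_init_deassert) are now optional: a
NULL pointer in the driver means "take the generic path". A small sketch of
that NULL-checked-hook-with-fallback shape, with all names invented:

#include <stdio.h>

struct wake_ops {
        /* optional: NULL means "use the generic INIT/SIPI sequence" */
        int (*wakeup_secondary_cpu)(int apicid, unsigned long start_ip);
};

static int wakeup_via_init(int apicid, unsigned long start_ip)
{
        printf("generic INIT/SIPI: apicid=%d start_ip=%#lx\n",
               apicid, start_ip);
        return 0;
}

static int wakeup_via_nmi(int apicid, unsigned long start_ip)
{
        printf("driver NMI wakeup: apicid=%d start_ip=%#lx\n",
               apicid, start_ip);
        return 0;
}

static int kick_cpu(const struct wake_ops *ops, int apicid, unsigned long ip)
{
        if (ops->wakeup_secondary_cpu)
                return ops->wakeup_secondary_cpu(apicid, ip);
        return wakeup_via_init(apicid, ip);     /* fallback */
}

int main(void)
{
        struct wake_ops generic = { 0 };
        struct wake_ops special = { .wakeup_secondary_cpu = wakeup_via_nmi };

        kick_cpu(&generic, 1, 0x9f000UL);
        kick_cpu(&special, 2, 0x9f000UL);
        return 0;
}
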
@@ -961,7 +825,7 @@ restore_state:
 
 int __cpuinit native_cpu_up(unsigned int cpu)
 {
-       int apicid = cpu_present_to_apicid(cpu);
+       int apicid = apic->cpu_present_to_apicid(cpu);
        unsigned long flags;
        int err;
 
@@ -1054,14 +918,14 @@ static int __init smp_sanity_check(unsigned max_cpus)
 {
        preempt_disable();
 
-#if defined(CONFIG_X86_PC) && defined(CONFIG_X86_32)
+#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
        if (def_to_bigsmp && nr_cpu_ids > 8) {
                unsigned int cpu;
                unsigned nr;
 
                printk(KERN_WARNING
                       "More than 8 CPUs detected - skipping them.\n"
-                      "Use CONFIG_X86_GENERICARCH and CONFIG_X86_BIGSMP.\n");
+                      "Use CONFIG_X86_BIGSMP.\n");
 
                nr = 0;
                for_each_present_cpu(cpu) {
@@ -1107,7 +971,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
         * Should not be necessary because the MP table should list the boot
         * CPU too, but we do it for the sake of robustness anyway.
         */
-       if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
+       if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
                printk(KERN_NOTICE
                        "weird, boot CPU (#%d) not listed by the BIOS.\n",
                        boot_cpu_physical_apicid);
@@ -1125,6 +989,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
                printk(KERN_ERR "... forcing use of dummy APIC emulation."
                                "(tell your hw vendor)\n");
                smpboot_clear_io_apic();
+               arch_disable_smp_support();
                return -1;
        }
 
@@ -1181,9 +1046,9 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        current_thread_info()->cpu = 0;  /* needed? */
        set_cpu_sibling_map(0);
 
-#ifdef CONFIG_X86_64
        enable_IR_x2apic();
-       setup_apic_routing();
+#ifdef CONFIG_X86_64
+       default_setup_apic_routing();
 #endif
 
        if (smp_sanity_check(max_cpus) < 0) {
@@ -1207,18 +1072,18 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
         */
        setup_local_APIC();
 
-#ifdef CONFIG_X86_64
        /*
         * Enable IO APIC before setting up error vector
         */
        if (!skip_ioapic_setup && nr_ioapics)
                enable_IO_APIC();
-#endif
+
        end_local_APIC_setup();
 
        map_cpu_to_logical_apicid();
 
-       setup_portio_remap();
+       if (apic->setup_portio_remap)
+               apic->setup_portio_remap();
 
        smpboot_setup_io_apic();
        /*
@@ -1240,10 +1105,7 @@ out:
 void __init native_smp_prepare_boot_cpu(void)
 {
        int me = smp_processor_id();
-#ifdef CONFIG_X86_32
-       init_gdt(me);
-#endif
-       switch_to_new_gdt();
+       switch_to_new_gdt(me);
        /* already set me in cpu_online_mask in boot_cpu_init() */
        cpumask_set_cpu(me, cpu_callout_mask);
        per_cpu(cpu_state, me) = CPU_ONLINE;
@@ -1254,7 +1116,6 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
        pr_debug("Boot done.\n");
 
        impress_friends();
-       smp_checks();
 #ifdef CONFIG_X86_IO_APIC
        setup_ioapic_dest();
 #endif
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c
deleted file mode 100644 (file)
index 397e309..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * SMP stuff which is common to all sub-architectures.
- */
-#include <linux/module.h>
-#include <asm/smp.h>
-
-#ifdef CONFIG_X86_32
-DEFINE_PER_CPU(unsigned long, this_cpu_off);
-EXPORT_PER_CPU_SYMBOL(this_cpu_off);
-
-/*
- * Initialize the CPU's GDT.  This is either the boot CPU doing itself
- * (still using the master per-cpu area), or a CPU doing it for a
- * secondary which will soon come up.
- */
-__cpuinit void init_gdt(int cpu)
-{
-       struct desc_struct gdt;
-
-       pack_descriptor(&gdt, __per_cpu_offset[cpu], 0xFFFFF,
-                       0x2 | DESCTYPE_S, 0x8);
-       gdt.s = 1;
-
-       write_gdt_entry(get_cpu_gdt_table(cpu),
-                       GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
-
-       per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
-       per_cpu(cpu_number, cpu) = cpu;
-}
-#endif
index 10786af95545ea37654bcf7125d7a880edc03540..f7bddc2e37d1bbf19a86f03b9e3a3a0da70ab199 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Stack trace management functions
  *
- *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
  */
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
diff --git a/arch/x86/kernel/summit_32.c b/arch/x86/kernel/summit_32.c
deleted file mode 100644 (file)
index 7b98785..0000000
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- * IBM Summit-Specific Code
- *
- * Written By: Matthew Dobson, IBM Corporation
- *
- * Copyright (c) 2003 IBM Corp.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <colpatch@us.ibm.com>
- *
- */
-
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/io.h>
-#include <asm/bios_ebda.h>
-#include <asm/summit/mpparse.h>
-
-static struct rio_table_hdr *rio_table_hdr __initdata;
-static struct scal_detail   *scal_devs[MAX_NUMNODES] __initdata;
-static struct rio_detail    *rio_devs[MAX_NUMNODES*4] __initdata;
-
-#ifndef CONFIG_X86_NUMAQ
-static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata;
-#endif
-
-static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
-{
-       int twister = 0, node = 0;
-       int i, bus, num_buses;
-
-       for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
-               if (rio_devs[i]->node_id == rio_devs[wpeg_num]->owner_id) {
-                       twister = rio_devs[i]->owner_id;
-                       break;
-               }
-       }
-       if (i == rio_table_hdr->num_rio_dev) {
-               printk(KERN_ERR "%s: Couldn't find owner Cyclone for Winnipeg!\n", __func__);
-               return last_bus;
-       }
-
-       for (i = 0; i < rio_table_hdr->num_scal_dev; i++) {
-               if (scal_devs[i]->node_id == twister) {
-                       node = scal_devs[i]->node_id;
-                       break;
-               }
-       }
-       if (i == rio_table_hdr->num_scal_dev) {
-               printk(KERN_ERR "%s: Couldn't find owner Twister for Cyclone!\n", __func__);
-               return last_bus;
-       }
-
-       switch (rio_devs[wpeg_num]->type) {
-       case CompatWPEG:
-               /*
-                * The Compatibility Winnipeg controls the 2 legacy buses,
-                * the 66MHz PCI bus [2 slots] and the 2 "extra" buses in case
-                * a PCI-PCI bridge card is used in either slot: total 5 buses.
-                */
-               num_buses = 5;
-               break;
-       case AltWPEG:
-               /*
-                * The Alternate Winnipeg controls the 2 133MHz buses [1 slot
-                * each], their 2 "extra" buses, the 100MHz bus [2 slots] and
-                * the "extra" buses for each of those slots: total 7 buses.
-                */
-               num_buses = 7;
-               break;
-       case LookOutAWPEG:
-       case LookOutBWPEG:
-               /*
-                * A Lookout Winnipeg controls 3 100MHz buses [2 slots each]
-                * & the "extra" buses for each of those slots: total 9 buses.
-                */
-               num_buses = 9;
-               break;
-       default:
-               printk(KERN_INFO "%s: Unsupported Winnipeg type!\n", __func__);
-               return last_bus;
-       }
-
-       for (bus = last_bus; bus < last_bus + num_buses; bus++)
-               mp_bus_id_to_node[bus] = node;
-       return bus;
-}
-
-static int __init build_detail_arrays(void)
-{
-       unsigned long ptr;
-       int i, scal_detail_size, rio_detail_size;
-
-       if (rio_table_hdr->num_scal_dev > MAX_NUMNODES) {
-               printk(KERN_WARNING "%s: MAX_NUMNODES too low!  Defined as %d, but system has %d nodes.\n", __func__, MAX_NUMNODES, rio_table_hdr->num_scal_dev);
-               return 0;
-       }
-
-       switch (rio_table_hdr->version) {
-       default:
-               printk(KERN_WARNING "%s: Invalid Rio Grande Table Version: %d\n", __func__, rio_table_hdr->version);
-               return 0;
-       case 2:
-               scal_detail_size = 11;
-               rio_detail_size = 13;
-               break;
-       case 3:
-               scal_detail_size = 12;
-               rio_detail_size = 15;
-               break;
-       }
-
-       ptr = (unsigned long)rio_table_hdr + 3;
-       for (i = 0; i < rio_table_hdr->num_scal_dev; i++, ptr += scal_detail_size)
-               scal_devs[i] = (struct scal_detail *)ptr;
-
-       for (i = 0; i < rio_table_hdr->num_rio_dev; i++, ptr += rio_detail_size)
-               rio_devs[i] = (struct rio_detail *)ptr;
-
-       return 1;
-}
-
-void __init setup_summit(void)
-{
-       unsigned long           ptr;
-       unsigned short          offset;
-       int                     i, next_wpeg, next_bus = 0;
-
-       /* The pointer to the EBDA is stored in the word @ phys 0x40E(40:0E) */
-       ptr = get_bios_ebda();
-       ptr = (unsigned long)phys_to_virt(ptr);
-
-       rio_table_hdr = NULL;
-       offset = 0x180;
-       while (offset) {
-               /* The block id is stored in the 2nd word */
-               if (*((unsigned short *)(ptr + offset + 2)) == 0x4752) {
-                       /* set the pointer past the offset & block id */
-                       rio_table_hdr = (struct rio_table_hdr *)(ptr + offset + 4);
-                       break;
-               }
-               /* The next offset is stored in the 1st word.  0 means no more */
-               offset = *((unsigned short *)(ptr + offset));
-       }
-       if (!rio_table_hdr) {
-               printk(KERN_ERR "%s: Unable to locate Rio Grande Table in EBDA - bailing!\n", __func__);
-               return;
-       }
-
-       if (!build_detail_arrays())
-               return;
-
-       /* The first Winnipeg we're looking for has an index of 0 */
-       next_wpeg = 0;
-       do {
-               for (i = 0; i < rio_table_hdr->num_rio_dev; i++) {
-                       if (is_WPEG(rio_devs[i]) && rio_devs[i]->WP_index == next_wpeg) {
-                               /* It's the Winnipeg we're looking for! */
-                               next_bus = setup_pci_node_map_for_wpeg(i, next_bus);
-                               next_wpeg++;
-                               break;
-                       }
-               }
-               /*
-                * If we go through all Rio devices and don't find one with
-                * the next index, it means we've found all the Winnipegs,
-                * and thus all the PCI buses.
-                */
-               if (i == rio_table_hdr->num_rio_dev)
-                       next_wpeg = 0;
-       } while (next_wpeg != 0);
-}
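
For reference, the scan in the deleted setup_summit() walks a chain of EBDA
blocks in which the first word of a block holds the offset of the next block
(0 terminates) and the second word holds a block id. A toy user-space model
of that walk; the buffer contents here are fabricated:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static unsigned char ebda[0x400];

static void put16(unsigned off, uint16_t v) { memcpy(ebda + off, &v, 2); }
static uint16_t get16(unsigned off)
{
        uint16_t v;
        memcpy(&v, ebda + off, 2);
        return v;
}

int main(void)
{
        /* fabricate a chain: block at 0x180 -> block at 0x200 -> end */
        put16(0x180 + 0, 0x200);        /* offset of next block */
        put16(0x180 + 2, 0x1111);       /* some other block id */
        put16(0x200 + 0, 0);            /* end of chain */
        put16(0x200 + 2, 0x4752);       /* the id being searched for */

        for (uint16_t off = 0x180; off; off = get16(off)) {
                if (get16(off + 2) == 0x4752) {
                        printf("table header at offset %#x\n",
                               (unsigned)(off + 4));
                        break;
                }
        }
        return 0;
}
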
index e2e86a08f31d29a52fb116ba8d3befcc2424d8ae..3bdb64829b82718f42bafed884f55d1dbe9e65a2 100644 (file)
@@ -1,7 +1,7 @@
 ENTRY(sys_call_table)
        .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
        .long sys_exit
-       .long sys_fork
+       .long ptregs_fork
        .long sys_read
        .long sys_write
        .long sys_open          /* 5 */
@@ -10,7 +10,7 @@ ENTRY(sys_call_table)
        .long sys_creat
        .long sys_link
        .long sys_unlink        /* 10 */
-       .long sys_execve
+       .long ptregs_execve
        .long sys_chdir
        .long sys_time
        .long sys_mknod
@@ -109,17 +109,17 @@ ENTRY(sys_call_table)
        .long sys_newlstat
        .long sys_newfstat
        .long sys_uname
-       .long sys_iopl          /* 110 */
+       .long ptregs_iopl       /* 110 */
        .long sys_vhangup
        .long sys_ni_syscall    /* old "idle" system call */
-       .long sys_vm86old
+       .long ptregs_vm86old
        .long sys_wait4
        .long sys_swapoff       /* 115 */
        .long sys_sysinfo
        .long sys_ipc
        .long sys_fsync
-       .long sys_sigreturn
-       .long sys_clone         /* 120 */
+       .long ptregs_sigreturn
+       .long ptregs_clone      /* 120 */
        .long sys_setdomainname
        .long sys_newuname
        .long sys_modify_ldt
@@ -165,14 +165,14 @@ ENTRY(sys_call_table)
        .long sys_mremap
        .long sys_setresuid16
        .long sys_getresuid16   /* 165 */
-       .long sys_vm86
+       .long ptregs_vm86
        .long sys_ni_syscall    /* Old sys_query_module */
        .long sys_poll
        .long sys_nfsservctl
        .long sys_setresgid16   /* 170 */
        .long sys_getresgid16
        .long sys_prctl
-       .long sys_rt_sigreturn
+       .long ptregs_rt_sigreturn
        .long sys_rt_sigaction
        .long sys_rt_sigprocmask        /* 175 */
        .long sys_rt_sigpending
@@ -185,11 +185,11 @@ ENTRY(sys_call_table)
        .long sys_getcwd
        .long sys_capget
        .long sys_capset        /* 185 */
-       .long sys_sigaltstack
+       .long ptregs_sigaltstack
        .long sys_sendfile
        .long sys_ni_syscall    /* reserved for streams1 */
        .long sys_ni_syscall    /* reserved for streams2 */
-       .long sys_vfork         /* 190 */
+       .long ptregs_vfork      /* 190 */
        .long sys_getrlimit
        .long sys_mmap2
        .long sys_truncate64
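
The sys_* to ptregs_* renames above repoint table slots at assembly stubs
that pass the handler the full register frame. An illustrative user-space
analogue of a function-pointer syscall table; every type and name below is a
toy, not the kernel's:

#include <stdio.h>

struct pt_regs { long bx, cx, dx; };            /* toy register frame */

typedef long (*syscall_fn)(struct pt_regs *);

static long sys_answer(struct pt_regs *regs)
{
        (void)regs;                             /* ignores the frame */
        return 42;
}

static long ptregs_fork_stub(struct pt_regs *regs)
{
        /* a real ptregs_* stub forwards the frame so the handler can
         * copy the caller's registers into the child */
        return regs->bx + 1;
}

static const syscall_fn sys_call_table[] = {
        sys_answer,             /* 0 */
        ptregs_fork_stub,       /* 1 */
};

int main(void)
{
        struct pt_regs regs = { .bx = 7, .cx = 0, .dx = 0 };

        printf("nr 0 -> %ld\n", sys_call_table[0](&regs));
        printf("nr 1 -> %ld\n", sys_call_table[1](&regs));
        return 0;
}
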
index 3985cac0ed4747519cfc1ea25032ac1c79ccae76..5c5d87f0b2e1939988c3d642ea08cbbccd6a828a 100644 (file)
 #include <linux/time.h>
 #include <linux/mca.h>
 
-#include <asm/arch_hooks.h>
+#include <asm/setup.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
 #include <asm/timer.h>
 
-#include "do_timer.h"
+#include <asm/do_timer.h>
 
 int timer_ack;
 
@@ -118,7 +118,7 @@ void __init hpet_time_init(void)
 {
        if (!hpet_enable())
                setup_pit_timer();
-       time_init_hook();
+       x86_quirk_time_init();
 }
 
 /*
@@ -131,7 +131,7 @@ void __init hpet_time_init(void)
  */
 void __init time_init(void)
 {
-       pre_time_init_hook();
+       x86_quirk_pre_time_init();
        tsc_init();
        late_time_init = choose_time_init();
 }
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c
deleted file mode 100644 (file)
index ce50546..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-#include <linux/spinlock.h>
-#include <linux/cpu.h>
-#include <linux/interrupt.h>
-
-#include <asm/tlbflush.h>
-
-DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate)
-                       ____cacheline_aligned = { &init_mm, 0, };
-
-/* must come after the send_IPI functions above for inlining */
-#include <mach_ipi.h>
-
-/*
- *     Smarter SMP flushing macros.
- *             c/o Linus Torvalds.
- *
- *     These mean you can really definitely utterly forget about
- *     writing to user space from interrupts. (It's not allowed anyway).
- *
- *     Optimizations Manfred Spraul <manfred@colorfullife.com>
- */
-
-static cpumask_t flush_cpumask;
-static struct mm_struct *flush_mm;
-static unsigned long flush_va;
-static DEFINE_SPINLOCK(tlbstate_lock);
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- *
- * We need to reload %cr3 since the page tables may be going
- * away from under us..
- */
-void leave_mm(int cpu)
-{
-       BUG_ON(x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK);
-       cpu_clear(cpu, x86_read_percpu(cpu_tlbstate.active_mm)->cpu_vm_mask);
-       load_cr3(swapper_pg_dir);
-}
-EXPORT_SYMBOL_GPL(leave_mm);
-
-/*
- *
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *     Stop ipi delivery for the old mm. This is not synchronized with
- *     the other cpus, but smp_invalidate_interrupt ignores flush ipis
- *     for the wrong mm, and in the worst case we perform a superfluous
- *     tlb flush.
- * 1a2) set cpu_tlbstate to TLBSTATE_OK
- *     Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *     was in lazy tlb mode.
- * 1a3) update cpu_tlbstate[].active_mm
- *     Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
- *     Now the other cpus will send tlb flush ipis.
- * 1a4) change cr3.
- * 1b) thread switch without mm change
- *     cpu_tlbstate[].active_mm is correct, cpu0 already handles
- *     flush ipis.
- * 1b1) set cpu_tlbstate to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- *     Atomically set the bit [other cpus will start sending flush ipis],
- *     and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, ie current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- *   runs in kernel space, the cpu could load tlb entries for user space
- *   pages.
- *
- * The good news is that cpu_tlbstate is local to each cpu, no
- * write/read ordering problems.
- */
-
-/*
- * TLB flush IPI:
- *
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- */
-
-void smp_invalidate_interrupt(struct pt_regs *regs)
-{
-       unsigned long cpu;
-
-       cpu = get_cpu();
-
-       if (!cpu_isset(cpu, flush_cpumask))
-               goto out;
-               /*
-                * This was a BUG() but until someone can quote me the
-                * line from the intel manual that guarantees an IPI to
-                * multiple CPUs is retried _only_ on the erroring CPUs
-                * it's staying as a return
-                *
-                * BUG();
-                */
-
-       if (flush_mm == x86_read_percpu(cpu_tlbstate.active_mm)) {
-               if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_OK) {
-                       if (flush_va == TLB_FLUSH_ALL)
-                               local_flush_tlb();
-                       else
-                               __flush_tlb_one(flush_va);
-               } else
-                       leave_mm(cpu);
-       }
-       ack_APIC_irq();
-       smp_mb__before_clear_bit();
-       cpu_clear(cpu, flush_cpumask);
-       smp_mb__after_clear_bit();
-out:
-       put_cpu_no_resched();
-       inc_irq_stat(irq_tlb_count);
-}
-
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-                            unsigned long va)
-{
-       cpumask_t cpumask = *cpumaskp;
-
-       /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - current CPU must not be in mask
-        * - mask must exist :)
-        */
-       BUG_ON(cpus_empty(cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-       BUG_ON(!mm);
-
-#ifdef CONFIG_HOTPLUG_CPU
-       /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (unlikely(cpus_empty(cpumask)))
-               return;
-#endif
-
-       /*
- * I'm not happy about this global shared spinlock in the
-        * MM hot path, but we'll see how contended it is.
-        * AK: x86-64 has a faster method that could be ported.
-        */
-       spin_lock(&tlbstate_lock);
-
-       flush_mm = mm;
-       flush_va = va;
-       cpus_or(flush_cpumask, cpumask, flush_cpumask);
-
-       /*
-        * Make the above memory operations globally visible before
-        * sending the IPI.
-        */
-       smp_mb();
-       /*
-        * We have to send the IPI only to
-        * CPUs affected.
-        */
-       send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);
-
-       while (!cpus_empty(flush_cpumask))
-               /* nothing. lockup detection does not belong here */
-               cpu_relax();
-
-       flush_mm = NULL;
-       flush_va = 0;
-       spin_unlock(&tlbstate_lock);
-}
-
-void flush_tlb_current_task(void)
-{
-       struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-       preempt_enable();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       local_flush_tlb();
-               else
-                       leave_mm(smp_processor_id());
-       }
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-       preempt_enable();
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       __flush_tlb_one(va);
-                else
-                       leave_mm(smp_processor_id());
-       }
-
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, va);
-
-       preempt_enable();
-}
-EXPORT_SYMBOL(flush_tlb_page);
-
-static void do_flush_tlb_all(void *info)
-{
-       unsigned long cpu = smp_processor_id();
-
-       __flush_tlb_all();
-       if (x86_read_percpu(cpu_tlbstate.state) == TLBSTATE_LAZY)
-               leave_mm(cpu);
-}
-
-void flush_tlb_all(void)
-{
-       on_each_cpu(do_flush_tlb_all, NULL, 1);
-}
-
-void reset_lazy_tlbstate(void)
-{
-       int cpu = raw_smp_processor_id();
-
-       per_cpu(cpu_tlbstate, cpu).state = 0;
-       per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
-}
-
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c
deleted file mode 100644 (file)
index f8be6f1..0000000
+++ /dev/null
@@ -1,284 +0,0 @@
-#include <linux/init.h>
-
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/mc146818rtc.h>
-#include <linux/interrupt.h>
-
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/mmu_context.h>
-#include <asm/proto.h>
-#include <asm/apicdef.h>
-#include <asm/idle.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_bau.h>
-
-#include <mach_ipi.h>
-/*
- *     Smarter SMP flushing macros.
- *             c/o Linus Torvalds.
- *
- *     These mean you can really definitely utterly forget about
- *     writing to user space from interrupts. (It's not allowed anyway).
- *
- *     Optimizations Manfred Spraul <manfred@colorfullife.com>
- *
- *     More scalable flush, from Andi Kleen
- *
- *     To avoid global state, use 8 different call vectors.
- *     Each CPU uses a specific vector to trigger flushes on other
- *     CPUs. Depending on the received vector, the target CPUs look into
- *     the right per-cpu variable for the flush data.
- *
- *     With more than 8 CPUs they are hashed to the 8 available
- *     vectors. The limited global vector space forces us to this right now.
- *     In the future, when interrupts are split into per-CPU domains, this
- *     could be fixed, at the cost of triggering multiple IPIs in some cases.
- */
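To make the hashing scheme above concrete, here is a minimal userspace
sketch of how a sender CPU maps onto one of the 8 vectors and how the
receiver inverts the mapping; the vector base value is an assumption,
the real constants live in the kernel's irq_vectors.h.

	/* Illustrative sketch only -- not kernel code. */
	#include <stdio.h>

	#define NUM_INVALIDATE_TLB_VECTORS	8
	#define INVALIDATE_TLB_VECTOR_START	0xe0	/* assumed base */

	static int sender_to_vector(int cpu)
	{
		/* CPUs beyond 8 share vectors by modulo hashing: */
		return INVALIDATE_TLB_VECTOR_START +
		       (cpu % NUM_INVALIDATE_TLB_VECTORS);
	}

	static int vector_to_sender_slot(int vector)
	{
		/* The receiver recovers the per-sender flush_state slot: */
		return vector - INVALIDATE_TLB_VECTOR_START;
	}

	int main(void)
	{
		int cpu;

		for (cpu = 0; cpu < 12; cpu++)
			printf("cpu %2d -> vector %#x -> slot %d\n", cpu,
			       sender_to_vector(cpu),
			       vector_to_sender_slot(sender_to_vector(cpu)));
		return 0;
	}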
-
-union smp_flush_state {
-       struct {
-               cpumask_t flush_cpumask;
-               struct mm_struct *flush_mm;
-               unsigned long flush_va;
-               spinlock_t tlbstate_lock;
-       };
-       char pad[SMP_CACHE_BYTES];
-} ____cacheline_aligned;
-
-/*
- * State is put into the per-CPU data section, but padded to a full
- * cache line because other CPUs can access it and we don't want
- * false sharing in the per-CPU data segment.
- */
-static DEFINE_PER_CPU(union smp_flush_state, flush_state);
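The padding idiom used here is worth a standalone illustration; a
minimal sketch, assuming a 64-byte cache line (SMP_CACHE_BYTES is
configuration dependent in the real kernel):

	/* Illustrative sketch of cache-line padding against false sharing. */
	#include <stdio.h>

	#define SMP_CACHE_BYTES 64	/* assumed cache-line size */

	union padded_state {
		struct {
			unsigned long flush_va;
			void *flush_mm;
		};
		char pad[SMP_CACHE_BYTES];
	} __attribute__((aligned(SMP_CACHE_BYTES)));

	int main(void)
	{
		/* Each instance fills a whole cache line, so writers on
		 * two CPUs touching neighbouring entries never share a
		 * line: */
		printf("sizeof(union padded_state) = %zu\n",
		       sizeof(union padded_state));
		return 0;
	}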
-
-/*
- * We cannot call mmdrop() because we are in interrupt context,
- * instead update mm->cpu_vm_mask.
- */
-void leave_mm(int cpu)
-{
-       if (read_pda(mmu_state) == TLBSTATE_OK)
-               BUG();
-       cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
-       load_cr3(swapper_pg_dir);
-}
-EXPORT_SYMBOL_GPL(leave_mm);
-
-/*
- * The flush IPI assumes that a thread switch happens in this order:
- * [cpu0: the cpu that switches]
- * 1) switch_mm() either 1a) or 1b)
- * 1a) thread switch to a different mm
- * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
- *     Stop ipi delivery for the old mm. This is not synchronized with
- *     the other cpus, but smp_invalidate_interrupt ignores flush ipis
- *     for the wrong mm, and in the worst case we perform a superfluous
- *     tlb flush.
- * 1a2) set cpu mmu_state to TLBSTATE_OK
- *     Now the smp_invalidate_interrupt won't call leave_mm if cpu0
- *     was in lazy tlb mode.
- * 1a3) update cpu active_mm
- *     Now cpu0 accepts tlb flushes for the new mm.
- * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
- *     Now the other cpus will send tlb flush ipis.
- * 1a5) change cr3.
- * 1b) thread switch without mm change
- *     cpu active_mm is correct, cpu0 already handles
- *     flush ipis.
- * 1b1) set cpu mmu_state to TLBSTATE_OK
- * 1b2) test_and_set the cpu bit in cpu_vm_mask.
- *     Atomically set the bit [other cpus will start sending flush ipis],
- *     and test the bit.
- * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
- * 2) switch %%esp, i.e. current
- *
- * The interrupt must handle 2 special cases:
- * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
- * - the cpu performs speculative tlb reads, i.e. even if the cpu only
- *   runs in kernel space, the cpu could load tlb entries for user space
- *   pages.
- *
- * The good news is that cpu mmu_state is local to each cpu, so there
- * are no write/read ordering problems.
- */
-
-/*
- * TLB flush IPI:
- *
- * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
- * 2) Leave the mm if we are in the lazy tlb mode.
- *
- * Interrupts are disabled.
- */
-
-asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
-{
-       int cpu;
-       int sender;
-       union smp_flush_state *f;
-
-       cpu = smp_processor_id();
-       /*
-        * orig_rax contains the negated interrupt vector.
-        * Use that to determine where the sender put the data.
-        */
-       sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-       f = &per_cpu(flush_state, sender);
-
-       if (!cpu_isset(cpu, f->flush_cpumask))
-               goto out;
-               /*
-                * This was a BUG() but until someone can quote me the
-                * line from the Intel manual that guarantees an IPI to
-                * multiple CPUs is retried _only_ on the erroring CPUs,
-                * it's staying as a return
-                *
-                * BUG();
-                */
-
-       if (f->flush_mm == read_pda(active_mm)) {
-               if (read_pda(mmu_state) == TLBSTATE_OK) {
-                       if (f->flush_va == TLB_FLUSH_ALL)
-                               local_flush_tlb();
-                       else
-                               __flush_tlb_one(f->flush_va);
-               } else
-                       leave_mm(cpu);
-       }
-out:
-       ack_APIC_irq();
-       cpu_clear(cpu, f->flush_cpumask);
-       inc_irq_stat(irq_tlb_count);
-}
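The one's-complement trick deserves a quick check: the 64-bit entry code
stores ~vector in orig_ax, so complementing it again recovers the vector.
A tiny sketch, with assumed vector values:

	/* Illustrative check of the sender-slot recovery arithmetic. */
	#include <stdio.h>

	int main(void)
	{
		long orig_ax = ~0xe2L;	/* entry code pushed ~vector (0xe2 assumed) */
		int start = 0xe0;	/* INVALIDATE_TLB_VECTOR_START, assumed */
		int sender = ~orig_ax - start;

		printf("sender slot = %d\n", sender);	/* prints 2 */
		return 0;
	}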
-
-void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
-                            unsigned long va)
-{
-       int sender;
-       union smp_flush_state *f;
-       cpumask_t cpumask = *cpumaskp;
-
-       if (is_uv_system() && uv_flush_tlb_others(&cpumask, mm, va))
-               return;
-
-       /* Caller has disabled preemption */
-       sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-       f = &per_cpu(flush_state, sender);
-
-       /*
-        * Could avoid this lock when
-        * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
-        * probably not worth checking this for a cache-hot lock.
-        */
-       spin_lock(&f->tlbstate_lock);
-
-       f->flush_mm = mm;
-       f->flush_va = va;
-       cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
-
-       /*
-        * Make the above memory operations globally visible before
-        * sending the IPI.
-        */
-       smp_mb();
-       /*
-        * We have to send the IPI only to
-        * the CPUs affected.
-        */
-       send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);
-
-       while (!cpus_empty(f->flush_cpumask))
-               cpu_relax();
-
-       f->flush_mm = NULL;
-       f->flush_va = 0;
-       spin_unlock(&f->tlbstate_lock);
-}
-
-static int __cpuinit init_smp_flush(void)
-{
-       int i;
-
-       for_each_possible_cpu(i)
-               spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
-
-       return 0;
-}
-core_initcall(init_smp_flush);
-
-void flush_tlb_current_task(void)
-{
-       struct mm_struct *mm = current->mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       local_flush_tlb();
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-       preempt_enable();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       local_flush_tlb();
-               else
-                       leave_mm(smp_processor_id());
-       }
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-       preempt_enable();
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       cpumask_t cpu_mask;
-
-       preempt_disable();
-       cpu_mask = mm->cpu_vm_mask;
-       cpu_clear(smp_processor_id(), cpu_mask);
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       __flush_tlb_one(va);
-               else
-                       leave_mm(smp_processor_id());
-       }
-
-       if (!cpus_empty(cpu_mask))
-               flush_tlb_others(cpu_mask, mm, va);
-
-       preempt_enable();
-}
-
-static void do_flush_tlb_all(void *info)
-{
-       unsigned long cpu = smp_processor_id();
-
-       __flush_tlb_all();
-       if (read_pda(mmu_state) == TLBSTATE_LAZY)
-               leave_mm(cpu);
-}
-
-void flush_tlb_all(void)
-{
-       on_each_cpu(do_flush_tlb_all, NULL, 1);
-}
index 6812b829ed83e86a8c0919e8c4daaadd2516cc12..d038b9c45cf89597e10ab05d1814b2557b606994 100644 (file)
 #include <linux/kernel.h>
 
 #include <asm/mmu_context.h>
+#include <asm/uv/uv.h>
 #include <asm/uv/uv_mmrs.h>
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_bau.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
 #include <asm/idle.h>
 #include <asm/tsc.h>
 #include <asm/irq_vectors.h>
 
-#include <mach_apic.h>
-
 static struct bau_control      **uv_bau_table_bases __read_mostly;
 static int                     uv_bau_retry_limit __read_mostly;
 
@@ -210,14 +209,15 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
  *
  * Send a broadcast and wait for a broadcast message to complete.
  *
- * The cpumaskp mask contains the cpus the broadcast was sent to.
+ * The flush_mask contains the cpus the broadcast was sent to.
  *
- * Returns 1 if all remote flushing was done. The mask is zeroed.
- * Returns 0 if some remote flushing remains to be done. The mask is left
- * unchanged.
+ * Returns NULL if all remote flushing was done. The mask is zeroed.
+ * Returns @flush_mask if some remote flushing remains to be done. The
+ * mask will have some bits still set.
  */
-int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
-                          cpumask_t *cpumaskp)
+const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
+                                            struct bau_desc *bau_desc,
+                                            struct cpumask *flush_mask)
 {
        int completion_status = 0;
        int right_shift;
@@ -257,66 +257,74 @@ int uv_flush_send_and_wait(int cpu, int this_blade, struct bau_desc *bau_desc,
                 * the cpus, all of which are still in the mask.
                 */
                __get_cpu_var(ptcstats).ptc_i++;
-               return 0;
+               return flush_mask;
        }
 
        /*
         * Success, so clear the remote cpus from the mask so we don't
         * use the IPI method of shootdown on them.
         */
-       for_each_cpu_mask(bit, *cpumaskp) {
+       for_each_cpu(bit, flush_mask) {
                blade = uv_cpu_to_blade_id(bit);
                if (blade == this_blade)
                        continue;
-               cpu_clear(bit, *cpumaskp);
+               cpumask_clear_cpu(bit, flush_mask);
        }
-       if (!cpus_empty(*cpumaskp))
-               return 0;
-       return 1;
+       if (!cpumask_empty(flush_mask))
+               return flush_mask;
+       return NULL;
 }
 
 /**
  * uv_flush_tlb_others - globally purge translation cache of a virtual
  * address or all TLBs
- * @cpumaskp: mask of all cpu's in which the address is to be removed
+ * @cpumask: mask of all cpus in which the address is to be removed
  * @mm: mm_struct containing virtual address range
  * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
+ * @cpu: the current cpu
  *
  * This is the entry point for initiating any UV global TLB shootdown.
  *
  * Purges the translation caches of all specified processors of the given
  * virtual address, or purges all TLBs on specified processors.
  *
- * The caller has derived the cpumaskp from the mm_struct and has subtracted
- * the local cpu from the mask.  This function is called only if there
- * are bits set in the mask. (e.g. flush_tlb_page())
+ * The caller has derived the cpumask from the mm_struct.  This function
+ * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
  *
- * The cpumaskp is converted into a nodemask of the nodes containing
+ * The cpumask is converted into a nodemask of the nodes containing
  * the cpus.
  *
- * Returns 1 if all remote flushing was done.
- * Returns 0 if some remote flushing remains to be done.
+ * Note that this function should be called with preemption disabled.
+ *
+ * Returns NULL if all remote flushing was done.
+ * Returns pointer to cpumask if some remote flushing remains to be
+ * done.  The returned pointer is valid until preemption is re-enabled.
  */
-int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
-                       unsigned long va)
+const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
+                                         struct mm_struct *mm,
+                                         unsigned long va, unsigned int cpu)
 {
+       static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
+       struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
        int i;
        int bit;
        int blade;
-       int cpu;
+       int uv_cpu;
        int this_blade;
        int locals = 0;
        struct bau_desc *bau_desc;
 
-       cpu = uv_blade_processor_id();
+       cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
+
+       uv_cpu = uv_blade_processor_id();
        this_blade = uv_numa_blade_id();
        bau_desc = __get_cpu_var(bau_control).descriptor_base;
-       bau_desc += UV_ITEMS_PER_DESCRIPTOR * cpu;
+       bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;
 
        bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
        i = 0;
-       for_each_cpu_mask(bit, *cpumaskp) {
+       for_each_cpu(bit, flush_mask) {
                blade = uv_cpu_to_blade_id(bit);
                BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
                if (blade == this_blade) {
@@ -331,17 +339,17 @@ int uv_flush_tlb_others(cpumask_t *cpumaskp, struct mm_struct *mm,
                 * no off_node flushing; return status for local node
                 */
                if (locals)
-                       return 0;
+                       return flush_mask;
                else
-                       return 1;
+                       return NULL;
        }
        __get_cpu_var(ptcstats).requestor++;
        __get_cpu_var(ptcstats).ntargeted += i;
 
        bau_desc->payload.address = va;
-       bau_desc->payload.sending_cpu = smp_processor_id();
+       bau_desc->payload.sending_cpu = cpu;
 
-       return uv_flush_send_and_wait(cpu, this_blade, bau_desc, cpumaskp);
+       return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
 }
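With the new signature, a caller can fall back to the IPI path only for
the CPUs the BAU did not handle. A hedged caller-side sketch (the
fallback helper name is an assumption, not something shown in this hunk):

	/* Hypothetical caller sketch for the new return convention. */
	const struct cpumask *remaining;

	remaining = uv_flush_tlb_others(cpumask, mm, va, cpu);
	if (!remaining)
		return;		/* the BAU flushed every remote TLB */

	/* Some CPUs are still set in *remaining: shoot them down by
	 * IPI.  (flush_tlb_others_ipi() is assumed for illustration.) */
	flush_tlb_others_ipi(remaining, mm, va);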
 
 /*
index d8ccc3c6552f60f85bd50de1d4e6c68548261ae7..66d874e5404cec797dbdab2fae1a84558ecbc50a 100644 (file)
@@ -29,7 +29,7 @@
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 
 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
 #ifndef CONFIG_HOTPLUG_CPU
index 894293c598db62fe6b23a365d048c8a6225a8285..cddfb8d386b927bce74e8b82e78a23aeaa83c04a 100644 (file)
  */
 
 #include <linux/linkage.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
 #include <asm/msr.h>
 #include <asm/segment.h>
+#include <asm/processor-flags.h>
 
 .section .rodata, "a", @progbits
 
@@ -37,7 +38,7 @@
 ENTRY(trampoline_data)
 r_base = .
        cli                     # We should be safe anyway
-       wbinvd  
+       wbinvd
        mov     %cs, %ax        # Code and data in the same place
        mov     %ax, %ds
        mov     %ax, %es
@@ -73,9 +74,8 @@ r_base = .
        lidtl   tidt - r_base   # load idt with 0, 0
        lgdtl   tgdt - r_base   # load gdt with whatever is appropriate
 
-       xor     %ax, %ax
-       inc     %ax             # protected mode (PE) bit
-       lmsw    %ax             # into protected mode
+       mov     $X86_CR0_PE, %ax        # protected mode (PE) bit
+       lmsw    %ax                     # into protected mode
 
        # flush prefetch and jump to startup_32
        ljmpl   *(startup_32_vector - r_base)
@@ -86,9 +86,8 @@ startup_32:
        movl    $__KERNEL_DS, %eax      # Initialize the %ds segment register
        movl    %eax, %ds
 
-       xorl    %eax, %eax
-       btsl    $5, %eax                # Enable PAE mode
-       movl    %eax, %cr4
+       movl    $X86_CR4_PAE, %eax
+       movl    %eax, %cr4              # Enable PAE mode
 
                                        # Setup trampoline 4 level pagetables
        leal    (trampoline_level4_pgt - r_base)(%esi), %eax
@@ -99,9 +98,9 @@ startup_32:
        xorl    %edx, %edx
        wrmsr
 
-       xorl    %eax, %eax
-       btsl    $31, %eax               # Enable paging and in turn activate Long Mode
-       btsl    $0, %eax                # Enable protected mode
+       # Enable paging and in turn activate Long Mode
+       # Enable protected mode
+       movl    $(X86_CR0_PG | X86_CR0_PE), %eax
        movl    %eax, %cr0
 
        /*
index a9e7548e17906f885f28e4763128e749c7a76481..a1d288327ff0ff0f152241bf296c742778f61e00 100644 (file)
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-#include <mach_traps.h>
+#include <asm/mach_traps.h>
 
 #ifdef CONFIG_X86_64
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
-#include <asm/pda.h>
 #else
 #include <asm/processor-flags.h>
-#include <asm/arch_hooks.h>
+#include <asm/setup.h>
 #include <asm/traps.h>
 
 #include "cpu/mcheck/mce.h"
@@ -119,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
        if (!user_mode_vm(regs))
                die(str, regs, err);
 }
-
-/*
- * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
- * invalid offset set (the LAZY one) and the faulting thread has
- * a valid I/O bitmap pointer, we copy the I/O bitmap into the TSS,
- * set the offset field correctly, and return 1.
- */
-static int lazy_iobitmap_copy(void)
-{
-       struct thread_struct *thread;
-       struct tss_struct *tss;
-       int cpu;
-
-       cpu = get_cpu();
-       tss = &per_cpu(init_tss, cpu);
-       thread = &current->thread;
-
-       if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
-           thread->io_bitmap_ptr) {
-               memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
-                      thread->io_bitmap_max);
-               /*
-                * If the previously set map was extending to higher ports
-                * than the current one, pad extra space with 0xff (no access).
-                */
-               if (thread->io_bitmap_max < tss->io_bitmap_max) {
-                       memset((char *) tss->io_bitmap +
-                               thread->io_bitmap_max, 0xff,
-                               tss->io_bitmap_max - thread->io_bitmap_max);
-               }
-               tss->io_bitmap_max = thread->io_bitmap_max;
-               tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
-               tss->io_bitmap_owner = thread;
-               put_cpu();
-
-               return 1;
-       }
-       put_cpu();
-
-       return 0;
-}
 #endif
 
 static void __kprobes
@@ -310,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
        conditional_sti(regs);
 
 #ifdef CONFIG_X86_32
-       if (lazy_iobitmap_copy()) {
-               /* restart the faulting instruction */
-               return;
-       }
-
        if (regs->flags & X86_VM_MASK)
                goto gp_in_vm86;
 #endif
@@ -914,19 +867,20 @@ void math_emulate(struct math_emu_info *info)
 }
 #endif /* CONFIG_MATH_EMULATION */
 
-dotraplinkage void __kprobes do_device_not_available(struct pt_regs regs)
+dotraplinkage void __kprobes
+do_device_not_available(struct pt_regs *regs, long error_code)
 {
 #ifdef CONFIG_X86_32
        if (read_cr0() & X86_CR0_EM) {
                struct math_emu_info info = { };
 
-               conditional_sti(&regs);
+               conditional_sti(regs);
 
-               info.regs = &regs;
+               info.regs = regs;
                math_emulate(&info);
        } else {
                math_state_restore(); /* interrupts still off */
-               conditional_sti(&regs);
+               conditional_sti(regs);
        }
 #else
        math_state_restore();
@@ -942,7 +896,7 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
-       info.si_addr = 0;
+       info.si_addr = NULL;
        if (notify_die(DIE_TRAP, "iret exception",
                        regs, error_code, 32, SIGILL) == NOTIFY_STOP)
                return;
@@ -1026,6 +980,6 @@ void __init trap_init(void)
        cpu_init();
 
 #ifdef CONFIG_X86_32
-       trap_init_hook();
+       x86_quirk_trap_init();
 #endif
 }
index 599e58168631e22e5ff69e15c365273bd2f9b03c..83d53ce5d4c4a98aa703032f4f7390292a35536f 100644 (file)
@@ -773,7 +773,7 @@ __cpuinit int unsynchronized_tsc(void)
        if (!cpu_has_tsc || tsc_unstable)
                return 1;
 
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        if (apic_is_clustered_box())
                return 1;
 #endif
diff --git a/arch/x86/kernel/uv_time.c b/arch/x86/kernel/uv_time.c
new file mode 100644 (file)
index 0000000..2ffb6c5
--- /dev/null
@@ -0,0 +1,393 @@
+/*
+ * SGI RTC clock/timer routines.
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ *  Copyright (c) 2009 Silicon Graphics, Inc.  All Rights Reserved.
+ *  Copyright (c) Dimitri Sivanich
+ */
+#include <linux/clockchips.h>
+
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
+
+#define RTC_NAME               "sgi_rtc"
+
+static cycle_t uv_read_rtc(void);
+static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
+static void uv_rtc_timer_setup(enum clock_event_mode,
+                               struct clock_event_device *);
+
+static struct clocksource clocksource_uv = {
+       .name           = RTC_NAME,
+       .rating         = 400,
+       .read           = uv_read_rtc,
+       .mask           = (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+       .shift          = 10,
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct clock_event_device clock_event_device_uv = {
+       .name           = RTC_NAME,
+       .features       = CLOCK_EVT_FEAT_ONESHOT,
+       .shift          = 20,
+       .rating         = 400,
+       .irq            = -1,
+       .set_next_event = uv_rtc_next_event,
+       .set_mode       = uv_rtc_timer_setup,
+       .event_handler  = NULL,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+
+/* There is one of these allocated per node */
+struct uv_rtc_timer_head {
+       spinlock_t      lock;
+       /* next cpu waiting for timer, local node relative: */
+       int             next_cpu;
+       /* number of cpus on this node: */
+       int             ncpus;
+       struct {
+               int     lcpu;           /* systemwide logical cpu number */
+               u64     expires;        /* next timer expiration for this cpu */
+       } cpu[1];
+};
+
+/*
+ * Access to uv_rtc_timer_head via blade id.
+ */
+static struct uv_rtc_timer_head                **blade_info __read_mostly;
+
+static int                             uv_rtc_enable;
+
+/*
+ * Hardware interface routines
+ */
+
+/* Send IPIs to another node */
+static void uv_rtc_send_IPI(int cpu)
+{
+       unsigned long apicid, val;
+       int pnode;
+
+       apicid = cpu_physical_id(cpu);
+       pnode = uv_apicid_to_pnode(apicid);
+       val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+             (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+             (GENERIC_INTERRUPT_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
+
+       uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
+
+/* Check for an RTC interrupt pending */
+static int uv_intr_pending(int pnode)
+{
+       return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED0) &
+               UVH_EVENT_OCCURRED0_RTC1_MASK;
+}
+
+/* Set up the interrupt and return non-zero if early expiration occurred. */
+static int uv_setup_intr(int cpu, u64 expires)
+{
+       u64 val;
+       int pnode = uv_cpu_to_pnode(cpu);
+
+       uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+               UVH_RTC1_INT_CONFIG_M_MASK);
+       uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
+
+       uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED0_ALIAS,
+               UVH_EVENT_OCCURRED0_RTC1_MASK);
+
+       val = (GENERIC_INTERRUPT_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
+               ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
+
+       /* Set configuration */
+       uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
+       /* Initialize comparator value */
+       uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
+
+       return (expires < uv_read_rtc() && !uv_intr_pending(pnode));
+}
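The return expression encodes a race check: after programming the
comparator the clock is re-read, and if the deadline already passed with
no interrupt latched, the event was missed and the caller must recover
(uv_rtc_find_next_timer() does so by sending an IPI). A compressed
userspace restatement of that logic:

	/* Illustrative restatement of the missed-deadline check. */
	#include <stdio.h>

	static unsigned long long rtc_now;	/* stands in for uv_read_rtc() */
	static int irq_pending;		/* stands in for uv_intr_pending() */

	static int setup_intr(unsigned long long expires)
	{
		/* ...program the comparator to fire at 'expires'... */

		/* Non-zero means: the deadline already passed and no IRQ
		 * was latched, so the caller must fire the event by hand. */
		return expires < rtc_now && !irq_pending;
	}

	int main(void)
	{
		rtc_now = 1000;
		irq_pending = 0;
		printf("missed? %d\n", setup_intr(900));	/* 1: recover */
		printf("missed? %d\n", setup_intr(1100));	/* 0: armed ok */
		return 0;
	}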
+
+/*
+ * Per-cpu timer tracking routines
+ */
+
+static __init void uv_rtc_deallocate_timers(void)
+{
+       int bid;
+
+       for_each_possible_blade(bid) {
+               kfree(blade_info[bid]);
+       }
+       kfree(blade_info);
+}
+
+/* Allocate per-node list of cpu timer expiration times. */
+static __init int uv_rtc_allocate_timers(void)
+{
+       int cpu;
+
+       blade_info = kzalloc(uv_possible_blades * sizeof(void *), GFP_KERNEL);
+       if (!blade_info)
+               return -ENOMEM;
+
+       for_each_present_cpu(cpu) {
+               int nid = cpu_to_node(cpu);
+               int bid = uv_cpu_to_blade_id(cpu);
+               int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+               struct uv_rtc_timer_head *head = blade_info[bid];
+
+               if (!head) {
+                       head = kmalloc_node(sizeof(struct uv_rtc_timer_head) +
+                               (uv_blade_nr_possible_cpus(bid) *
+                                       2 * sizeof(u64)),
+                               GFP_KERNEL, nid);
+                       if (!head) {
+                               uv_rtc_deallocate_timers();
+                               return -ENOMEM;
+                       }
+                       spin_lock_init(&head->lock);
+                       head->ncpus = uv_blade_nr_possible_cpus(bid);
+                       head->next_cpu = -1;
+                       blade_info[bid] = head;
+               }
+
+               head->cpu[bcpu].lcpu = cpu;
+               head->cpu[bcpu].expires = ULLONG_MAX;
+       }
+
+       return 0;
+}
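The cpu[1] trailing array plus the hand-computed kmalloc_node() size is
the old-style flexible array idiom. A sketch of the same allocation with
a C99 flexible array member, purely as an illustrative alternative to
what this version uses:

	/* Illustrative alternative using a C99 flexible array member. */
	struct uv_rtc_timer_head_fam {
		spinlock_t	lock;
		int		next_cpu;
		int		ncpus;
		struct {
			int	lcpu;
			u64	expires;
		} cpu[];		/* flexible array member */
	};

	/* sizeof(*head) now excludes the array, so the size math is direct: */
	head = kmalloc_node(sizeof(*head) +
			    uv_blade_nr_possible_cpus(bid) *
				sizeof(head->cpu[0]),
			    GFP_KERNEL, nid);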
+
+/* Find and set the next expiring timer.  */
+static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
+{
+       u64 lowest = ULLONG_MAX;
+       int c, bcpu = -1;
+
+       head->next_cpu = -1;
+       for (c = 0; c < head->ncpus; c++) {
+               u64 exp = head->cpu[c].expires;
+               if (exp < lowest) {
+                       bcpu = c;
+                       lowest = exp;
+               }
+       }
+       if (bcpu >= 0) {
+               head->next_cpu = bcpu;
+               c = head->cpu[bcpu].lcpu;
+               if (uv_setup_intr(c, lowest))
+                       /* If we didn't set it up in time, trigger */
+                       uv_rtc_send_IPI(c);
+       } else {
+               uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+                       UVH_RTC1_INT_CONFIG_M_MASK);
+       }
+}
+
+/*
+ * Set expiration time for current cpu.
+ *
+ * Returns 1 if we missed the expiration time.
+ */
+static int uv_rtc_set_timer(int cpu, u64 expires)
+{
+       int pnode = uv_cpu_to_pnode(cpu);
+       int bid = uv_cpu_to_blade_id(cpu);
+       struct uv_rtc_timer_head *head = blade_info[bid];
+       int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+       u64 *t = &head->cpu[bcpu].expires;
+       unsigned long flags;
+       int next_cpu;
+
+       spin_lock_irqsave(&head->lock, flags);
+
+       next_cpu = head->next_cpu;
+       *t = expires;
+       /* Will this one be next to go off? */
+       if (next_cpu < 0 || bcpu == next_cpu ||
+                       expires < head->cpu[next_cpu].expires) {
+               head->next_cpu = bcpu;
+               if (uv_setup_intr(cpu, expires)) {
+                       *t = ULLONG_MAX;
+                       uv_rtc_find_next_timer(head, pnode);
+                       spin_unlock_irqrestore(&head->lock, flags);
+                       return 1;
+               }
+       }
+
+       spin_unlock_irqrestore(&head->lock, flags);
+       return 0;
+}
+
+/*
+ * Unset expiration time for current cpu.
+ *
+ * Returns 1 if this timer was pending.
+ */
+static int uv_rtc_unset_timer(int cpu)
+{
+       int pnode = uv_cpu_to_pnode(cpu);
+       int bid = uv_cpu_to_blade_id(cpu);
+       struct uv_rtc_timer_head *head = blade_info[bid];
+       int bcpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+       u64 *t = &head->cpu[bcpu].expires;
+       unsigned long flags;
+       int rc = 0;
+
+       spin_lock_irqsave(&head->lock, flags);
+
+       if (head->next_cpu == bcpu && uv_read_rtc() >= *t)
+               rc = 1;
+
+       *t = ULLONG_MAX;
+
+       /* Was the hardware set up for this timer? */
+       if (head->next_cpu == bcpu)
+               uv_rtc_find_next_timer(head, pnode);
+
+       spin_unlock_irqrestore(&head->lock, flags);
+
+       return rc;
+}
+
+/*
+ * Kernel interface routines.
+ */
+
+/*
+ * Read the RTC.
+ */
+static cycle_t uv_read_rtc(void)
+{
+       return (cycle_t)uv_read_local_mmr(UVH_RTC);
+}
+
+/*
+ * Program the next event, relative to now
+ */
+static int uv_rtc_next_event(unsigned long delta,
+                            struct clock_event_device *ced)
+{
+       int ced_cpu = cpumask_first(ced->cpumask);
+
+       return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc());
+}
+
+/*
+ * Setup the RTC timer in oneshot mode
+ */
+static void uv_rtc_timer_setup(enum clock_event_mode mode,
+                              struct clock_event_device *evt)
+{
+       int ced_cpu = cpumask_first(evt->cpumask);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+       case CLOCK_EVT_MODE_ONESHOT:
+       case CLOCK_EVT_MODE_RESUME:
+               /* Nothing to do here yet */
+               break;
+       case CLOCK_EVT_MODE_UNUSED:
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               uv_rtc_unset_timer(ced_cpu);
+               break;
+       }
+}
+
+static void uv_rtc_interrupt(void)
+{
+       struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+       int cpu = smp_processor_id();
+
+       if (!ced || !ced->event_handler)
+               return;
+
+       if (uv_rtc_unset_timer(cpu) != 1)
+               return;
+
+       ced->event_handler(ced);
+}
+
+static int __init uv_enable_rtc(char *str)
+{
+       uv_rtc_enable = 1;
+
+       return 1;
+}
+__setup("uvrtc", uv_enable_rtc);
+
+static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
+{
+       struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+
+       *ced = clock_event_device_uv;
+       ced->cpumask = cpumask_of(smp_processor_id());
+       clockevents_register_device(ced);
+}
+
+static __init int uv_rtc_setup_clock(void)
+{
+       int rc;
+
+       if (!uv_rtc_enable || !is_uv_system() || generic_interrupt_extension)
+               return -ENODEV;
+
+       generic_interrupt_extension = uv_rtc_interrupt;
+
+       clocksource_uv.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
+                               clocksource_uv.shift);
+
+       rc = clocksource_register(&clocksource_uv);
+       if (rc) {
+               generic_interrupt_extension = NULL;
+               return rc;
+       }
+
+       /* Set up and register clockevents */
+       rc = uv_rtc_allocate_timers();
+       if (rc) {
+               clocksource_unregister(&clocksource_uv);
+               generic_interrupt_extension = NULL;
+               return rc;
+       }
+
+       clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
+                               NSEC_PER_SEC, clock_event_device_uv.shift);
+
+       clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
+                                               sn_rtc_cycles_per_second;
+
+       clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
+                               (NSEC_PER_SEC / sn_rtc_cycles_per_second);
+
+       rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
+       if (rc) {
+               clocksource_unregister(&clocksource_uv);
+               generic_interrupt_extension = NULL;
+               uv_rtc_deallocate_timers();
+       }
+
+       return rc;
+}
+arch_initcall(uv_rtc_setup_clock);
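The mult/shift setup above reduces ns-to-cycles scaling to a multiply
and a shift. A quick userspace check of the arithmetic, using a
hypothetical RTC frequency (the real sn_rtc_cycles_per_second comes from
the UV BIOS):

	/* Illustrative check of the clockevent mult/shift arithmetic. */
	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	/* div_sc(), as in the kernel: mult = (ticks << shift) / nsec */
	static uint64_t div_sc(uint64_t ticks, uint64_t nsec, int shift)
	{
		return (ticks << shift) / nsec;
	}

	int main(void)
	{
		uint64_t hz = 100000000;	/* hypothetical 100 MHz RTC */
		int shift = 20;			/* as in clock_event_device_uv */
		uint64_t mult = div_sc(hz, NSEC_PER_SEC, shift);

		/* The core computes delta_cycles = (delta_ns * mult) >> shift: */
		uint64_t delta_ns = 1000000;	/* program an event 1 ms out */
		uint64_t cycles = (delta_ns * mult) >> shift;

		printf("mult=%llu, 1 ms -> %llu RTC cycles\n",
		       (unsigned long long)mult, (unsigned long long)cycles);
		return 0;
	}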
index d801d06af068ff1a3a752380827348f3f57e79e2..31ffc24eec4d6f7e3a84cd7d5a11cc8a5b6e18d2 100644 (file)
 
 #include <asm/visws/cobalt.h>
 #include <asm/visws/piix4.h>
-#include <asm/arch_hooks.h>
 #include <asm/io_apic.h>
 #include <asm/fixmap.h>
 #include <asm/reboot.h>
 #include <asm/setup.h>
+#include <asm/apic.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 
-#include <mach_ipi.h>
-
-#include "mach_apic.h"
-
 #include <linux/kernel_stat.h>
 
 #include <asm/i8259.h>
@@ -49,8 +45,6 @@
 
 extern int no_broadcast;
 
-#include <asm/apic.h>
-
 char visws_board_type  = -1;
 char visws_board_rev   = -1;
 
@@ -200,7 +194,7 @@ static void __init MP_processor_info(struct mpc_cpu *m)
                return;
        }
 
-       apic_cpus = apicid_to_cpu_present(m->apicid);
+       apic_cpus = apic->apicid_to_cpu_present(m->apicid);
        physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus);
        /*
         * Validate version
@@ -584,7 +578,7 @@ static struct irq_chip piix4_virtual_irq_type = {
 static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 {
        int realirq;
-       irq_desc_t *desc;
+       struct irq_desc *desc;
        unsigned long flags;
 
        spin_lock_irqsave(&i8259A_lock, flags);
index 4eeb5cf9720d2e7b7099d145e6c87a0c771fed1a..d7ac84e7fc1c73f7cfcfd8ca6b66c5abed2219ce 100644 (file)
@@ -158,7 +158,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
        ret = KVM86->regs32;
 
        ret->fs = current->thread.saved_fs;
-       loadsegment(gs, current->thread.saved_gs);
+       set_user_gs(ret, current->thread.saved_gs);
 
        return ret;
 }
@@ -197,9 +197,9 @@ out:
 static int do_vm86_irq_handling(int subfunction, int irqnumber);
 static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk);
 
-asmlinkage int sys_vm86old(struct pt_regs regs)
+int sys_vm86old(struct pt_regs *regs)
 {
-       struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs.bx;
+       struct vm86_struct __user *v86 = (struct vm86_struct __user *)regs->bx;
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
                                         * This remains on the stack until we
@@ -218,7 +218,7 @@ asmlinkage int sys_vm86old(struct pt_regs regs)
        if (tmp)
                goto out;
        memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus);
-       info.regs32 = &regs;
+       info.regs32 = regs;
        tsk->thread.vm86_info = v86;
        do_sys_vm86(&info, tsk);
        ret = 0;        /* we never return here */
@@ -227,7 +227,7 @@ out:
 }
 
 
-asmlinkage int sys_vm86(struct pt_regs regs)
+int sys_vm86(struct pt_regs *regs)
 {
        struct kernel_vm86_struct info; /* declare this _on top_,
                                         * this avoids wasting of stack space.
@@ -239,12 +239,12 @@ asmlinkage int sys_vm86(struct pt_regs regs)
        struct vm86plus_struct __user *v86;
 
        tsk = current;
-       switch (regs.bx) {
+       switch (regs->bx) {
        case VM86_REQUEST_IRQ:
        case VM86_FREE_IRQ:
        case VM86_GET_IRQ_BITS:
        case VM86_GET_AND_RESET_IRQ:
-               ret = do_vm86_irq_handling(regs.bx, (int)regs.cx);
+               ret = do_vm86_irq_handling(regs->bx, (int)regs->cx);
                goto out;
        case VM86_PLUS_INSTALL_CHECK:
                /*
@@ -261,14 +261,14 @@ asmlinkage int sys_vm86(struct pt_regs regs)
        ret = -EPERM;
        if (tsk->thread.saved_sp0)
                goto out;
-       v86 = (struct vm86plus_struct __user *)regs.cx;
+       v86 = (struct vm86plus_struct __user *)regs->cx;
        tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
                                       offsetof(struct kernel_vm86_struct, regs32) -
                                       sizeof(info.regs));
        ret = -EFAULT;
        if (tmp)
                goto out;
-       info.regs32 = &regs;
+       info.regs32 = regs;
        info.vm86plus.is_vm86pus = 1;
        tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
        do_sys_vm86(&info, tsk);
@@ -323,7 +323,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        info->regs32->ax = 0;
        tsk->thread.saved_sp0 = tsk->thread.sp0;
        tsk->thread.saved_fs = info->regs32->fs;
-       savesegment(gs, tsk->thread.saved_gs);
+       tsk->thread.saved_gs = get_user_gs(info->regs32);
 
        tss = &per_cpu(init_tss, get_cpu());
        tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
index bef58b4982dbc449696b624e24318cdf5b773926..2cc4a90e2cb3ae35e0b81425ef3f41e159f0a1a3 100644 (file)
@@ -680,10 +680,11 @@ static inline int __init activate_vmi(void)
        para_fill(pv_mmu_ops.write_cr2, SetCR2);
        para_fill(pv_mmu_ops.write_cr3, SetCR3);
        para_fill(pv_cpu_ops.write_cr4, SetCR4);
-       para_fill(pv_irq_ops.save_fl, GetInterruptMask);
-       para_fill(pv_irq_ops.restore_fl, SetInterruptMask);
-       para_fill(pv_irq_ops.irq_disable, DisableInterrupts);
-       para_fill(pv_irq_ops.irq_enable, EnableInterrupts);
+
+       para_fill(pv_irq_ops.save_fl.func, GetInterruptMask);
+       para_fill(pv_irq_ops.restore_fl.func, SetInterruptMask);
+       para_fill(pv_irq_ops.irq_disable.func, DisableInterrupts);
+       para_fill(pv_irq_ops.irq_enable.func, EnableInterrupts);
 
        para_fill(pv_cpu_ops.wbinvd, WBINVD);
        para_fill(pv_cpu_ops.read_tsc, RDTSC);
@@ -797,8 +798,8 @@ static inline int __init activate_vmi(void)
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
-       para_fill(apic_ops->read, APICRead);
-       para_fill(apic_ops->write, APICWrite);
+       para_fill(apic->read, APICRead);
+       para_fill(apic->write, APICWrite);
 #endif
 
        /*
index e5b088fffa40f56f1418179efcd89ba480bcae6d..33a788d5879c3186182201736330723fa3669164 100644 (file)
@@ -28,7 +28,6 @@
 
 #include <asm/vmi.h>
 #include <asm/vmi_time.h>
-#include <asm/arch_hooks.h>
 #include <asm/apicdef.h>
 #include <asm/apic.h>
 #include <asm/timer.h>
@@ -256,7 +255,7 @@ void __devinit vmi_time_bsp_init(void)
         */
        clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
        local_irq_disable();
-#ifdef CONFIG_X86_SMP
+#ifdef CONFIG_SMP
        /*
         * XXX handle_percpu_irq only defined for SMP; we need to switch over
         * to using it, since this is a local interrupt, which each CPU must
@@ -288,8 +287,7 @@ static struct clocksource clocksource_vmi;
 static cycle_t read_real_cycles(void)
 {
        cycle_t ret = (cycle_t)vmi_timer_ops.get_cycle_counter(VMI_CYCLES_REAL);
-       return ret >= clocksource_vmi.cycle_last ?
-               ret : clocksource_vmi.cycle_last;
+       return max(ret, clocksource_vmi.cycle_last);
 }
 
 static struct clocksource clocksource_vmi = {
index 82c67559dde7858f6bd5a433e38e595dc0cec7cf..0d860963f268f5a79c1198a6dffba0fee22f897d 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/thread_info.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/cache.h>
 #include <asm/boot.h>
 
@@ -178,14 +178,7 @@ SECTIONS
        __initramfs_end = .;
   }
 #endif
-  . = ALIGN(PAGE_SIZE);
-  .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {
-       __per_cpu_start = .;
-       *(.data.percpu.page_aligned)
-       *(.data.percpu)
-       *(.data.percpu.shared_aligned)
-       __per_cpu_end = .;
-  }
+  PERCPU(PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   /* freed after init ends here */
 
index 1a614c0e6befcb5893dcb50dbc9cefac16907645..5bf54e40c6efb41cf31fc3d44017d5b9acf402ec 100644 (file)
@@ -5,7 +5,8 @@
 #define LOAD_OFFSET __START_KERNEL_map
 
 #include <asm-generic/vmlinux.lds.h>
-#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/page_types.h>
 
 #undef i386    /* in case the preprocessor is a 32bit one */
 
@@ -13,12 +14,15 @@ OUTPUT_FORMAT("elf64-x86-64", "elf64-x86-64", "elf64-x86-64")
 OUTPUT_ARCH(i386:x86-64)
 ENTRY(phys_startup_64)
 jiffies_64 = jiffies;
-_proxy_pda = 1;
 PHDRS {
        text PT_LOAD FLAGS(5);  /* R_E */
        data PT_LOAD FLAGS(7);  /* RWE */
        user PT_LOAD FLAGS(7);  /* RWE */
        data.init PT_LOAD FLAGS(7);     /* RWE */
+#ifdef CONFIG_SMP
+       percpu PT_LOAD FLAGS(7);        /* RWE */
+#endif
+       data.init2 PT_LOAD FLAGS(7);    /* RWE */
        note PT_NOTE FLAGS(0);  /* ___ */
 }
 SECTIONS
@@ -208,14 +212,28 @@ SECTIONS
   __initramfs_end = .;
 #endif
 
+#ifdef CONFIG_SMP
+  /*
+   * percpu offsets are zero-based on SMP.  PERCPU_VADDR() changes the
+   * output PHDR, so the next output section - __data_nosave - should
+   * start in another section, data.init2.  Also, the pda should be at the
+   * head of the percpu area.  Preallocate it and define the percpu offset
+   * symbol so that it can be accessed as a percpu variable.
+   */
+  . = ALIGN(PAGE_SIZE);
+  PERCPU_VADDR(0, :percpu)
+#else
   PERCPU(PAGE_SIZE)
+#endif
 
   . = ALIGN(PAGE_SIZE);
   __init_end = .;
 
   . = ALIGN(PAGE_SIZE);
   __nosave_begin = .;
-  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { *(.data.nosave) }
+  .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
+      *(.data.nosave)
+  } :data.init2 /* use another section data.init2, see PERCPU_VADDR() above */
   . = ALIGN(PAGE_SIZE);
   __nosave_end = .;
 
@@ -239,8 +257,28 @@ SECTIONS
   DWARF_DEBUG
 }
 
+ /*
+  * Per-cpu symbols which need to be offset from __per_cpu_load
+  * for the boot processor.
+  */
+#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+INIT_PER_CPU(gdt_page);
+INIT_PER_CPU(irq_stack_union);
+
 /*
  * Build-time check on the image size:
  */
 ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
        "kernel image bigger than KERNEL_IMAGE_SIZE")
+
+#ifdef CONFIG_SMP
+ASSERT((per_cpu__irq_stack_union == 0),
+        "irq_stack_union is not at start of per-cpu area");
+#endif
+
+#ifdef CONFIG_KEXEC
+#include <asm/kexec.h>
+
+ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
+       "kexec control code size is too big")
+#endif
index a688f3bfaec2df184760f836dda4947edede828d..74de562812ccc0e3e4886c18d945d669d282c48c 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
  * ~AC is a shadow of IF.  If IF is 'on' AC should be 'off'
@@ -37,6 +37,7 @@ static unsigned long vsmp_save_fl(void)
                flags &= ~X86_EFLAGS_IF;
        return flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
 
 static void vsmp_restore_fl(unsigned long flags)
 {
@@ -46,6 +47,7 @@ static void vsmp_restore_fl(unsigned long flags)
                flags |= X86_EFLAGS_AC;
        native_restore_fl(flags);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
 
 static void vsmp_irq_disable(void)
 {
@@ -53,6 +55,7 @@ static void vsmp_irq_disable(void)
 
        native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
 
 static void vsmp_irq_enable(void)
 {
@@ -60,6 +63,7 @@ static void vsmp_irq_enable(void)
 
        native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
 }
+PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
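The "AC shadows IF" convention implemented by these helpers can be
summarised in a few lines; a minimal sketch, assuming the standard
EFLAGS bit values from processor-flags.h:

	/* Illustrative sketch of the vSMP flag encoding. */
	#include <stdio.h>

	#define X86_EFLAGS_IF 0x00000200UL	/* interrupt enable flag */
	#define X86_EFLAGS_AC 0x00040000UL	/* alignment check flag */

	/* Interrupts logically enabled <=> AC clear, and vice versa. */
	static unsigned long vsmp_encode(unsigned long flags, int irqs_on)
	{
		if (irqs_on)
			return flags & ~X86_EFLAGS_AC;
		return flags | X86_EFLAGS_AC;
	}

	int main(void)
	{
		printf("irqs on : AC=%d\n",
		       (vsmp_encode(0, 1) & X86_EFLAGS_AC) != 0);
		printf("irqs off: AC=%d\n",
		       (vsmp_encode(0, 0) & X86_EFLAGS_AC) != 0);
		return 0;
	}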
 
 static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf,
                                  unsigned long addr, unsigned len)
@@ -90,10 +94,10 @@ static void __init set_vsmp_pv_ops(void)
               cap, ctl);
        if (cap & ctl & (1 << 4)) {
                /* Setup irq ops and turn on vSMP  IRQ fastpath handling */
-               pv_irq_ops.irq_disable = vsmp_irq_disable;
-               pv_irq_ops.irq_enable  = vsmp_irq_enable;
-               pv_irq_ops.save_fl  = vsmp_save_fl;
-               pv_irq_ops.restore_fl  = vsmp_restore_fl;
+               pv_irq_ops.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
+               pv_irq_ops.irq_enable  = PV_CALLEE_SAVE(vsmp_irq_enable);
+               pv_irq_ops.save_fl  = PV_CALLEE_SAVE(vsmp_save_fl);
+               pv_irq_ops.restore_fl  = PV_CALLEE_SAVE(vsmp_restore_fl);
                pv_init_ops.patch = vsmp_patch;
 
                ctl &= ~(1 << 4);
@@ -110,7 +114,6 @@ static void __init set_vsmp_pv_ops(void)
 }
 #endif
 
-#ifdef CONFIG_PCI
 static int is_vsmp = -1;
 
 static void __init detect_vsmp_box(void)
@@ -135,15 +138,6 @@ int is_vsmp_box(void)
                return 0;
        }
 }
-#else
-static void __init detect_vsmp_box(void)
-{
-}
-int is_vsmp_box(void)
-{
-       return 0;
-}
-#endif
 
 void __init vsmp_init(void)
 {
index 695e426aa3540ef8d762e0d0c71779b2c5c6c036..3909e3ba5ce3b40d152f004c0348108cbb036d11 100644 (file)
@@ -58,5 +58,3 @@ EXPORT_SYMBOL(__memcpy);
 EXPORT_SYMBOL(empty_zero_page);
 EXPORT_SYMBOL(init_level4_pgt);
 EXPORT_SYMBOL(load_gs_index);
-
-EXPORT_SYMBOL(_proxy_pda);
index c70e12b1a6375bbe1eed42195cdfff320eaa07e4..8dab8f7844d3e2ac62371e413ce71a07eb280d51 100644 (file)
@@ -3,7 +3,6 @@ config LGUEST_GUEST
        select PARAVIRT
        depends on X86_32
        depends on !X86_PAE
-       depends on !X86_VOYAGER
        select VIRTIO
        select VIRTIO_RING
        select VIRTIO_CONSOLE
index 960a8d9c049c697e8505e003e96e7cffcd089f86..9fe4ddaa8f6ff1fc53bbe09443d32d733876131c 100644 (file)
@@ -173,24 +173,29 @@ static unsigned long save_fl(void)
 {
        return lguest_data.irq_enabled;
 }
+PV_CALLEE_SAVE_REGS_THUNK(save_fl);
 
 /* restore_flags() just sets the flags back to the value given. */
 static void restore_fl(unsigned long flags)
 {
        lguest_data.irq_enabled = flags;
 }
+PV_CALLEE_SAVE_REGS_THUNK(restore_fl);
 
 /* Interrupts go off... */
 static void irq_disable(void)
 {
        lguest_data.irq_enabled = 0;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
 
 /* Interrupts go on... */
 static void irq_enable(void)
 {
        lguest_data.irq_enabled = X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(irq_enable);
+
 /*:*/
 /*M:003 Note that we don't check for outstanding interrupts when we re-enable
  * them (or when we unmask an interrupt).  This seems to work for the moment,
@@ -278,7 +283,7 @@ static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
        /* There's one problem which normal hardware doesn't have: the Host
         * can't handle us removing entries we're currently using.  So we clear
         * the GS register here: if it's needed it'll be reloaded anyway. */
-       loadsegment(gs, 0);
+       lazy_load_gs(0);
        lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
 }
 
@@ -830,13 +835,14 @@ static u32 lguest_apic_safe_wait_icr_idle(void)
        return 0;
 }
 
-static struct apic_ops lguest_basic_apic_ops = {
-       .read = lguest_apic_read,
-       .write = lguest_apic_write,
-       .icr_read = lguest_apic_icr_read,
-       .icr_write = lguest_apic_icr_write,
-       .wait_icr_idle = lguest_apic_wait_icr_idle,
-       .safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle,
+static void set_lguest_basic_apic_ops(void)
+{
+       apic->read = lguest_apic_read;
+       apic->write = lguest_apic_write;
+       apic->icr_read = lguest_apic_icr_read;
+       apic->icr_write = lguest_apic_icr_write;
+       apic->wait_icr_idle = lguest_apic_wait_icr_idle;
+       apic->safe_wait_icr_idle = lguest_apic_safe_wait_icr_idle;
 };
 #endif
 
@@ -991,10 +997,10 @@ __init void lguest_init(void)
 
        /* interrupt-related operations */
        pv_irq_ops.init_IRQ = lguest_init_IRQ;
-       pv_irq_ops.save_fl = save_fl;
-       pv_irq_ops.restore_fl = restore_fl;
-       pv_irq_ops.irq_disable = irq_disable;
-       pv_irq_ops.irq_enable = irq_enable;
+       pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+       pv_irq_ops.restore_fl = PV_CALLEE_SAVE(restore_fl);
+       pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+       pv_irq_ops.irq_enable = PV_CALLEE_SAVE(irq_enable);
        pv_irq_ops.safe_halt = lguest_safe_halt;
 
        /* init-time operations */
@@ -1037,7 +1043,7 @@ __init void lguest_init(void)
 
 #ifdef CONFIG_X86_LOCAL_APIC
        /* apic read/write intercepts */
-       apic_ops = &lguest_basic_apic_ops;
+       set_lguest_basic_apic_ops();
 #endif
 
        /* time operations */
index ad374003742f98534db184ebc1f019ff845141a4..51f1504cddd9f20fa50e010c66ec0d83da2c2215 100644 (file)
@@ -28,7 +28,7 @@
 
 #include <linux/linkage.h>
 #include <asm/dwarf2.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
index c22981fa2f3a95240f3a4c99d00da41d1b7392ec..ad5441ed1b57fdfffe6822dede52cad626ae2b39 100644 (file)
@@ -1,30 +1,38 @@
 /* Copyright 2002 Andi Kleen */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
+
 #include <asm/cpufeature.h>
+#include <asm/dwarf2.h>
 
 /*
  * memcpy - Copy a memory block.
  *
- * Input:      
- * rdi destination
- * rsi source
- * rdx count
- * 
+ * Input:
+ *  rdi destination
+ *  rsi source
+ *  rdx count
+ *
  * Output:
  * rax original destination
- */    
+ */
 
+/*
+ * memcpy_c() - fast string ops (REP MOVSQ) based variant.
+ *
+ * Calls to this get patched into the kernel image via the
+ * alternative instructions framework:
+ */
        ALIGN
 memcpy_c:
        CFI_STARTPROC
-       movq %rdi,%rax
-       movl %edx,%ecx
-       shrl $3,%ecx
-       andl $7,%edx
+       movq %rdi, %rax
+
+       movl %edx, %ecx
+       shrl $3, %ecx
+       andl $7, %edx
        rep movsq
-       movl %edx,%ecx
+       movl %edx, %ecx
        rep movsb
        ret
        CFI_ENDPROC
@@ -33,99 +41,110 @@ ENDPROC(memcpy_c)
 ENTRY(__memcpy)
 ENTRY(memcpy)
        CFI_STARTPROC
-       pushq %rbx
-       CFI_ADJUST_CFA_OFFSET 8
-       CFI_REL_OFFSET rbx, 0
-       movq %rdi,%rax
 
-       movl %edx,%ecx
-       shrl $6,%ecx
+       /*
+        * Put the number of full 64-byte blocks into %ecx.
+        * Tail portion is handled at the end:
+        */
+       movq %rdi, %rax
+       movl %edx, %ecx
+       shrl   $6, %ecx
        jz .Lhandle_tail
 
        .p2align 4
 .Lloop_64:
+       /*
+        * We decrement the loop index here - and the zero-flag is
+        * checked at the end of the loop (instructions in between do
+        * not change the zero flag):
+        */
        decl %ecx
 
-       movq (%rsi),%r11
-       movq 8(%rsi),%r8
+       /*
+        * Move in blocks of 4x16 bytes:
+        */
+       movq 0*8(%rsi),         %r11
+       movq 1*8(%rsi),         %r8
+       movq %r11,              0*8(%rdi)
+       movq %r8,               1*8(%rdi)
 
-       movq %r11,(%rdi)
-       movq %r8,1*8(%rdi)
+       movq 2*8(%rsi),         %r9
+       movq 3*8(%rsi),         %r10
+       movq %r9,               2*8(%rdi)
+       movq %r10,              3*8(%rdi)
 
-       movq 2*8(%rsi),%r9
-       movq 3*8(%rsi),%r10
+       movq 4*8(%rsi),         %r11
+       movq 5*8(%rsi),         %r8
+       movq %r11,              4*8(%rdi)
+       movq %r8,               5*8(%rdi)
 
-       movq %r9,2*8(%rdi)
-       movq %r10,3*8(%rdi)
+       movq 6*8(%rsi),         %r9
+       movq 7*8(%rsi),         %r10
+       movq %r9,               6*8(%rdi)
+       movq %r10,              7*8(%rdi)
 
-       movq 4*8(%rsi),%r11
-       movq 5*8(%rsi),%r8
+       leaq 64(%rsi), %rsi
+       leaq 64(%rdi), %rdi
 
-       movq %r11,4*8(%rdi)
-       movq %r8,5*8(%rdi)
-
-       movq 6*8(%rsi),%r9
-       movq 7*8(%rsi),%r10
-
-       movq %r9,6*8(%rdi)
-       movq %r10,7*8(%rdi)
-
-       leaq 64(%rsi),%rsi
-       leaq 64(%rdi),%rdi
        jnz  .Lloop_64
 
 .Lhandle_tail:
-       movl %edx,%ecx
-       andl $63,%ecx
-       shrl $3,%ecx
+       movl %edx, %ecx
+       andl  $63, %ecx
+       shrl   $3, %ecx
        jz   .Lhandle_7
+
        .p2align 4
 .Lloop_8:
        decl %ecx
-       movq (%rsi),%r8
-       movq %r8,(%rdi)
-       leaq 8(%rdi),%rdi
-       leaq 8(%rsi),%rsi
+       movq (%rsi),            %r8
+       movq %r8,               (%rdi)
+       leaq 8(%rdi),           %rdi
+       leaq 8(%rsi),           %rsi
        jnz  .Lloop_8
 
 .Lhandle_7:
-       movl %edx,%ecx
-       andl $7,%ecx
-       jz .Lende
+       movl %edx, %ecx
+       andl $7, %ecx
+       jz .Lend
+
        .p2align 4
 .Lloop_1:
-       movb (%rsi),%r8b
-       movb %r8b,(%rdi)
+       movb (%rsi), %r8b
+       movb %r8b, (%rdi)
        incq %rdi
        incq %rsi
        decl %ecx
        jnz .Lloop_1
 
-.Lende:
-       popq %rbx
-       CFI_ADJUST_CFA_OFFSET -8
-       CFI_RESTORE rbx
+.Lend:
        ret
-.Lfinal:
        CFI_ENDPROC
 ENDPROC(memcpy)
 ENDPROC(__memcpy)
 
-       /* Some CPUs run faster using the string copy instructions.
-          It is also a lot simpler. Use this when possible */
+       /*
+        * Some CPUs run faster using the string copy instructions.
+        * It is also a lot simpler. Use this when possible:
+        */
 
-       .section .altinstr_replacement,"ax"
+       .section .altinstr_replacement, "ax"
 1:     .byte 0xeb                              /* jmp <disp8> */
        .byte (memcpy_c - memcpy) - (2f - 1b)   /* offset */
 2:
        .previous
-       .section .altinstructions,"a"
+
+       .section .altinstructions, "a"
        .align 8
        .quad memcpy
        .quad 1b
        .byte X86_FEATURE_REP_GOOD
-       /* Replace only beginning, memcpy is used to apply alternatives, so it
-        * is silly to overwrite itself with nops - reboot is only outcome... */
+
+       /*
+        * Replace only beginning, memcpy is used to apply alternatives,
+        * so it is silly to overwrite itself with nops - reboot is the
+        * only outcome...
+        */
        .byte 2b - 1b
        .byte 2b - 1b
        .previous
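
Each record emitted into .altinstructions above has a fixed shape; rendered as a C struct for readability (field names are illustrative, inferred from the directives above rather than quoted from a header):

    struct alt_entry_sketch {
            void          *instr;          /* .quad memcpy: code to be patched  */
            void          *replacement;    /* .quad 1b: the jmp to memcpy_c     */
            unsigned char  cpuid;          /* .byte X86_FEATURE_REP_GOOD        */
            unsigned char  instrlen;       /* .byte 2b - 1b: bytes to patch     */
            unsigned char  replacementlen; /* .byte 2b - 1b: replacement length */
    };

On CPUs that have the REP_GOOD feature the patching pass copies the two-byte jmp over the head of memcpy(); as the comment notes, only the beginning may be replaced, because memcpy() itself is used while the alternatives are applied.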
diff --git a/arch/x86/mach-default/Makefile b/arch/x86/mach-default/Makefile
deleted file mode 100644 (file)
index 012fe34..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-obj-y                          := setup.o
diff --git a/arch/x86/mach-default/setup.c b/arch/x86/mach-default/setup.c
deleted file mode 100644 (file)
index 50b5918..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- *     Machine specific setup for generic
- */
-
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/acpi.h>
-#include <asm/arch_hooks.h>
-#include <asm/e820.h>
-#include <asm/setup.h>
-
-#include <mach_ipi.h>
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define DEFAULT_SEND_IPI       (1)
-#else
-#define DEFAULT_SEND_IPI       (0)
-#endif
-
-int no_broadcast = DEFAULT_SEND_IPI;
-
-/**
- * pre_intr_init_hook - initialisation prior to setting up interrupt vectors
- *
- * Description:
- *     Perform any necessary interrupt initialisation prior to setting up
- *     the "ordinary" interrupt call gates.  For legacy reasons, the ISA
- *     interrupts should be initialised here if the machine emulates a PC
- *     in any way.
- **/
-void __init pre_intr_init_hook(void)
-{
-       if (x86_quirks->arch_pre_intr_init) {
-               if (x86_quirks->arch_pre_intr_init())
-                       return;
-       }
-       init_ISA_irqs();
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-       .handler = no_action,
-       .mask = CPU_MASK_NONE,
-       .name = "cascade",
-};
-
-/**
- * intr_init_hook - post gate setup interrupt initialisation
- *
- * Description:
- *     Fill in any interrupts that may have been left out by the general
- *     init_IRQ() routine.  Interrupts having to do with the machine rather
- *     than the devices on the I/O bus (like APIC interrupts in Intel MP
- *     systems) are started here.
- **/
-void __init intr_init_hook(void)
-{
-       if (x86_quirks->arch_intr_init) {
-               if (x86_quirks->arch_intr_init())
-                       return;
-       }
-       if (!acpi_ioapic)
-               setup_irq(2, &irq2);
-
-}
-
-/**
- * pre_setup_arch_hook - hook called prior to any setup_arch() execution
- *
- * Description:
- *     generally used to activate any machine specific identification
- *     routines that may be needed before setup_arch() runs.  On Voyager
- *     this is used to get the board revision and type.
- **/
-void __init pre_setup_arch_hook(void)
-{
-}
-
-/**
- * trap_init_hook - initialise system specific traps
- *
- * Description:
- *     Called as the final act of trap_init().  Used in VISWS to initialise
- *     the various board specific APIC traps.
- **/
-void __init trap_init_hook(void)
-{
-       if (x86_quirks->arch_trap_init) {
-               if (x86_quirks->arch_trap_init())
-                       return;
-       }
-}
-
-static struct irqaction irq0  = {
-       .handler = timer_interrupt,
-       .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
-       .mask = CPU_MASK_NONE,
-       .name = "timer"
-};
-
-/**
- * pre_time_init_hook - do any specific initialisations before time init.
- *
- **/
-void __init pre_time_init_hook(void)
-{
-       if (x86_quirks->arch_pre_time_init)
-               x86_quirks->arch_pre_time_init();
-}
-
-/**
- * time_init_hook - do any specific initialisations for the system timer.
- *
- * Description:
- *     Must plug the system timer interrupt source at HZ into the IRQ listed
- *     in irq_vectors.h:TIMER_IRQ
- **/
-void __init time_init_hook(void)
-{
-       if (x86_quirks->arch_time_init) {
-               /*
-                * A nonzero return code does not mean failure, it means
-                * that the architecture quirk does not want any
-                * generic (timer) setup to be performed after this:
-                */
-               if (x86_quirks->arch_time_init())
-                       return;
-       }
-
-       irq0.mask = cpumask_of_cpu(0);
-       setup_irq(0, &irq0);
-}
-
-#ifdef CONFIG_MCA
-/**
- * mca_nmi_hook - hook into MCA specific NMI chain
- *
- * Description:
- *     The MCA (Microchannel Architecture) has an NMI chain for NMI sources
- *     along the MCA bus.  Use this to hook into that chain if you will need
- *     it.
- **/
-void mca_nmi_hook(void)
-{
-       /*
-        * If I recall correctly, there's a whole bunch of other things that
-        * we can do to check for NMI problems, but that's all I know about
-        * at the moment.
-        */
-       pr_warning("NMI generated from unknown source!\n");
-}
-#endif
-
-static __init int no_ipi_broadcast(char *str)
-{
-       get_option(&str, &no_broadcast);
-       pr_info("Using %s mode\n",
-               no_broadcast ? "No IPI Broadcast" : "IPI Broadcast");
-       return 1;
-}
-__setup("no_ipi_broadcast=", no_ipi_broadcast);
-
-static int __init print_ipi_mode(void)
-{
-       pr_info("Using IPI %s mode\n",
-               no_broadcast ? "No-Shortcut" : "Shortcut");
-       return 0;
-}
-
-late_initcall(print_ipi_mode);
-
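All of the hooks in this file follow one convention: consult the x86_quirks callback if it is set, and skip the generic setup when that callback returns nonzero. A minimal C restatement of the convention (sketch only; the helper name is made up):

    /* Sketch of the quirk-dispatch convention used by the hooks above. */
    static void run_hook(int (*quirk)(void), void (*generic_setup)(void))
    {
            if (quirk && quirk()) {
                    /*
                     * A nonzero return is not an error: the quirk asks
                     * for the generic setup to be skipped entirely.
                     */
                    return;
            }
            generic_setup();
    }
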
diff --git a/arch/x86/mach-generic/Makefile b/arch/x86/mach-generic/Makefile
deleted file mode 100644 (file)
index 6730f4e..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the generic architecture
-#
-
-EXTRA_CFLAGS                   := -Iarch/x86/kernel
-
-obj-y                          := probe.o default.o
-obj-$(CONFIG_X86_NUMAQ)                += numaq.o
-obj-$(CONFIG_X86_SUMMIT)       += summit.o
-obj-$(CONFIG_X86_BIGSMP)       += bigsmp.o
-obj-$(CONFIG_X86_ES7000)       += es7000.o
diff --git a/arch/x86/mach-generic/bigsmp.c b/arch/x86/mach-generic/bigsmp.c
deleted file mode 100644 (file)
index bc4c784..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * APIC driver for "bigsmp" XAPIC machines with more than 8 virtual CPUs.
- * Drives the local APIC in "clustered mode".
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dmi.h>
-#include <asm/bigsmp/apicdef.h>
-#include <linux/smp.h>
-#include <asm/bigsmp/apic.h>
-#include <asm/bigsmp/ipi.h>
-#include <asm/mach-default/mach_mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-static int dmi_bigsmp; /* can be set by dmi scanners */
-
-static int hp_ht_bigsmp(const struct dmi_system_id *d)
-{
-       printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
-       dmi_bigsmp = 1;
-       return 0;
-}
-
-
-static const struct dmi_system_id bigsmp_dmi_table[] = {
-       { hp_ht_bigsmp, "HP ProLiant DL760 G2",
-       { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
-       DMI_MATCH(DMI_BIOS_VERSION, "P44-"),}
-       },
-
-       { hp_ht_bigsmp, "HP ProLiant DL740",
-       { DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
-       DMI_MATCH(DMI_BIOS_VERSION, "P47-"),}
-       },
-        { }
-};
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       cpus_clear(*retmask);
-       cpu_set(cpu, *retmask);
-}
-
-static int probe_bigsmp(void)
-{
-       if (def_to_bigsmp)
-               dmi_bigsmp = 1;
-       else
-               dmi_check_system(bigsmp_dmi_table);
-       return dmi_bigsmp;
-}
-
-struct genapic apic_bigsmp = APIC_INIT("bigsmp", probe_bigsmp);
diff --git a/arch/x86/mach-generic/default.c b/arch/x86/mach-generic/default.c
deleted file mode 100644 (file)
index e63a4a7..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Default generic APIC driver. This handles up to 8 CPUs.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/mach-default/mach_apicdef.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <asm/mach-default/mach_apic.h>
-#include <asm/mach-default/mach_ipi.h>
-#include <asm/mach-default/mach_mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-/* should be called last. */
-static int probe_default(void)
-{
-       return 1;
-}
-
-struct genapic apic_default = APIC_INIT("default", probe_default);
diff --git a/arch/x86/mach-generic/es7000.c b/arch/x86/mach-generic/es7000.c
deleted file mode 100644 (file)
index c2ded14..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * APIC driver for the Unisys ES7000 chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/es7000/apicdef.h>
-#include <linux/smp.h>
-#include <asm/es7000/apic.h>
-#include <asm/es7000/ipi.h>
-#include <asm/es7000/mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-void __init es7000_update_genapic_to_cluster(void)
-{
-       genapic->target_cpus = target_cpus_cluster;
-       genapic->int_delivery_mode = INT_DELIVERY_MODE_CLUSTER;
-       genapic->int_dest_mode = INT_DEST_MODE_CLUSTER;
-       genapic->no_balance_irq = NO_BALANCE_IRQ_CLUSTER;
-
-       genapic->init_apic_ldr = init_apic_ldr_cluster;
-
-       genapic->cpu_mask_to_apicid = cpu_mask_to_apicid_cluster;
-}
-
-static int probe_es7000(void)
-{
-       /* probed later in mptable/ACPI hooks */
-       return 0;
-}
-
-extern void es7000_sw_apic(void);
-static void __init enable_apic_mode(void)
-{
-       es7000_sw_apic();
-       return;
-}
-
-static __init int
-mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       if (mpc->oemptr) {
-               struct mpc_oemtable *oem_table =
-                       (struct mpc_oemtable *)mpc->oemptr;
-               if (!strncmp(oem, "UNISYS", 6))
-                       return parse_unisys_oem((char *)oem_table);
-       }
-       return 0;
-}
-
-#ifdef CONFIG_ACPI
-/* Hook from generic ACPI tables.c */
-static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       unsigned long oem_addr = 0;
-       int check_dsdt;
-       int ret = 0;
-
-       /* check dsdt first to avoid clearing the fixmap for oem_addr */
-       check_dsdt = es7000_check_dsdt();
-
-       if (!find_unisys_acpi_oem_table(&oem_addr)) {
-               if (check_dsdt)
-                       ret = parse_unisys_oem((char *)oem_addr);
-               else {
-                       setup_unisys();
-                       ret = 1;
-               }
-               /*
-                * we need to unmap it
-                */
-               unmap_unisys_acpi_oem_table(oem_addr);
-       }
-       return ret;
-}
-#else
-static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 0;
-}
-#endif
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt destination.
-        */
-       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);
diff --git a/arch/x86/mach-generic/numaq.c b/arch/x86/mach-generic/numaq.c
deleted file mode 100644 (file)
index 3679e22..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * APIC driver for the IBM NUMAQ chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/numaq/apicdef.h>
-#include <linux/smp.h>
-#include <asm/numaq/apic.h>
-#include <asm/numaq/ipi.h>
-#include <asm/numaq/mpparse.h>
-#include <asm/numaq/wakecpu.h>
-#include <asm/numaq.h>
-
-static int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       numaq_mps_oem_check(mpc, oem, productid);
-       return found_numaq;
-}
-
-static int probe_numaq(void)
-{
-       /* already know from get_memcfg_numaq() */
-       return found_numaq;
-}
-
-/* Hook from generic ACPI tables.c */
-static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       return 0;
-}
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt destination.
-        */
-       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);
diff --git a/arch/x86/mach-generic/probe.c b/arch/x86/mach-generic/probe.c
deleted file mode 100644 (file)
index 15a38da..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * Copyright 2003 Andi Kleen, SuSE Labs.
- * Subject to the GNU Public License, v.2
- *
- * Generic x86 APIC driver probe layer.
- */
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/ctype.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <asm/fixmap.h>
-#include <asm/mpspec.h>
-#include <asm/apicdef.h>
-#include <asm/genapic.h>
-#include <asm/setup.h>
-
-extern struct genapic apic_numaq;
-extern struct genapic apic_summit;
-extern struct genapic apic_bigsmp;
-extern struct genapic apic_es7000;
-extern struct genapic apic_default;
-
-struct genapic *genapic = &apic_default;
-
-static struct genapic *apic_probe[] __initdata = {
-#ifdef CONFIG_X86_NUMAQ
-       &apic_numaq,
-#endif
-#ifdef CONFIG_X86_SUMMIT
-       &apic_summit,
-#endif
-#ifdef CONFIG_X86_BIGSMP
-       &apic_bigsmp,
-#endif
-#ifdef CONFIG_X86_ES7000
-       &apic_es7000,
-#endif
-       &apic_default,  /* must be last */
-       NULL,
-};
-
-static int cmdline_apic __initdata;
-static int __init parse_apic(char *arg)
-{
-       int i;
-
-       if (!arg)
-               return -EINVAL;
-
-       for (i = 0; apic_probe[i]; i++) {
-               if (!strcmp(apic_probe[i]->name, arg)) {
-                       genapic = apic_probe[i];
-                       cmdline_apic = 1;
-                       return 0;
-               }
-       }
-
-       if (x86_quirks->update_genapic)
-               x86_quirks->update_genapic();
-
-       /* Parsed again by __setup for debug/verbose */
-       return 0;
-}
-early_param("apic", parse_apic);
-
-void __init generic_bigsmp_probe(void)
-{
-#ifdef CONFIG_X86_BIGSMP
-       /*
-        * This routine is used to switch to bigsmp mode when
-        * - There is no apic= option specified by the user
-        * - generic_apic_probe() has chosen apic_default as the sub_arch
-        * - we find more than 8 CPUs in acpi LAPIC listing with xAPIC support
-        */
-
-       if (!cmdline_apic && genapic == &apic_default) {
-               if (apic_bigsmp.probe()) {
-                       genapic = &apic_bigsmp;
-                       if (x86_quirks->update_genapic)
-                               x86_quirks->update_genapic();
-                       printk(KERN_INFO "Overriding APIC driver with %s\n",
-                              genapic->name);
-               }
-       }
-#endif
-}
-
-void __init generic_apic_probe(void)
-{
-       if (!cmdline_apic) {
-               int i;
-               for (i = 0; apic_probe[i]; i++) {
-                       if (apic_probe[i]->probe()) {
-                               genapic = apic_probe[i];
-                               break;
-                       }
-               }
-               /* Not visible without early console */
-               if (!apic_probe[i])
-                       panic("Didn't find an APIC driver");
-
-               if (x86_quirks->update_genapic)
-                       x86_quirks->update_genapic();
-       }
-       printk(KERN_INFO "Using APIC driver %s\n", genapic->name);
-}
-
-/* These functions can switch the APIC even after the initial ->probe() */
-
-int __init mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
-{
-       int i;
-       for (i = 0; apic_probe[i]; ++i) {
-               if (apic_probe[i]->mps_oem_check(mpc, oem, productid)) {
-                       if (!cmdline_apic) {
-                               genapic = apic_probe[i];
-                               if (x86_quirks->update_genapic)
-                                       x86_quirks->update_genapic();
-                               printk(KERN_INFO "Switched to APIC driver `%s'.\n",
-                                      genapic->name);
-                       }
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-       int i;
-       for (i = 0; apic_probe[i]; ++i) {
-               if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
-                       if (!cmdline_apic) {
-                               genapic = apic_probe[i];
-                               if (x86_quirks->update_genapic)
-                                       x86_quirks->update_genapic();
-                               printk(KERN_INFO "Switched to APIC driver `%s'.\n",
-                                      genapic->name);
-                       }
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-int hard_smp_processor_id(void)
-{
-       return genapic->get_apic_id(*(unsigned long *)(APIC_BASE+APIC_ID));
-}
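
Worth noting: probe_default() in default.c always returns 1, so the first-match loop in generic_apic_probe() depends on apic_default being the last real entry in apic_probe[]. A minimal model of that loop (illustrative types, not kernel API):

    #include <stddef.h>

    struct apic_driver_sketch {
            const char *name;
            int (*probe)(void);
    };

    static const struct apic_driver_sketch *
    first_match(const struct apic_driver_sketch **table)
    {
            int i;

            for (i = 0; table[i]; i++)
                    if (table[i]->probe())
                            return table[i];  /* first successful probe wins */
            return NULL;                      /* no driver: the caller panics */
    }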
diff --git a/arch/x86/mach-generic/summit.c b/arch/x86/mach-generic/summit.c
deleted file mode 100644 (file)
index 2821ffc..0000000
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * APIC driver for the IBM "Summit" chipset.
- */
-#define APIC_DEFINITION 1
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <asm/mpspec.h>
-#include <asm/genapic.h>
-#include <asm/fixmap.h>
-#include <asm/apicdef.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <asm/summit/apicdef.h>
-#include <linux/smp.h>
-#include <asm/summit/apic.h>
-#include <asm/summit/ipi.h>
-#include <asm/summit/mpparse.h>
-#include <asm/mach-default/mach_wakecpu.h>
-
-static int probe_summit(void)
-{
-       /* probed later in mptable/ACPI hooks */
-       return 0;
-}
-
-static void vector_allocation_domain(int cpu, cpumask_t *retmask)
-{
-       /* Careful. Some cpus do not strictly honor the set of cpus
-        * specified in the interrupt destination when using lowest
-        * priority interrupt delivery mode.
-        *
-        * In particular there was a hyperthreading cpu observed to
-        * deliver interrupts to the wrong hyperthread when only one
-        * hyperthread was specified in the interrupt destination.
-        */
-       *retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
-}
-
-struct genapic apic_summit = APIC_INIT("summit", probe_summit);
diff --git a/arch/x86/mach-rdc321x/Makefile b/arch/x86/mach-rdc321x/Makefile
deleted file mode 100644 (file)
index 8325b4c..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the RDC321x specific parts of the kernel
-#
-obj-$(CONFIG_X86_RDC321X)        := gpio.o platform.o
-
diff --git a/arch/x86/mach-rdc321x/gpio.c b/arch/x86/mach-rdc321x/gpio.c
deleted file mode 100644 (file)
index 247f33d..0000000
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- *  GPIO support for RDC SoC R3210/R8610
- *
- *  Copyright (C) 2007, Florian Fainelli <florian@openwrt.org>
- *  Copyright (C) 2008, Volker Weiss <dev@tintuc.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-
-#include <linux/spinlock.h>
-#include <linux/io.h>
-#include <linux/types.h>
-#include <linux/module.h>
-
-#include <asm/gpio.h>
-#include <asm/mach-rdc321x/rdc321x_defs.h>
-
-
-/* spin lock to protect our private copies of the GPIO data registers plus
-   the access to PCI conf registers. */
-static DEFINE_SPINLOCK(gpio_lock);
-
-/* copy of GPIO data registers */
-static u32 gpio_data_reg1;
-static u32 gpio_data_reg2;
-
-static u32 gpio_request_data[2];
-
-
-static inline void rdc321x_conf_write(unsigned addr, u32 value)
-{
-       outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-       outl(value, RDC3210_CFGREG_DATA);
-}
-
-static inline void rdc321x_conf_or(unsigned addr, u32 value)
-{
-       outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-       value |= inl(RDC3210_CFGREG_DATA);
-       outl(value, RDC3210_CFGREG_DATA);
-}
-
-static inline u32 rdc321x_conf_read(unsigned addr)
-{
-       outl((1 << 31) | (7 << 11) | addr, RDC3210_CFGREG_ADDR);
-
-       return inl(RDC3210_CFGREG_DATA);
-}
-
-/* configure pin as GPIO */
-static void rdc321x_configure_gpio(unsigned gpio)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       rdc321x_conf_or(gpio < 32
-               ? RDC321X_GPIO_CTRL_REG1 : RDC321X_GPIO_CTRL_REG2,
-               1 << (gpio & 0x1f));
-       spin_unlock_irqrestore(&gpio_lock, flags);
-}
-
-/* initially set up the 2 copies of the gpio data registers.
-   This function must be called by the platform setup code. */
-void __init rdc321x_gpio_setup()
-{
-       /* this might not be what others (BIOS, bootloader, etc.)
-          wrote to these registers before, but it's a good guess. Still
-          better than just using 0xffffffff. */
-
-       gpio_data_reg1 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG1);
-       gpio_data_reg2 = rdc321x_conf_read(RDC321X_GPIO_DATA_REG2);
-}
-
-/* determine if the gpio number is valid */
-static inline int rdc321x_is_gpio(unsigned gpio)
-{
-       return gpio <= RDC321X_MAX_GPIO;
-}
-
-/* request GPIO */
-int rdc_gpio_request(unsigned gpio, const char *label)
-{
-       unsigned long flags;
-
-       if (!rdc321x_is_gpio(gpio))
-               return -EINVAL;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       if (gpio_request_data[(gpio & 0x20) ? 1 : 0] & (1 << (gpio & 0x1f)))
-               goto inuse;
-       gpio_request_data[(gpio & 0x20) ? 1 : 0] |= (1 << (gpio & 0x1f));
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       return 0;
-inuse:
-       spin_unlock_irqrestore(&gpio_lock, flags);
-       return -EINVAL;
-}
-EXPORT_SYMBOL(rdc_gpio_request);
-
-/* release previously-claimed GPIO */
-void rdc_gpio_free(unsigned gpio)
-{
-       unsigned long flags;
-
-       if (!rdc321x_is_gpio(gpio))
-               return;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       gpio_request_data[(gpio & 0x20) ? 1 : 0] &= ~(1 << (gpio & 0x1f));
-       spin_unlock_irqrestore(&gpio_lock, flags);
-}
-EXPORT_SYMBOL(rdc_gpio_free);
-
-/* read GPIO pin */
-int rdc_gpio_get_value(unsigned gpio)
-{
-       u32 reg;
-       unsigned long flags;
-
-       spin_lock_irqsave(&gpio_lock, flags);
-       reg = rdc321x_conf_read(gpio < 32
-               ? RDC321X_GPIO_DATA_REG1 : RDC321X_GPIO_DATA_REG2);
-       spin_unlock_irqrestore(&gpio_lock, flags);
-
-       return (1 << (gpio & 0x1f)) & reg ? 1 : 0;
-}
-EXPORT_SYMBOL(rdc_gpio_get_value);
-
-/* set GPIO pin to value */
-void rdc_gpio_set_value(unsigned gpio, int value)
-{
-       unsigned long flags;
-       u32 reg;
-
-       reg = 1 << (gpio & 0x1f);
-       if (gpio < 32) {
-               spin_lock_irqsave(&gpio_lock, flags);
-               if (value)
-                       gpio_data_reg1 |= reg;
-               else
-                       gpio_data_reg1 &= ~reg;
-               rdc321x_conf_write(RDC321X_GPIO_DATA_REG1, gpio_data_reg1);
-               spin_unlock_irqrestore(&gpio_lock, flags);
-       } else {
-               spin_lock_irqsave(&gpio_lock, flags);
-               if (value)
-                       gpio_data_reg2 |= reg;
-               else
-                       gpio_data_reg2 &= ~reg;
-               rdc321x_conf_write(RDC321X_GPIO_DATA_REG2, gpio_data_reg2);
-               spin_unlock_irqrestore(&gpio_lock, flags);
-       }
-}
-EXPORT_SYMBOL(rdc_gpio_set_value);
-
-/* configure GPIO pin as input */
-int rdc_gpio_direction_input(unsigned gpio)
-{
-       if (!rdc321x_is_gpio(gpio))
-               return -EINVAL;
-
-       rdc321x_configure_gpio(gpio);
-
-       return 0;
-}
-EXPORT_SYMBOL(rdc_gpio_direction_input);
-
-/* configure GPIO pin as output and set value */
-int rdc_gpio_direction_output(unsigned gpio, int value)
-{
-       if (!rdc321x_is_gpio(gpio))
-               return -EINVAL;
-
-       gpio_set_value(gpio, value);
-       rdc321x_configure_gpio(gpio);
-
-       return 0;
-}
-EXPORT_SYMBOL(rdc_gpio_direction_output);
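
Throughout this driver, bit 5 of the GPIO number selects the register bank and the low five bits select the bit within it. A small standalone check of that arithmetic (the pin number is an arbitrary example):

    #include <stdio.h>

    int main(void)
    {
            unsigned gpio = 35;                    /* example pin */
            unsigned bank = (gpio & 0x20) ? 1 : 0; /* bit 5 set -> second bank */
            unsigned bit  = gpio & 0x1f;           /* 35 & 31 = 3 */

            /* prints: gpio 35 -> DATA_REG2, mask 0x00000008 */
            printf("gpio %u -> DATA_REG%u, mask 0x%08x\n",
                   gpio, bank + 1, 1u << bit);
            return 0;
    }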
diff --git a/arch/x86/mach-rdc321x/platform.c b/arch/x86/mach-rdc321x/platform.c
deleted file mode 100644 (file)
index 4f4e50c..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- *  Generic RDC321x platform devices
- *
- *  Copyright (C) 2007 Florian Fainelli <florian@openwrt.org>
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version 2
- *  of the License, or (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the
- *  Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- *  Boston, MA  02110-1301, USA.
- *
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-
-#include <asm/gpio.h>
-
-/* LEDS */
-static struct gpio_led default_leds[] = {
-       { .name = "rdc:dmz", .gpio = 1, },
-};
-
-static struct gpio_led_platform_data rdc321x_led_data = {
-       .num_leds = ARRAY_SIZE(default_leds),
-       .leds = default_leds,
-};
-
-static struct platform_device rdc321x_leds = {
-       .name = "leds-gpio",
-       .id = -1,
-       .dev = {
-               .platform_data = &rdc321x_led_data,
-       }
-};
-
-/* Watchdog */
-static struct platform_device rdc321x_wdt = {
-       .name = "rdc321x-wdt",
-       .id = -1,
-       .num_resources = 0,
-};
-
-static struct platform_device *rdc321x_devs[] = {
-       &rdc321x_leds,
-       &rdc321x_wdt
-};
-
-static int __init rdc_board_setup(void)
-{
-       rdc321x_gpio_setup();
-
-       return platform_add_devices(rdc321x_devs, ARRAY_SIZE(rdc321x_devs));
-}
-
-arch_initcall(rdc_board_setup);
diff --git a/arch/x86/mach-voyager/Makefile b/arch/x86/mach-voyager/Makefile
deleted file mode 100644 (file)
index 15c250b..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-EXTRA_CFLAGS   := -Iarch/x86/kernel
-obj-y                  := setup.o voyager_basic.o voyager_thread.o
-
-obj-$(CONFIG_SMP)      += voyager_smp.o voyager_cat.o
diff --git a/arch/x86/mach-voyager/setup.c b/arch/x86/mach-voyager/setup.c
deleted file mode 100644 (file)
index 8e51183..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *     Machine specific setup for Voyager
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/arch_hooks.h>
-#include <asm/voyager.h>
-#include <asm/e820.h>
-#include <asm/io.h>
-#include <asm/setup.h>
-
-void __init pre_intr_init_hook(void)
-{
-       init_ISA_irqs();
-}
-
-/*
- * IRQ2 is cascade interrupt to second interrupt controller
- */
-static struct irqaction irq2 = {
-       .handler = no_action,
-       .mask = CPU_MASK_NONE,
-       .name = "cascade",
-};
-
-void __init intr_init_hook(void)
-{
-#ifdef CONFIG_SMP
-       voyager_smp_intr_init();
-#endif
-
-       setup_irq(2, &irq2);
-}
-
-static void voyager_disable_tsc(void)
-{
-       /* Voyagers run their CPUs from independent clocks, so disable
-        * the TSC code because we can't sync them */
-       setup_clear_cpu_cap(X86_FEATURE_TSC);
-}
-
-void __init pre_setup_arch_hook(void)
-{
-       voyager_disable_tsc();
-}
-
-void __init pre_time_init_hook(void)
-{
-       voyager_disable_tsc();
-}
-
-void __init trap_init_hook(void)
-{
-}
-
-static struct irqaction irq0 = {
-       .handler = timer_interrupt,
-       .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
-       .mask = CPU_MASK_NONE,
-       .name = "timer"
-};
-
-void __init time_init_hook(void)
-{
-       irq0.mask = cpumask_of_cpu(safe_smp_processor_id());
-       setup_irq(0, &irq0);
-}
-
-/* Hook for machine specific memory setup. */
-
-char *__init machine_specific_memory_setup(void)
-{
-       char *who;
-       int new_nr;
-
-       who = "NOT VOYAGER";
-
-       if (voyager_level == 5) {
-               __u32 addr, length;
-               int i;
-
-               who = "Voyager-SUS";
-
-               e820.nr_map = 0;
-               for (i = 0; voyager_memory_detect(i, &addr, &length); i++) {
-                       e820_add_region(addr, length, E820_RAM);
-               }
-               return who;
-       } else if (voyager_level == 4) {
-               __u32 tom;
-               __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
-               /* select the DINO config space */
-               outb(VOYAGER_DINO, VOYAGER_CAT_CONFIG_PORT);
-               /* Read DINO top of memory register */
-               tom = ((inb(catbase + 0x4) & 0xf0) << 16)
-                   + ((inb(catbase + 0x5) & 0x7f) << 24);
-
-               if (inb(catbase) != VOYAGER_DINO) {
-                       printk(KERN_ERR
-                              "Voyager: Failed to get DINO for L4, setting tom to EXT_MEM_K\n");
-                       tom = (boot_params.screen_info.ext_mem_k) << 10;
-               }
-               who = "Voyager-TOM";
-               e820_add_region(0, 0x9f000, E820_RAM);
-               /* map from 1M to top of memory */
-               e820_add_region(1 * 1024 * 1024, tom - 1 * 1024 * 1024,
-                                 E820_RAM);
-               /* FIXME: Should check the ASICs to see if I need to
-                * take out the 8M window.  Just do it at the moment
-                * */
-               e820_add_region(8 * 1024 * 1024, 8 * 1024 * 1024,
-                                 E820_RESERVED);
-               return who;
-       }
-
-       return default_machine_specific_memory_setup();
-}
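
The DINO top-of-memory decode above assembles bits 20-23 of the address from one byte-wide register and bits 24-30 from the next. A worked example with made-up register values:

    #include <stdio.h>

    int main(void)
    {
            unsigned char reg4 = 0x30, reg5 = 0x02;  /* hypothetical readings */
            unsigned long tom  = ((unsigned long)(reg4 & 0xf0) << 16)
                               + ((unsigned long)(reg5 & 0x7f) << 24);

            /* (0x30 << 16) + (0x02 << 24) = 0x02300000 = 35 MiB */
            printf("top of memory = 0x%08lx (%lu MiB)\n", tom, tom >> 20);
            return 0;
    }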
diff --git a/arch/x86/mach-voyager/voyager_basic.c b/arch/x86/mach-voyager/voyager_basic.c
deleted file mode 100644 (file)
index 46d6f80..0000000
+++ /dev/null
@@ -1,317 +0,0 @@
-/* Copyright (C) 1999,2001 
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * This file contains all the voyager specific routines for getting
- * initialisation of the architecture to function.  For additional
- * features see:
- *
- *     voyager_cat.c - Voyager CAT bus interface
- *     voyager_smp.c - Voyager SMP hal (emulates linux smp.c)
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/reboot.h>
-#include <linux/sysrq.h>
-#include <linux/smp.h>
-#include <linux/nodemask.h>
-#include <asm/io.h>
-#include <asm/voyager.h>
-#include <asm/vic.h>
-#include <linux/pm.h>
-#include <asm/tlbflush.h>
-#include <asm/arch_hooks.h>
-#include <asm/i8253.h>
-
-/*
- * Power off function, if any
- */
-void (*pm_power_off) (void);
-EXPORT_SYMBOL(pm_power_off);
-
-int voyager_level = 0;
-
-struct voyager_SUS *voyager_SUS = NULL;
-
-#ifdef CONFIG_SMP
-static void voyager_dump(int dummy1, struct tty_struct *dummy3)
-{
-       /* get here via a sysrq */
-       voyager_smp_dump();
-}
-
-static struct sysrq_key_op sysrq_voyager_dump_op = {
-       .handler = voyager_dump,
-       .help_msg = "Voyager",
-       .action_msg = "Dump Voyager Status",
-};
-#endif
-
-void voyager_detect(struct voyager_bios_info *bios)
-{
-       if (bios->len != 0xff) {
-               int class = (bios->class_1 << 8)
-                   | (bios->class_2 & 0xff);
-
-               printk("Voyager System detected.\n"
-                      "        Class %x, Revision %d.%d\n",
-                      class, bios->major, bios->minor);
-               if (class == VOYAGER_LEVEL4)
-                       voyager_level = 4;
-               else if (class < VOYAGER_LEVEL5_AND_ABOVE)
-                       voyager_level = 3;
-               else
-                       voyager_level = 5;
-               printk("        Architecture Level %d\n", voyager_level);
-               if (voyager_level < 4)
-                       printk
-                           ("\n**WARNING**: Voyager HAL only supports Levels 4 and 5 Architectures at the moment\n\n");
-               /* install the power off handler */
-               pm_power_off = voyager_power_off;
-#ifdef CONFIG_SMP
-               register_sysrq_key('v', &sysrq_voyager_dump_op);
-#endif
-       } else {
-               printk("\n\n**WARNING**: No Voyager Subsystem Found\n");
-       }
-}
-
-void voyager_system_interrupt(int cpl, void *dev_id)
-{
-       printk("Voyager: detected system interrupt\n");
-}
-
-/* Routine to read information from the extended CMOS area */
-__u8 voyager_extended_cmos_read(__u16 addr)
-{
-       outb(addr & 0xff, 0x74);
-       outb((addr >> 8) & 0xff, 0x75);
-       return inb(0x76);
-}
-
-/* internal definitions for the SUS Click Map of memory */
-
-#define CLICK_ENTRIES  16
-#define CLICK_SIZE     4096    /* click to byte conversion for Length */
-
-typedef struct ClickMap {
-       struct Entry {
-               __u32 Address;
-               __u32 Length;
-       } Entry[CLICK_ENTRIES];
-} ClickMap_t;
-
-/* This routine is pretty much an awful hack to read the bios clickmap by
- * mapping it into page 0.  There are usually three regions in the map:
- *     Base Memory
- *     Extended Memory
- *     zero length marker for end of map
- *
- * Returns are 0 for failure and 1 for success on extracting region.
- */
-int __init voyager_memory_detect(int region, __u32 * start, __u32 * length)
-{
-       int i;
-       int retval = 0;
-       __u8 cmos[4];
-       ClickMap_t *map;
-       unsigned long map_addr;
-       unsigned long old;
-
-       if (region >= CLICK_ENTRIES) {
-               printk("Voyager: Illegal ClickMap region %d\n", region);
-               return 0;
-       }
-
-       for (i = 0; i < sizeof(cmos); i++)
-               cmos[i] =
-                   voyager_extended_cmos_read(VOYAGER_MEMORY_CLICKMAP + i);
-
-       map_addr = *(unsigned long *)cmos;
-
-       /* steal page 0 for this */
-       old = pg0[0];
-       pg0[0] = ((map_addr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
-       local_flush_tlb();
-       /* now clear everything out but page 0 */
-       map = (ClickMap_t *) (map_addr & (~PAGE_MASK));
-
-       /* zero length is the end of the clickmap */
-       if (map->Entry[region].Length != 0) {
-               *length = map->Entry[region].Length * CLICK_SIZE;
-               *start = map->Entry[region].Address;
-               retval = 1;
-       }
-
-       /* replace the mapping */
-       pg0[0] = old;
-       local_flush_tlb();
-       return retval;
-}
-
-/* voyager specific handling code for timer interrupts.  Used to hand
- * off the timer tick to the SMP code, since the VIC doesn't have an
- * internal timer (The QIC does, but that's another story). */
-void voyager_timer_interrupt(void)
-{
-       if ((jiffies & 0x3ff) == 0) {
-
-               /* There seems to be something flaky in either
-                * hardware or software that is resetting the timer 0
-                * count to something much higher than it should be
-                * This seems to occur in the boot sequence, just
-                * before root is mounted.  Therefore, every 10
-                * seconds or so, we sanity check the timer zero count
-                * and kick it back to where it should be.
-                *
-                * FIXME: This is the most awful hack yet seen.  I
-                * should work out exactly what is interfering with
-                * the timer count settings early in the boot sequence
-                * and swiftly introduce it to something sharp and
-                * pointy.  */
-               __u16 val;
-
-               spin_lock(&i8253_lock);
-
-               outb_p(0x00, 0x43);
-               val = inb_p(0x40);
-               val |= inb(0x40) << 8;
-               spin_unlock(&i8253_lock);
-
-               if (val > LATCH) {
-                       printk
-                           ("\nVOYAGER: countdown timer value too high (%d), resetting\n\n",
-                            val);
-                       spin_lock(&i8253_lock);
-                       outb(0x34, 0x43);
-                       outb_p(LATCH & 0xff, 0x40);     /* LSB */
-                       outb(LATCH >> 8, 0x40); /* MSB */
-                       spin_unlock(&i8253_lock);
-               }
-       }
-#ifdef CONFIG_SMP
-       smp_vic_timer_interrupt();
-#endif
-}
-
-void voyager_power_off(void)
-{
-       printk("VOYAGER Power Off\n");
-
-       if (voyager_level == 5) {
-               voyager_cat_power_off();
-       } else if (voyager_level == 4) {
-               /* This doesn't apparently work on most L4 machines,
-                * but the specs say to do this to get automatic power
-                * off.  Unfortunately, if it doesn't power off the
-                * machine, it ends up doing a cold restart, which
-                * isn't really intended, so comment out the code */
-#if 0
-               int port;
-
-               /* enable the voyager Configuration Space */
-               outb((inb(VOYAGER_MC_SETUP) & 0xf0) | 0x8, VOYAGER_MC_SETUP);
-               /* the port for the power off flag is an offset from the
-                  floating base */
-               port = (inb(VOYAGER_SSPB_RELOCATION_PORT) << 8) + 0x21;
-               /* set the power off flag */
-               outb(inb(port) | 0x1, port);
-#endif
-       }
-       /* and wait for it to happen */
-       local_irq_disable();
-       for (;;)
-               halt();
-}
-
-/* copied from process.c */
-static inline void kb_wait(void)
-{
-       int i;
-
-       for (i = 0; i < 0x10000; i++)
-               if ((inb_p(0x64) & 0x02) == 0)
-                       break;
-}
-
-void machine_shutdown(void)
-{
-       /* Architecture specific shutdown needed before a kexec */
-}
-
-void machine_restart(char *cmd)
-{
-       printk("Voyager Warm Restart\n");
-       kb_wait();
-
-       if (voyager_level == 5) {
-               /* write magic values to the RTC to inform system that
-                * shutdown is beginning */
-               outb(0x8f, 0x70);
-               outb(0x5, 0x71);
-
-               udelay(50);
-               outb(0xfe, 0x64);       /* pull reset low */
-       } else if (voyager_level == 4) {
-               __u16 catbase = inb(VOYAGER_SSPB_RELOCATION_PORT) << 8;
-               __u8 basebd = inb(VOYAGER_MC_SETUP);
-
-               outb(basebd | 0x08, VOYAGER_MC_SETUP);
-               outb(0x02, catbase + 0x21);
-       }
-       local_irq_disable();
-       for (;;)
-               halt();
-}
-
-void machine_emergency_restart(void)
-{
-       /* for now, just hook this to a warm restart */
-       machine_restart(NULL);
-}
-
-void mca_nmi_hook(void)
-{
-       __u8 dumpval __maybe_unused = inb(0xf823);
-       __u8 swnmi __maybe_unused = inb(0xf813);
-
-       /* FIXME: assume dump switch pressed */
-       /* check to see if the dump switch was pressed */
-       VDEBUG(("VOYAGER: dumpval = 0x%x, swnmi = 0x%x\n", dumpval, swnmi));
-       /* clear swnmi */
-       outb(0xff, 0xf813);
-       /* tell SUS to ignore dump */
-       if (voyager_level == 5 && voyager_SUS != NULL) {
-               if (voyager_SUS->SUS_mbox == VOYAGER_DUMP_BUTTON_NMI) {
-                       voyager_SUS->kernel_mbox = VOYAGER_NO_COMMAND;
-                       voyager_SUS->kernel_flags |= VOYAGER_OS_IN_PROGRESS;
-                       udelay(1000);
-                       voyager_SUS->kernel_mbox = VOYAGER_IGNORE_DUMP;
-                       voyager_SUS->kernel_flags &= ~VOYAGER_OS_IN_PROGRESS;
-               }
-       }
-       printk(KERN_ERR
-              "VOYAGER: Dump switch pressed, printing CPU%d tracebacks\n",
-              smp_processor_id());
-       show_stack(NULL, NULL);
-       show_state();
-}
-
-void machine_halt(void)
-{
-       /* treat a halt like a power off */
-       machine_power_off();
-}
-
-void machine_power_off(void)
-{
-       if (pm_power_off)
-               pm_power_off();
-}
diff --git a/arch/x86/mach-voyager/voyager_cat.c b/arch/x86/mach-voyager/voyager_cat.c
deleted file mode 100644 (file)
index 2ad598c..0000000
+++ /dev/null
@@ -1,1197 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Copyright (C) 1999,2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * This file contains all the logic for manipulating the CAT bus
- * in a level 5 machine.
- *
- * The CAT bus is a serial configuration and test bus.  Its primary
- * uses are to probe the initial configuration of the system and to
- * diagnose error conditions when a system interrupt occurs.  The low
- * level interface is fairly primitive, so most of this file consists
- * of bit shift manipulations to send and receive packets on the
- * serial bus */
-
-#include <linux/types.h>
-#include <linux/completion.h>
-#include <linux/sched.h>
-#include <asm/voyager.h>
-#include <asm/vic.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <asm/io.h>
-
-#ifdef VOYAGER_CAT_DEBUG
-#define CDEBUG(x)      printk x
-#else
-#define CDEBUG(x)
-#endif
-
-/* the CAT command port */
-#define CAT_CMD                (sspb + 0xe)
-/* the CAT data port */
-#define CAT_DATA       (sspb + 0xd)
-
-/* the internal cat functions */
-static void cat_pack(__u8 * msg, __u16 start_bit, __u8 * data, __u16 num_bits);
-static void cat_unpack(__u8 * msg, __u16 start_bit, __u8 * data,
-                      __u16 num_bits);
-static void cat_build_header(__u8 * header, const __u16 len,
-                            const __u16 smallest_reg_bits,
-                            const __u16 longest_reg_bits);
-static int cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp,
-                       __u8 reg, __u8 op);
-static int cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp,
-                      __u8 reg, __u8 * value);
-static int cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes,
-                       __u8 pad_bits);
-static int cat_write(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
-                    __u8 value);
-static int cat_read(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
-                   __u8 * value);
-static int cat_subread(voyager_module_t * modp, voyager_asic_t * asicp,
-                      __u16 offset, __u16 len, void *buf);
-static int cat_senddata(voyager_module_t * modp, voyager_asic_t * asicp,
-                       __u8 reg, __u8 value);
-static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp);
-static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp);
-
-static inline const char *cat_module_name(int module_id)
-{
-       switch (module_id) {
-       case 0x10:
-               return "Processor Slot 0";
-       case 0x11:
-               return "Processor Slot 1";
-       case 0x12:
-               return "Processor Slot 2";
-       case 0x13:
-               return "Processor Slot 4";
-       case 0x14:
-               return "Memory Slot 0";
-       case 0x15:
-               return "Memory Slot 1";
-       case 0x18:
-               return "Primary Microchannel";
-       case 0x19:
-               return "Secondary Microchannel";
-       case 0x1a:
-               return "Power Supply Interface";
-       case 0x1c:
-               return "Processor Slot 5";
-       case 0x1d:
-               return "Processor Slot 6";
-       case 0x1e:
-               return "Processor Slot 7";
-       case 0x1f:
-               return "Processor Slot 8";
-       default:
-               return "Unknown Module";
-       }
-}
-
-static int sspb = 0;           /* stores the super port location */
-int voyager_8slot = 0;         /* set to true if a 51xx monster */
-
-voyager_module_t *voyager_cat_list;
-
-/* the I/O port assignments for the VIC and QIC */
-static struct resource vic_res = {
-       .name = "Voyager Interrupt Controller",
-       .start = 0xFC00,
-       .end = 0xFC6F
-};
-static struct resource qic_res = {
-       .name = "Quad Interrupt Controller",
-       .start = 0xFC70,
-       .end = 0xFCFF
-};
-
-/* This function is used to pack a data bit stream inside a message.
- * It writes num_bits of the data buffer in msg starting at start_bit.
- * Note: This function assumes that any unused bit in the data stream
- * is set to zero so that the ORs will work correctly */
-static void
-cat_pack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
-{
-       /* compute initial shift needed */
-       const __u16 offset = start_bit % BITS_PER_BYTE;
-       __u16 len = num_bits / BITS_PER_BYTE;
-       __u16 byte = start_bit / BITS_PER_BYTE;
-       __u16 residue = (num_bits % BITS_PER_BYTE) + offset;
-       int i;
-
-       /* adjust if we have more than a byte of residue */
-       if (residue >= BITS_PER_BYTE) {
-               residue -= BITS_PER_BYTE;
-               len++;
-       }
-
-       /* clear out the bits.  We assume here that if len==0 then
-        * residue >= offset.  This is always true for the catbus
-        * operations */
-       msg[byte] &= 0xff << (BITS_PER_BYTE - offset);
-       msg[byte++] |= data[0] >> offset;
-       if (len == 0)
-               return;
-       for (i = 1; i < len; i++)
-               msg[byte++] = (data[i - 1] << (BITS_PER_BYTE - offset))
-                   | (data[i] >> offset);
-       if (residue != 0) {
-               __u8 mask = 0xff >> residue;
-               __u8 last_byte = data[i - 1] << (BITS_PER_BYTE - offset)
-                   | (data[i] >> offset);
-
-               last_byte &= ~mask;
-               msg[byte] &= mask;
-               msg[byte] |= last_byte;
-       }
-       return;
-}
-
-/* unpack the data again (same arguments as cat_pack()). data buffer
- * must be zero populated.
- *
- * Function: given a message string move to start_bit and copy num_bits into
- * data (starting at bit 0 in data).
- */
-static void
-cat_unpack(__u8 * msg, const __u16 start_bit, __u8 * data, const __u16 num_bits)
-{
-       /* compute initial shift needed */
-       const __u16 offset = start_bit % BITS_PER_BYTE;
-       __u16 len = num_bits / BITS_PER_BYTE;
-       const __u8 last_bits = num_bits % BITS_PER_BYTE;
-       __u16 byte = start_bit / BITS_PER_BYTE;
-       int i;
-
-       if (last_bits != 0)
-               len++;
-
-       /* special case: want < 8 bits from msg and we can get it from
-        * a single byte of the msg */
-       if (len == 0 && BITS_PER_BYTE - offset >= num_bits) {
-               data[0] = msg[byte] << offset;
-               data[0] &= 0xff >> (BITS_PER_BYTE - num_bits);
-               return;
-       }
-       for (i = 0; i < len; i++) {
-               /* this annoying if has to be done just in case a read of
-                * msg one beyond the array causes a panic */
-               if (offset != 0) {
-                       data[i] = msg[byte++] << offset;
-                       data[i] |= msg[byte] >> (BITS_PER_BYTE - offset);
-               } else {
-                       data[i] = msg[byte++];
-               }
-       }
-       /* do we need to truncate the final byte */
-       if (last_bits != 0) {
-               data[i - 1] &= 0xff << (BITS_PER_BYTE - last_bits);
-       }
-       return;
-}
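
The index arithmetic at the top of cat_pack()/cat_unpack() carries most of the weight in these routines; plugging concrete numbers into it makes the code easier to follow (a sketch mirroring the computations above):

    #include <stdio.h>

    #define BITS_PER_BYTE 8

    int main(void)
    {
            unsigned start_bit = 12, num_bits = 11;  /* example request */

            unsigned offset  = start_bit % BITS_PER_BYTE;           /* 4 */
            unsigned len     = num_bits / BITS_PER_BYTE;            /* 1 */
            unsigned byte    = start_bit / BITS_PER_BYTE;           /* 1 */
            unsigned residue = (num_bits % BITS_PER_BYTE) + offset; /* 7 */

            if (residue >= BITS_PER_BYTE) {  /* not taken here: 7 < 8 */
                    residue -= BITS_PER_BYTE;
                    len++;
            }
            printf("start in msg[%u], shift %u, %u full byte(s), %u residue bits\n",
                   byte, offset, len, residue);
            return 0;
    }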
-
-static void
-cat_build_header(__u8 * header, const __u16 len, const __u16 smallest_reg_bits,
-                const __u16 longest_reg_bits)
-{
-       int i;
-       __u16 start_bit = (smallest_reg_bits - 1) % BITS_PER_BYTE;
-       __u8 *last_byte = &header[len - 1];
-
-       if (start_bit == 0)
-               start_bit = 1;  /* must have at least one bit in the hdr */
-
-       for (i = 0; i < len; i++)
-               header[i] = 0;
-
-       for (i = start_bit; i > 0; i--)
-               *last_byte = ((*last_byte) << 1) + 1;
-
-}
-
-static int
-cat_sendinst(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg, __u8 op)
-{
-       __u8 parity, inst, inst_buf[4] = { 0 };
-       __u8 iseq[VOYAGER_MAX_SCAN_PATH], hseq[VOYAGER_MAX_REG_SIZE];
-       __u16 ibytes, hbytes, padbits;
-       int i;
-
-       /* 
-        * Parity is the parity of the register number + 1 (READ_REGISTER
-        * and WRITE_REGISTER always add '1' to the number of bits == 1)
-        */
-       parity = (__u8) (1 + (reg & 0x01) +
-                        ((__u8) (reg & 0x02) >> 1) +
-                        ((__u8) (reg & 0x04) >> 2) +
-                        ((__u8) (reg & 0x08) >> 3)) % 2;
-
-       inst = ((parity << 7) | (reg << 2) | op);
-
-       outb(VOYAGER_CAT_IRCYC, CAT_CMD);
-       if (!modp->scan_path_connected) {
-               if (asicp->asic_id != VOYAGER_CAT_ID) {
-                       printk
-                           ("**WARNING***: cat_sendinst has disconnected scan path not to CAT asic\n");
-                       return 1;
-               }
-               outb(VOYAGER_CAT_HEADER, CAT_DATA);
-               outb(inst, CAT_DATA);
-               if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
-                       CDEBUG(("VOYAGER CAT: cat_sendinst failed to get CAT_HEADER\n"));
-                       return 1;
-               }
-               return 0;
-       }
-       ibytes = modp->inst_bits / BITS_PER_BYTE;
-       if ((padbits = modp->inst_bits % BITS_PER_BYTE) != 0) {
-               padbits = BITS_PER_BYTE - padbits;
-               ibytes++;
-       }
-       hbytes = modp->largest_reg / BITS_PER_BYTE;
-       if (modp->largest_reg % BITS_PER_BYTE)
-               hbytes++;
-       CDEBUG(("cat_sendinst: ibytes=%d, hbytes=%d\n", ibytes, hbytes));
-       /* initialise the instruction sequence to 0xff */
-       for (i = 0; i < ibytes + hbytes; i++)
-               iseq[i] = 0xff;
-       cat_build_header(hseq, hbytes, modp->smallest_reg, modp->largest_reg);
-       cat_pack(iseq, modp->inst_bits, hseq, hbytes * BITS_PER_BYTE);
-       inst_buf[0] = inst;
-       inst_buf[1] = 0xFF >> (modp->largest_reg % BITS_PER_BYTE);
-       cat_pack(iseq, asicp->bit_location, inst_buf, asicp->ireg_length);
-#ifdef VOYAGER_CAT_DEBUG
-       printk("ins = 0x%x, iseq: ", inst);
-       for (i = 0; i < ibytes + hbytes; i++)
-               printk("0x%x ", iseq[i]);
-       printk("\n");
-#endif
-       if (cat_shiftout(iseq, ibytes, hbytes, padbits)) {
-               CDEBUG(("VOYAGER CAT: cat_sendinst: cat_shiftout failed\n"));
-               return 1;
-       }
-       CDEBUG(("CAT SHIFTOUT DONE\n"));
-       return 0;
-}
-
-static int
-cat_getdata(voyager_module_t * modp, voyager_asic_t * asicp, __u8 reg,
-           __u8 * value)
-{
-       if (!modp->scan_path_connected) {
-               if (asicp->asic_id != VOYAGER_CAT_ID) {
-                       CDEBUG(("VOYAGER CAT: ERROR: cat_getdata to CAT asic with scan path connected\n"));
-                       return 1;
-               }
-               if (reg > VOYAGER_SUBADDRHI)
-                       outb(VOYAGER_CAT_RUN, CAT_CMD);
-               outb(VOYAGER_CAT_DRCYC, CAT_CMD);
-               outb(VOYAGER_CAT_HEADER, CAT_DATA);
-               *value = inb(CAT_DATA);
-               outb(0xAA, CAT_DATA);
-               if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
-                       CDEBUG(("cat_getdata: failed to get VOYAGER_CAT_HEADER\n"));
-                       return 1;
-               }
-               return 0;
-       } else {
-               __u16 sbits = modp->num_asics - 1 + asicp->ireg_length;
-               __u16 sbytes = sbits / BITS_PER_BYTE;
-               __u16 tbytes;
-               __u8 string[VOYAGER_MAX_SCAN_PATH],
-                   trailer[VOYAGER_MAX_REG_SIZE];
-               __u8 padbits;
-               int i;
-
-               outb(VOYAGER_CAT_DRCYC, CAT_CMD);
-
-               if ((padbits = sbits % BITS_PER_BYTE) != 0) {
-                       padbits = BITS_PER_BYTE - padbits;
-                       sbytes++;
-               }
-               tbytes = asicp->ireg_length / BITS_PER_BYTE;
-               if (asicp->ireg_length % BITS_PER_BYTE)
-                       tbytes++;
-               CDEBUG(("cat_getdata: tbytes = %d, sbytes = %d, padbits = %d\n",
-                       tbytes, sbytes, padbits));
-               cat_build_header(trailer, tbytes, 1, asicp->ireg_length);
-
-               for (i = tbytes - 1; i >= 0; i--) {
-                       outb(trailer[i], CAT_DATA);
-                       string[sbytes + i] = inb(CAT_DATA);
-               }
-
-               for (i = sbytes - 1; i >= 0; i--) {
-                       outb(0xaa, CAT_DATA);
-                       string[i] = inb(CAT_DATA);
-               }
-               *value = 0;
-               cat_unpack(string,
-                          padbits + (tbytes * BITS_PER_BYTE) +
-                          asicp->asic_location, value, asicp->ireg_length);
-#ifdef VOYAGER_CAT_DEBUG
-               printk("value=0x%x, string: ", *value);
-               for (i = 0; i < tbytes + sbytes; i++)
-                       printk("0x%x ", string[i]);
-               printk("\n");
-#endif
-
-               /* sanity check the rest of the return */
-               for (i = 0; i < tbytes; i++) {
-                       __u8 input = 0;
-
-                       cat_unpack(string, padbits + (i * BITS_PER_BYTE),
-                                  &input, BITS_PER_BYTE);
-                       if (trailer[i] != input) {
-                               CDEBUG(("cat_getdata: failed to sanity check rest of ret(%d) 0x%x != 0x%x\n", i, input, trailer[i]));
-                               return 1;
-                       }
-               }
-               CDEBUG(("cat_getdata DONE\n"));
-               return 0;
-       }
-}
-
-static int
-cat_shiftout(__u8 * data, __u16 data_bytes, __u16 header_bytes, __u8 pad_bits)
-{
-       int i;
-
-       for (i = data_bytes + header_bytes - 1; i >= header_bytes; i--)
-               outb(data[i], CAT_DATA);
-
-       for (i = header_bytes - 1; i >= 0; i--) {
-               __u8 header = 0;
-               __u8 input;
-
-               outb(data[i], CAT_DATA);
-               input = inb(CAT_DATA);
-               CDEBUG(("cat_shiftout: returned 0x%x\n", input));
-               cat_unpack(data, ((data_bytes + i) * BITS_PER_BYTE) - pad_bits,
-                          &header, BITS_PER_BYTE);
-               if (input != header) {
-                       CDEBUG(("VOYAGER CAT: cat_shiftout failed to return header 0x%x != 0x%x\n", input, header));
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-static int
-cat_senddata(voyager_module_t *modp, voyager_asic_t *asicp,
-            __u8 reg, __u8 value)
-{
-       outb(VOYAGER_CAT_DRCYC, CAT_CMD);
-       if (!modp->scan_path_connected) {
-               if (asicp->asic_id != VOYAGER_CAT_ID) {
-                       CDEBUG(("VOYAGER CAT: ERROR: scan path disconnected when asic != CAT\n"));
-                       return 1;
-               }
-               outb(VOYAGER_CAT_HEADER, CAT_DATA);
-               outb(value, CAT_DATA);
-               if (inb(CAT_DATA) != VOYAGER_CAT_HEADER) {
-                       CDEBUG(("cat_senddata: failed to get correct header response to sent data\n"));
-                       return 1;
-               }
-               if (reg > VOYAGER_SUBADDRHI) {
-                       outb(VOYAGER_CAT_RUN, CAT_CMD);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       outb(VOYAGER_CAT_RUN, CAT_CMD);
-               }
-
-               return 0;
-       } else {
-               __u16 hbytes = asicp->ireg_length / BITS_PER_BYTE;
-               __u16 dbytes =
-                   (modp->num_asics - 1 + asicp->ireg_length) / BITS_PER_BYTE;
-               __u8 padbits, dseq[VOYAGER_MAX_SCAN_PATH],
-                   hseq[VOYAGER_MAX_REG_SIZE];
-               int i;
-
-               if ((padbits = (modp->num_asics - 1
-                               + asicp->ireg_length) % BITS_PER_BYTE) != 0) {
-                       padbits = BITS_PER_BYTE - padbits;
-                       dbytes++;
-               }
-               if (asicp->ireg_length % BITS_PER_BYTE)
-                       hbytes++;
-
-               cat_build_header(hseq, hbytes, 1, asicp->ireg_length);
-
-               for (i = 0; i < dbytes + hbytes; i++)
-                       dseq[i] = 0xff;
-               CDEBUG(("cat_senddata: dbytes=%d, hbytes=%d, padbits=%d\n",
-                       dbytes, hbytes, padbits));
-               cat_pack(dseq, modp->num_asics - 1 + asicp->ireg_length,
-                        hseq, hbytes * BITS_PER_BYTE);
-               cat_pack(dseq, asicp->asic_location, &value,
-                        asicp->ireg_length);
-#ifdef VOYAGER_CAT_DEBUG
-               printk("dseq ");
-               for (i = 0; i < hbytes + dbytes; i++) {
-                       printk("0x%x ", dseq[i]);
-               }
-               printk("\n");
-#endif
-               return cat_shiftout(dseq, dbytes, hbytes, padbits);
-       }
-}
-
-static int
-cat_write(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg, __u8 value)
-{
-       if (cat_sendinst(modp, asicp, reg, VOYAGER_WRITE_CONFIG))
-               return 1;
-       return cat_senddata(modp, asicp, reg, value);
-}
-
-static int
-cat_read(voyager_module_t *modp, voyager_asic_t *asicp, __u8 reg,
-        __u8 *value)
-{
-       if (cat_sendinst(modp, asicp, reg, VOYAGER_READ_CONFIG))
-               return 1;
-       return cat_getdata(modp, asicp, reg, value);
-}
-
-static int
-cat_subaddrsetup(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
-                __u16 len)
-{
-       __u8 val;
-
-       if (len > 1) {
-               /* set auto increment */
-               __u8 newval;
-
-               if (cat_read(modp, asicp, VOYAGER_AUTO_INC_REG, &val)) {
-                       CDEBUG(("cat_subaddrsetup: read of VOYAGER_AUTO_INC_REG failed\n"));
-                       return 1;
-               }
-               CDEBUG(("cat_subaddrsetup: VOYAGER_AUTO_INC_REG = 0x%x\n",
-                       val));
-               newval = val | VOYAGER_AUTO_INC;
-               if (newval != val) {
-                       if (cat_write(modp, asicp, VOYAGER_AUTO_INC_REG, newval)) {
-                               CDEBUG(("cat_subaddrsetup: write to VOYAGER_AUTO_INC_REG failed\n"));
-                               return 1;
-                       }
-               }
-       }
-       if (cat_write(modp, asicp, VOYAGER_SUBADDRLO, (__u8) (offset & 0xff))) {
-               CDEBUG(("cat_subaddrsetup: write to SUBADDRLO failed\n"));
-               return 1;
-       }
-       if (asicp->subaddr > VOYAGER_SUBADDR_LO) {
-               if (cat_write
-                   (modp, asicp, VOYAGER_SUBADDRHI, (__u8) (offset >> 8))) {
-                       CDEBUG(("cat_subaddrsetup: write to SUBADDRHI failed\n"));
-                       return 1;
-               }
-               cat_read(modp, asicp, VOYAGER_SUBADDRHI, &val);
-               CDEBUG(("cat_subaddrsetup: offset = %d, hi = %d\n", offset,
-                       val));
-       }
-       cat_read(modp, asicp, VOYAGER_SUBADDRLO, &val);
-       CDEBUG(("cat_subaddrsetup: offset = %d, lo = %d\n", offset, val));
-       return 0;
-}
-
-static int
-cat_subwrite(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
-            __u16 len, void *buf)
-{
-       int i, retval;
-
-       /* FIXME: need special actions for VOYAGER_CAT_ID here */
-       if (asicp->asic_id == VOYAGER_CAT_ID) {
-               CDEBUG(("cat_subwrite: ATTEMPT TO WRITE TO CAT ASIC\n"));
-               /* FIXME -- This is supposed to be handled better.
-                * There is a problem writing to the cat asic in the
-                * PSI.  The 30us delay seems to work, though */
-               udelay(30);
-       }
-
-       if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
-               printk("cat_subwrite: cat_subaddrsetup FAILED\n");
-               return retval;
-       }
-
-       if (cat_sendinst
-           (modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_WRITE_CONFIG)) {
-               printk("cat_subwrite: cat_sendinst FAILED\n");
-               return 1;
-       }
-       for (i = 0; i < len; i++) {
-               if (cat_senddata(modp, asicp, 0xFF, ((__u8 *) buf)[i])) {
-                       printk
-                           ("cat_subwrite: cat_senddata element at %d FAILED\n",
-                            i);
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-static int
-cat_subread(voyager_module_t *modp, voyager_asic_t *asicp, __u16 offset,
-           __u16 len, void *buf)
-{
-       int i, retval;
-
-       if ((retval = cat_subaddrsetup(modp, asicp, offset, len)) != 0) {
-               CDEBUG(("cat_subread: cat_subaddrsetup FAILED\n"));
-               return retval;
-       }
-
-       if (cat_sendinst(modp, asicp, VOYAGER_SUBADDRDATA, VOYAGER_READ_CONFIG)) {
-               CDEBUG(("cat_subread: cat_sendinst failed\n"));
-               return 1;
-       }
-       for (i = 0; i < len; i++) {
-               if (cat_getdata(modp, asicp, 0xFF, &((__u8 *) buf)[i])) {
-                       CDEBUG(("cat_subread: cat_getdata element %d failed\n",
-                               i));
-                       return 1;
-               }
-       }
-       return 0;
-}
-
-/* buffer for storing EPROM data read in during initialisation */
-static __initdata __u8 eprom_buf[0xFFFF];
-static voyager_module_t *voyager_initial_module;
-
-/* Initialise the cat bus components.  We assume this is called by the
- * boot cpu *after* all memory initialisation has been done (so we can
- * use kmalloc) but before smp initialisation, so we can probe the SMP
- * configuration and pick up necessary information.  */
-void __init voyager_cat_init(void)
-{
-       voyager_module_t **modpp = &voyager_initial_module;
-       voyager_asic_t **asicpp;
-       voyager_asic_t *qabc_asic = NULL;
-       int i, j;
-       unsigned long qic_addr = 0;
-       __u8 qabc_data[0x20];
-       __u8 num_submodules, val;
-       voyager_eprom_hdr_t *eprom_hdr = (voyager_eprom_hdr_t *)&eprom_buf[0];
-
-       __u8 cmos[4];
-       unsigned long addr;
-
-       /* initialise the SUS mailbox */
-       for (i = 0; i < sizeof(cmos); i++)
-               cmos[i] = voyager_extended_cmos_read(VOYAGER_DUMP_LOCATION + i);
-       addr = *(unsigned long *)cmos;
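-       /* The mailbox address is assembled little-endian from four
-        * consecutive extended CMOS bytes, so cmos[] = { 0x00, 0x04,
-        * 0x00, 0xff } (illustrative values) yields addr = 0xff000400.
-        * The check below requires the address to lie in the top 16MB
-        * (0xffxxxxxx), where the SUS mailbox is expected to live. */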
-       if ((addr & 0xff000000) != 0xff000000) {
-               printk(KERN_ERR
-                      "Voyager failed to get SUS mailbox (addr = 0x%lx)\n",
-                      addr);
-       } else {
-               static struct resource res;
-
-               res.name = "voyager SUS";
-               res.start = addr;
-               res.end = addr + 0x3ff;
-
-               request_resource(&iomem_resource, &res);
-               voyager_SUS = (struct voyager_SUS *)
-                   ioremap(addr, 0x400);
-               printk(KERN_NOTICE "Voyager SUS mailbox version 0x%x\n",
-                      voyager_SUS->SUS_version);
-               voyager_SUS->kernel_version = VOYAGER_MAILBOX_VERSION;
-               voyager_SUS->kernel_flags = VOYAGER_OS_HAS_SYSINT;
-       }
-
-       /* clear the processor counts */
-       voyager_extended_vic_processors = 0;
-       voyager_quad_processors = 0;
-
-       printk("VOYAGER: beginning CAT bus probe\n");
-       /* set up the SuperSet Port Block which tells us where the
-        * CAT communication port is */
-       sspb = inb(VOYAGER_SSPB_RELOCATION_PORT) * 0x100;
-       VDEBUG(("VOYAGER DEBUG: sspb = 0x%x\n", sspb));
-
-       /* now find out if we're 8 slot or normal */
-       if ((inb(VIC_PROC_WHO_AM_I) & EIGHT_SLOT_IDENTIFIER)
-           == EIGHT_SLOT_IDENTIFIER) {
-               voyager_8slot = 1;
-               printk(KERN_NOTICE
-                      "Voyager: Eight slot 51xx configuration detected\n");
-       }
-
-       for (i = VOYAGER_MIN_MODULE; i <= VOYAGER_MAX_MODULE; i++) {
-               __u8 input;
-               int asic;
-               __u16 eprom_size;
-               __u16 sp_offset;
-
-               outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
-               outb(i, VOYAGER_CAT_CONFIG_PORT);
-
-               /* check the presence of the module */
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               outb(VOYAGER_CAT_IRCYC, CAT_CMD);
-               outb(VOYAGER_CAT_HEADER, CAT_DATA);
-               /* stream a series of alternating 1s and 0s to
-                * stimulate a response */
-               outb(0xAA, CAT_DATA);
-               input = inb(CAT_DATA);
-               outb(VOYAGER_CAT_END, CAT_CMD);
-               if (input != VOYAGER_CAT_HEADER) {
-                       continue;
-               }
-               CDEBUG(("VOYAGER DEBUG: found module id 0x%x, %s\n", i,
-                       cat_module_name(i)));
-               *modpp = kmalloc(sizeof(voyager_module_t), GFP_KERNEL); /*&voyager_module_storage[cat_count++]; */
-               if (*modpp == NULL) {
-                       printk("**WARNING** kmalloc failure in cat_init\n");
-                       continue;
-               }
-               memset(*modpp, 0, sizeof(voyager_module_t));
-               /* need temporary asic for cat_subread.  It will be
-                * filled in correctly later */
-               (*modpp)->asic = kmalloc(sizeof(voyager_asic_t), GFP_KERNEL);   /*&voyager_asic_storage[asic_count]; */
-               if ((*modpp)->asic == NULL) {
-                       printk("**WARNING** kmalloc failure in cat_init\n");
-                       continue;
-               }
-               memset((*modpp)->asic, 0, sizeof(voyager_asic_t));
-               (*modpp)->asic->asic_id = VOYAGER_CAT_ID;
-               (*modpp)->asic->subaddr = VOYAGER_SUBADDR_HI;
-               (*modpp)->module_addr = i;
-               (*modpp)->scan_path_connected = 0;
-               if (i == VOYAGER_PSI) {
-                       /* Exception leg for modules with no EEPROM */
-                       printk("Module \"%s\"\n", cat_module_name(i));
-                       continue;
-               }
-
-               CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               cat_disconnect(*modpp, (*modpp)->asic);
-               if (cat_subread(*modpp, (*modpp)->asic,
-                               VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
-                               &eprom_size)) {
-                       printk
-                           ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
-                            i);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-               if (eprom_size > sizeof(eprom_buf)) {
-                       printk
-                           ("**WARNING**: Voyager EPROM buffer too small for module 0x%x.  Need %d bytes\n",
-                            i, eprom_size);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-               outb(VOYAGER_CAT_END, CAT_CMD);
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
-                       eprom_size));
-               if (cat_subread
-                   (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-               outb(VOYAGER_CAT_END, CAT_CMD);
-               printk("Module \"%s\", version 0x%x, tracer 0x%x, asics %d\n",
-                      cat_module_name(i), eprom_hdr->version_id,
-                      *((__u32 *) eprom_hdr->tracer), eprom_hdr->num_asics);
-               (*modpp)->ee_size = eprom_hdr->ee_size;
-               (*modpp)->num_asics = eprom_hdr->num_asics;
-               asicpp = &((*modpp)->asic);
-               sp_offset = eprom_hdr->scan_path_offset;
-               /* All we really care about are the Quad cards.  We
-                * identify them because they are in a processor slot
-                * and have only four asics */
-               if ((i < 0x10 || (i >= 0x14 && i < 0x1c) || i > 0x1f)) {
-                       modpp = &((*modpp)->next);
-                       continue;
-               }
-               /* Now we know it's in a processor slot, does it have
-                * a quad baseboard submodule */
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               cat_read(*modpp, (*modpp)->asic, VOYAGER_SUBMODPRESENT,
-                        &num_submodules);
-               /* lowest two bits, active low */
-               num_submodules = ~(0xfc | num_submodules);
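-               /* Worked example: SUBMODPRESENT = 0xfe (bit 0 low,
-                * i.e. asserted) gives ~(0xfc | 0xfe) = 0x01 in eight
-                * bits: exactly one submodule present.  The 0xfc
-                * masks off the six unused upper bits. */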
-               CDEBUG(("VOYAGER CAT: %d submodules present\n",
-                       num_submodules));
-               if (num_submodules == 0) {
-                       /* fill in the dyadic extended processors */
-                       __u8 cpu = i & 0x07;
-
-                       printk("Module \"%s\": Dyadic Processor Card\n",
-                              cat_module_name(i));
-                       voyager_extended_vic_processors |= (1 << cpu);
-                       cpu += 4;
-                       voyager_extended_vic_processors |= (1 << cpu);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-
-               /* now we want to read the asics on the first submodule,
-                * which should be the quad base board */
-
-               cat_read(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, &val);
-               CDEBUG(("cat_init: SUBMODSELECT value = 0x%x\n", val));
-               val = (val & 0x7c) | VOYAGER_QUAD_BASEBOARD;
-               cat_write(*modpp, (*modpp)->asic, VOYAGER_SUBMODSELECT, val);
-
-               outb(VOYAGER_CAT_END, CAT_CMD);
-
-               CDEBUG(("cat_init: Reading eeprom for module 0x%x at offset %d\n", i, VOYAGER_XSUM_END_OFFSET));
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               cat_disconnect(*modpp, (*modpp)->asic);
-               if (cat_subread(*modpp, (*modpp)->asic,
-                               VOYAGER_XSUM_END_OFFSET, sizeof(eprom_size),
-                               &eprom_size)) {
-                       printk
-                           ("**WARNING**: Voyager couldn't read EPROM size for module 0x%x\n",
-                            i);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-               if (eprom_size > sizeof(eprom_buf)) {
-                       printk
-                           ("**WARNING**: Voyager EPROM buffer too small for module 0x%x.  Need %d bytes\n",
-                            i, eprom_size);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-               outb(VOYAGER_CAT_END, CAT_CMD);
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               CDEBUG(("cat_init: module 0x%x, eeprom_size %d\n", i,
-                       eprom_size));
-               if (cat_subread
-                   (*modpp, (*modpp)->asic, 0, eprom_size, eprom_buf)) {
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       continue;
-               }
-               outb(VOYAGER_CAT_END, CAT_CMD);
-               /* Now do everything for the QBB submodule 1 */
-               (*modpp)->ee_size = eprom_hdr->ee_size;
-               (*modpp)->num_asics = eprom_hdr->num_asics;
-               asicpp = &((*modpp)->asic);
-               sp_offset = eprom_hdr->scan_path_offset;
-               /* get rid of the dummy CAT asic and read the real one */
-               kfree((*modpp)->asic);
-               for (asic = 0; asic < (*modpp)->num_asics; asic++) {
-                       int j;
-                       voyager_asic_t *asicp = *asicpp = kzalloc(sizeof(voyager_asic_t), GFP_KERNEL);  /*&voyager_asic_storage[asic_count++]; */
-                       voyager_sp_table_t *sp_table;
-                       voyager_at_t *asic_table;
-                       voyager_jtt_t *jtag_table;
-
-                       if (asicp == NULL) {
-                               printk
-                                   ("**WARNING** kmalloc failure in cat_init\n");
-                               continue;
-                       }
-                       asicpp = &(asicp->next);
-                       asicp->asic_location = asic;
-                       sp_table =
-                           (voyager_sp_table_t *) (eprom_buf + sp_offset);
-                       asicp->asic_id = sp_table->asic_id;
-                       asic_table =
-                           (voyager_at_t *) (eprom_buf +
-                                             sp_table->asic_data_offset);
-                       for (j = 0; j < 4; j++)
-                               asicp->jtag_id[j] = asic_table->jtag_id[j];
-                       jtag_table =
-                           (voyager_jtt_t *) (eprom_buf +
-                                              asic_table->jtag_offset);
-                       asicp->ireg_length = jtag_table->ireg_len;
-                       asicp->bit_location = (*modpp)->inst_bits;
-                       (*modpp)->inst_bits += asicp->ireg_length;
-                       if (asicp->ireg_length > (*modpp)->largest_reg)
-                               (*modpp)->largest_reg = asicp->ireg_length;
-                       if (asicp->ireg_length < (*modpp)->smallest_reg ||
-                           (*modpp)->smallest_reg == 0)
-                               (*modpp)->smallest_reg = asicp->ireg_length;
-                       CDEBUG(("asic 0x%x, ireg_length=%d, bit_location=%d\n",
-                               asicp->asic_id, asicp->ireg_length,
-                               asicp->bit_location));
-                       if (asicp->asic_id == VOYAGER_QUAD_QABC) {
-                               CDEBUG(("VOYAGER CAT: QABC ASIC found\n"));
-                               qabc_asic = asicp;
-                       }
-                       sp_offset += sizeof(voyager_sp_table_t);
-               }
-               CDEBUG(("Module inst_bits = %d, largest_reg = %d, smallest_reg=%d\n", (*modpp)->inst_bits, (*modpp)->largest_reg, (*modpp)->smallest_reg));
-               /* OK, now we have the QUAD ASICs set up, use them.
-                * we need to:
-                *
-                * 1. Find the Memory area for the Quad CPIs.
-                * 2. Find the Extended VIC processor
-                * 3. Configure a second extended VIC processor (this
-                *    cannot be done for the 51xx).
-                * */
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               cat_connect(*modpp, (*modpp)->asic);
-               CDEBUG(("CAT CONNECTED!!\n"));
-               cat_subread(*modpp, qabc_asic, 0, sizeof(qabc_data), qabc_data);
-               qic_addr = qabc_data[5] << 8;
-               qic_addr = (qic_addr | qabc_data[6]) << 8;
-               qic_addr = (qic_addr | qabc_data[7]) << 8;
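-               /* The three EEPROM bytes form the top 24 bits of the
-                * CPI area's physical address, i.e. qic_addr =
-                * (qabc_data[5] << 24) | (qabc_data[6] << 16) |
-                * (qabc_data[7] << 8); illustrative bytes { 0xfe,
-                * 0x08, 0x00 } would give 0xfe080000. */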
-               printk
-                   ("Module \"%s\": Quad Processor Card; CPI 0x%lx, SET=0x%x\n",
-                    cat_module_name(i), qic_addr, qabc_data[8]);
-#if 0                          /* plumbing fails---FIXME */
-               if ((qabc_data[8] & 0xf0) == 0) {
-                       /* FIXME: 32 way 8 CPU slot monster cannot be
-                        * plumbed this way---need to check for it */
-
-                       printk("Plumbing second Extended Quad Processor\n");
-                       /* second VIC line hardwired to Quad CPU 1 */
-                       qabc_data[8] |= 0x20;
-                       cat_subwrite(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
-#ifdef VOYAGER_CAT_DEBUG
-                       /* verify plumbing */
-                       cat_subread(*modpp, qabc_asic, 8, 1, &qabc_data[8]);
-                       if ((qabc_data[8] & 0xf0) == 0) {
-                               CDEBUG(("PLUMBING FAILED: 0x%x\n",
-                                       qabc_data[8]));
-                       }
-#endif
-               }
-#endif
-
-               {
-                       struct resource *res =
-                           kzalloc(sizeof(struct resource), GFP_KERNEL);
-                       res->name = kmalloc(128, GFP_KERNEL);
-                       sprintf((char *)res->name, "Voyager %s Quad CPI",
-                               cat_module_name(i));
-                       res->start = qic_addr;
-                       res->end = qic_addr + 0x3ff;
-                       request_resource(&iomem_resource, res);
-               }
-
-               qic_addr = (unsigned long)ioremap_cache(qic_addr, 0x400);
-
-               for (j = 0; j < 4; j++) {
-                       __u8 cpu;
-
-                       if (voyager_8slot) {
-                               /* 8 slot has a different mapping,
-                                * each slot has only one vic line, so
-                                * 1 cpu in each slot must be < 8 */
-                               cpu = (i & 0x07) + j * 8;
-                       } else {
-                               cpu = (i & 0x03) + j * 4;
-                       }
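-                       /* Worked example: module 0x11 (slot 1) gives
-                        * cpu = 1 + 4 * j, i.e. CPUs 1, 5, 9 and 13,
-                        * on a normal box, but cpu = 1 + 8 * j, i.e.
-                        * CPUs 1, 9, 17 and 25, on an 8 slot one. */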
-                       if ((qabc_data[8] & (1 << j))) {
-                               voyager_extended_vic_processors |= (1 << cpu);
-                       }
-                       if (qabc_data[8] & (1 << (j + 4))) {
-                               /* Second SET register plumbed: Quad
-                                * card has two VIC connected CPUs.
-                                * Secondary cannot be booted as a VIC
-                                * CPU */
-                               voyager_extended_vic_processors |= (1 << cpu);
-                               voyager_allowed_boot_processors &=
-                                   (~(1 << cpu));
-                       }
-
-                       voyager_quad_processors |= (1 << cpu);
-                       voyager_quad_cpi_addr[cpu] = (struct voyager_qic_cpi *)
-                           (qic_addr + (j << 8));
-                       CDEBUG(("CPU%d: CPI address 0x%lx\n", cpu,
-                               (unsigned long)voyager_quad_cpi_addr[cpu]));
-               }
-               outb(VOYAGER_CAT_END, CAT_CMD);
-
-               *asicpp = NULL;
-               modpp = &((*modpp)->next);
-       }
-       *modpp = NULL;
-       printk
-           ("CAT Bus Initialisation finished: extended procs 0x%x, quad procs 0x%x, allowed vic boot = 0x%x\n",
-            voyager_extended_vic_processors, voyager_quad_processors,
-            voyager_allowed_boot_processors);
-       request_resource(&ioport_resource, &vic_res);
-       if (voyager_quad_processors)
-               request_resource(&ioport_resource, &qic_res);
-       /* set up the front power switch */
-}
-
-int voyager_cat_readb(__u8 module, __u8 asic, int reg)
-{
-       return 0;
-}
-
-static int cat_disconnect(voyager_module_t * modp, voyager_asic_t * asicp)
-{
-       __u8 val;
-       int err = 0;
-
-       if (!modp->scan_path_connected)
-               return 0;
-       if (asicp->asic_id != VOYAGER_CAT_ID) {
-               CDEBUG(("cat_disconnect: ASIC is not CAT\n"));
-               return 1;
-       }
-       err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
-       if (err) {
-               CDEBUG(("cat_disconnect: failed to read SCANPATH\n"));
-               return err;
-       }
-       val &= VOYAGER_DISCONNECT_ASIC;
-       err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
-       if (err) {
-               CDEBUG(("cat_disconnect: failed to write SCANPATH\n"));
-               return err;
-       }
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       modp->scan_path_connected = 0;
-
-       return 0;
-}
-
-static int cat_connect(voyager_module_t * modp, voyager_asic_t * asicp)
-{
-       __u8 val;
-       int err = 0;
-
-       if (modp->scan_path_connected)
-               return 0;
-       if (asicp->asic_id != VOYAGER_CAT_ID) {
-               CDEBUG(("cat_connect: ASIC is not CAT\n"));
-               return 1;
-       }
-
-       err = cat_read(modp, asicp, VOYAGER_SCANPATH, &val);
-       if (err) {
-               CDEBUG(("cat_connect: failed to read SCANPATH\n"));
-               return err;
-       }
-       val |= VOYAGER_CONNECT_ASIC;
-       err = cat_write(modp, asicp, VOYAGER_SCANPATH, val);
-       if (err) {
-               CDEBUG(("cat_connect: failed to write SCANPATH\n"));
-               return err;
-       }
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       modp->scan_path_connected = 1;
-
-       return 0;
-}
-
-void voyager_cat_power_off(void)
-{
-       /* Power the machine off by writing to the PSI over the CAT
-        * bus */
-       __u8 data;
-       voyager_module_t psi = { 0 };
-       voyager_asic_t psi_asic = { 0 };
-
-       psi.asic = &psi_asic;
-       psi.asic->asic_id = VOYAGER_CAT_ID;
-       psi.asic->subaddr = VOYAGER_SUBADDR_HI;
-       psi.module_addr = VOYAGER_PSI;
-       psi.scan_path_connected = 0;
-
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       /* Connect the PSI to the CAT Bus */
-       outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
-       outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       cat_disconnect(&psi, &psi_asic);
-       /* Read the status */
-       cat_subread(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       CDEBUG(("PSI STATUS 0x%x\n", data));
-       /* These two writes are power off prep and perform */
-       data = PSI_CLEAR;
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       data = PSI_POWER_DOWN;
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG, 1, &data);
-       outb(VOYAGER_CAT_END, CAT_CMD);
-}
-
-struct voyager_status voyager_status = { 0 };
-
-void voyager_cat_psi(__u8 cmd, __u16 reg, __u8 * data)
-{
-       voyager_module_t psi = { 0 };
-       voyager_asic_t psi_asic = { 0 };
-
-       psi.asic = &psi_asic;
-       psi.asic->asic_id = VOYAGER_CAT_ID;
-       psi.asic->subaddr = VOYAGER_SUBADDR_HI;
-       psi.module_addr = VOYAGER_PSI;
-       psi.scan_path_connected = 0;
-
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       /* Connect the PSI to the CAT Bus */
-       outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
-       outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       cat_disconnect(&psi, &psi_asic);
-       switch (cmd) {
-       case VOYAGER_PSI_READ:
-               cat_read(&psi, &psi_asic, reg, data);
-               break;
-       case VOYAGER_PSI_WRITE:
-               cat_write(&psi, &psi_asic, reg, *data);
-               break;
-       case VOYAGER_PSI_SUBREAD:
-               cat_subread(&psi, &psi_asic, reg, 1, data);
-               break;
-       case VOYAGER_PSI_SUBWRITE:
-               cat_subwrite(&psi, &psi_asic, reg, 1, data);
-               break;
-       default:
-               printk(KERN_ERR "Voyager PSI, unrecognised command %d\n", cmd);
-               break;
-       }
-       outb(VOYAGER_CAT_END, CAT_CMD);
-}
-
-void voyager_cat_do_common_interrupt(void)
-{
-       /* This is caused either by a memory parity error or something
-        * in the PSI */
-       __u8 data;
-       voyager_module_t psi = { 0 };
-       voyager_asic_t psi_asic = { 0 };
-       struct voyager_psi psi_reg;
-       int i;
-      re_read:
-       psi.asic = &psi_asic;
-       psi.asic->asic_id = VOYAGER_CAT_ID;
-       psi.asic->subaddr = VOYAGER_SUBADDR_HI;
-       psi.module_addr = VOYAGER_PSI;
-       psi.scan_path_connected = 0;
-
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       /* Connect the PSI to the CAT Bus */
-       outb(VOYAGER_CAT_DESELECT, VOYAGER_CAT_CONFIG_PORT);
-       outb(VOYAGER_PSI, VOYAGER_CAT_CONFIG_PORT);
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       cat_disconnect(&psi, &psi_asic);
-       /* Read the status.  NOTE: Need to read *all* the PSI regs here
-        * otherwise the cmn int will be reasserted */
-       for (i = 0; i < sizeof(psi_reg.regs); i++) {
-               cat_read(&psi, &psi_asic, i, &((__u8 *)&psi_reg.regs)[i]);
-       }
-       outb(VOYAGER_CAT_END, CAT_CMD);
-       if ((psi_reg.regs.checkbit & 0x02) == 0) {
-               psi_reg.regs.checkbit |= 0x02;
-               cat_write(&psi, &psi_asic, 5, psi_reg.regs.checkbit);
-               printk("VOYAGER RE-READ PSI\n");
-               goto re_read;
-       }
-       outb(VOYAGER_CAT_RUN, CAT_CMD);
-       for (i = 0; i < sizeof(psi_reg.subregs); i++) {
-               /* This looks strange, but the PSI doesn't do auto increment
-                * correctly */
-               cat_subread(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG + i,
-                           1, &((__u8 *)&psi_reg.subregs)[i]);
-       }
-       outb(VOYAGER_CAT_END, CAT_CMD);
-#ifdef VOYAGER_CAT_DEBUG
-       printk("VOYAGER PSI: ");
-       for (i = 0; i < sizeof(psi_reg.regs); i++)
-               printk("%02x ", ((__u8 *) & psi_reg.regs)[i]);
-       printk("\n           ");
-       for (i = 0; i < sizeof(psi_reg.subregs); i++)
-               printk("%02x ", ((__u8 *) & psi_reg.subregs)[i]);
-       printk("\n");
-#endif
-       if (psi_reg.regs.intstatus & PSI_MON) {
-               /* switch off or power fail */
-
-               if (psi_reg.subregs.supply & PSI_SWITCH_OFF) {
-                       if (voyager_status.switch_off) {
-                               printk(KERN_ERR
-                                      "Voyager front panel switch turned off again---Immediate power off!\n");
-                               voyager_cat_power_off();
-                               /* not reached */
-                       } else {
-                               printk(KERN_ERR
-                                      "Voyager front panel switch turned off\n");
-                               voyager_status.switch_off = 1;
-                               voyager_status.request_from_kernel = 1;
-                               wake_up_process(voyager_thread);
-                       }
-                       /* Tell the hardware we're taking care of the
-                        * shutdown, otherwise it will power the box off
-                        * within 3 seconds of the switch being pressed
-                        * and, more importantly for us, continue to
-                        * assert the common interrupt */
-                       data = PSI_CLR_SWITCH_OFF;
-                       outb(VOYAGER_CAT_RUN, CAT_CMD);
-                       cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_SUPPLY_REG,
-                                    1, &data);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-               } else {
-
-                       VDEBUG(("Voyager ac fail reg 0x%x\n",
-                               psi_reg.subregs.ACfail));
-                       if ((psi_reg.subregs.ACfail & AC_FAIL_STAT_CHANGE) == 0) {
-                               /* No further update */
-                               return;
-                       }
-#if 0
-                       /* Don't bother trying to find out who failed.
-                        * FIXME: This probably makes the code incorrect on
-                        * anything other than a 345x */
-                       for (i = 0; i < 5; i++) {
-                               if (psi_reg.subregs.ACfail & (1 << i)) {
-                                       break;
-                               }
-                       }
-                       printk(KERN_NOTICE "AC FAIL IN SUPPLY %d\n", i);
-#endif
-                       /* DON'T do this: it shuts down the AC PSI 
-                          outb(VOYAGER_CAT_RUN, CAT_CMD);
-                          data = PSI_MASK_MASK | i;
-                          cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_MASK,
-                          1, &data);
-                          outb(VOYAGER_CAT_END, CAT_CMD);
-                        */
-                       printk(KERN_ERR "Voyager AC power failure\n");
-                       outb(VOYAGER_CAT_RUN, CAT_CMD);
-                       data = PSI_COLD_START;
-                       cat_subwrite(&psi, &psi_asic, VOYAGER_PSI_GENERAL_REG,
-                                    1, &data);
-                       outb(VOYAGER_CAT_END, CAT_CMD);
-                       voyager_status.power_fail = 1;
-                       voyager_status.request_from_kernel = 1;
-                       wake_up_process(voyager_thread);
-               }
-
-       } else if (psi_reg.regs.intstatus & PSI_FAULT) {
-               /* Major fault! */
-               printk(KERN_ERR
-                      "Voyager PSI Detected major fault, immediate power off!\n");
-               voyager_cat_power_off();
-               /* not reached */
-       } else if (psi_reg.regs.intstatus & (PSI_DC_FAIL | PSI_ALARM
-                                            | PSI_CURRENT | PSI_DVM
-                                            | PSI_PSCFAULT | PSI_STAT_CHG)) {
-               /* other psi fault */
-
-               printk(KERN_WARNING "Voyager PSI status 0x%x\n",
-                      psi_reg.regs.intstatus);
-               /* clear the PSI fault */
-               outb(VOYAGER_CAT_RUN, CAT_CMD);
-               cat_write(&psi, &psi_asic, VOYAGER_PSI_STATUS_REG, 0);
-               outb(VOYAGER_CAT_END, CAT_CMD);
-       }
-}
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
deleted file mode 100644 (file)
index b9cc84a..0000000
+++ /dev/null
@@ -1,1807 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Copyright (C) 1999,2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * This file provides all the same external entries as smp.c but uses
- * the voyager hal to provide the functionality
- */
-#include <linux/cpu.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <linux/cache.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/bootmem.h>
-#include <linux/completion.h>
-#include <asm/desc.h>
-#include <asm/voyager.h>
-#include <asm/vic.h>
-#include <asm/mtrr.h>
-#include <asm/pgalloc.h>
-#include <asm/tlbflush.h>
-#include <asm/arch_hooks.h>
-#include <asm/trampoline.h>
-
-/* TLB state -- visible externally, indexed physically */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = { &init_mm, 0 };
-
-/* CPU IRQ affinity -- set to all ones initially */
-static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned =
-       {[0 ... NR_CPUS-1]  = ~0UL };
-
-/* per CPU data structure (for /proc/cpuinfo et al), visible externally
- * indexed physically */
-DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
-EXPORT_PER_CPU_SYMBOL(cpu_info);
-
-/* physical ID of the CPU used to boot the system */
-unsigned char boot_cpu_id;
-
-/* The memory line addresses for the Quad CPIs */
-struct voyager_qic_cpi *voyager_quad_cpi_addr[NR_CPUS] __cacheline_aligned;
-
-/* The masks for the Extended VIC processors, filled in by cat_init */
-__u32 voyager_extended_vic_processors = 0;
-
-/* Masks for the extended Quad processors which cannot be VIC booted */
-__u32 voyager_allowed_boot_processors = 0;
-
-/* The mask for the Quad Processors (both extended and non-extended) */
-__u32 voyager_quad_processors = 0;
-
-/* Total count of live CPUs, used in process.c to display
- * the CPU information and in irq.c for the per CPU irq
- * activity count.  Finally exported by i386_ksyms.c */
-static int voyager_extended_cpus = 1;
-
-/* Used for the invalidate map that's also checked in the spinlock */
-static volatile unsigned long smp_invalidate_needed;
-
-/* Bitmask of CPUs present in the system - exported by i386_ksyms.c, used
- * by scheduler but indexed physically */
-static cpumask_t voyager_phys_cpu_present_map = CPU_MASK_NONE;
-
-/* The internal functions */
-static void send_CPI(__u32 cpuset, __u8 cpi);
-static void ack_CPI(__u8 cpi);
-static int ack_QIC_CPI(__u8 cpi);
-static void ack_special_QIC_CPI(__u8 cpi);
-static void ack_VIC_CPI(__u8 cpi);
-static void send_CPI_allbutself(__u8 cpi);
-static void mask_vic_irq(unsigned int irq);
-static void unmask_vic_irq(unsigned int irq);
-static unsigned int startup_vic_irq(unsigned int irq);
-static void enable_local_vic_irq(unsigned int irq);
-static void disable_local_vic_irq(unsigned int irq);
-static void before_handle_vic_irq(unsigned int irq);
-static void after_handle_vic_irq(unsigned int irq);
-static void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask);
-static void ack_vic_irq(unsigned int irq);
-static void vic_enable_cpi(void);
-static void do_boot_cpu(__u8 cpuid);
-static void do_quad_bootstrap(void);
-static void initialize_secondary(void);
-
-int hard_smp_processor_id(void);
-int safe_smp_processor_id(void);
-
-/* Inline functions */
-static inline void send_one_QIC_CPI(__u8 cpu, __u8 cpi)
-{
-       voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi =
-           (smp_processor_id() << 16) + cpi;
-}
-
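-/* The CPI word written above packs the sender's physical CPU id into
- * bits 16 and up and the CPI number into the low bits, so CPU 2
- * sending CPI 5 stores (2 << 16) + 5 = 0x20005 in the target's
- * qic_cpi[5].cpi slot. */
-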
-static inline void send_QIC_CPI(__u32 cpuset, __u8 cpi)
-{
-       int cpu;
-
-       for_each_online_cpu(cpu) {
-               if (cpuset & (1 << cpu)) {
-#ifdef VOYAGER_DEBUG
-                       if (!cpu_online(cpu))
-                               VDEBUG(("CPU%d sending cpi %d to CPU%d not in "
-                                       "cpu_online_map\n",
-                                       hard_smp_processor_id(), cpi, cpu));
-#endif
-                       send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
-               }
-       }
-}
-
-static inline void wrapper_smp_local_timer_interrupt(void)
-{
-       irq_enter();
-       smp_local_timer_interrupt();
-       irq_exit();
-}
-
-static inline void send_one_CPI(__u8 cpu, __u8 cpi)
-{
-       if (voyager_quad_processors & (1 << cpu))
-               send_one_QIC_CPI(cpu, cpi - QIC_CPI_OFFSET);
-       else
-               send_CPI(1 << cpu, cpi);
-}
-
-static inline void send_CPI_allbutself(__u8 cpi)
-{
-       __u8 cpu = smp_processor_id();
-       __u32 mask = cpus_addr(cpu_online_map)[0] & ~(1 << cpu);
-       send_CPI(mask, cpi);
-}
-
-static inline int is_cpu_quad(void)
-{
-       __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
-       return ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER);
-}
-
-static inline int is_cpu_extended(void)
-{
-       __u8 cpu = hard_smp_processor_id();
-
-       return (voyager_extended_vic_processors & (1 << cpu));
-}
-
-static inline int is_cpu_vic_boot(void)
-{
-       __u8 cpu = hard_smp_processor_id();
-
-       return (voyager_extended_vic_processors
-               & voyager_allowed_boot_processors & (1 << cpu));
-}
-
-static inline void ack_CPI(__u8 cpi)
-{
-       switch (cpi) {
-       case VIC_CPU_BOOT_CPI:
-               if (is_cpu_quad() && !is_cpu_vic_boot())
-                       ack_QIC_CPI(cpi);
-               else
-                       ack_VIC_CPI(cpi);
-               break;
-       case VIC_SYS_INT:
-       case VIC_CMN_INT:
-               /* These are slightly strange.  Even on the Quad card,
-                * they are vectored as VIC CPIs */
-               if (is_cpu_quad())
-                       ack_special_QIC_CPI(cpi);
-               else
-                       ack_VIC_CPI(cpi);
-               break;
-       default:
-               printk("VOYAGER ERROR: CPI%d is in common CPI code\n", cpi);
-               break;
-       }
-}
-
-/* local variables */
-
-/* The VIC IRQ descriptors -- these look almost identical to the
- * 8259 IRQs except that masks and things must be kept per processor
- */
-static struct irq_chip vic_chip = {
-       .name = "VIC",
-       .startup = startup_vic_irq,
-       .mask = mask_vic_irq,
-       .unmask = unmask_vic_irq,
-       .set_affinity = set_vic_irq_affinity,
-};
-
-/* used to count up as CPUs are brought on line (starts at 0) */
-static int cpucount = 0;
-
-/* The per cpu profile stuff - used in smp_local_timer_interrupt */
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
-/* the map used to check if a CPU has booted */
-static __u32 cpu_booted_map;
-
-/* the synchronize flag used to hold all secondary CPUs spinning in
- * a tight loop until the boot sequence is ready for them */
-static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
-
-/* This is for the new dynamic CPU boot code */
-
-/* The per processor IRQ masks (these are usually kept in sync) */
-static __u16 vic_irq_mask[NR_CPUS] __cacheline_aligned;
-
-/* the list of IRQs to be enabled by the VIC_ENABLE_IRQ_CPI */
-static __u16 vic_irq_enable_mask[NR_CPUS] __cacheline_aligned = { 0 };
-
-/* Lock for enable/disable of VIC interrupts */
-static __cacheline_aligned DEFINE_SPINLOCK(vic_irq_lock);
-
-/* The boot processor is correctly set up in PC mode when it
- * comes up, but the secondaries need their master/slave 8259
- * pairs initializing correctly */
-
-/* Interrupt counters (per cpu) and total - used to try to
- * even up the interrupt handling routines */
-static long vic_intr_total = 0;
-static long vic_intr_count[NR_CPUS] __cacheline_aligned = { 0 };
-static unsigned long vic_tick[NR_CPUS] __cacheline_aligned = { 0 };
-
-/* Since we can only use CPI0, we fake all the other CPIs */
-static unsigned long vic_cpi_mailbox[NR_CPUS] __cacheline_aligned;
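-
-/* (A sketch of the scheme, as far as it can be inferred here: a
- * sender sets the bit for the CPI it wants in the target's mailbox
- * word and raises CPI0; the receiver then reads and clears its
- * mailbox and runs one handler per set bit.) */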
-
-/* debugging routine to read the isr of the cpu's pic */
-static inline __u16 vic_read_isr(void)
-{
-       __u16 isr;
-
-       outb(0x0b, 0xa0);
-       isr = inb(0xa0) << 8;
-       outb(0x0b, 0x20);
-       isr |= inb(0x20);
-
-       return isr;
-}
-
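-/* In vic_read_isr() above, 0x0b is the 8259 OCW3 "read ISR" command;
- * the following read of the command port (0x20 master, 0xa0 slave)
- * then returns the in-service register rather than the IRR, giving
- * a 16 bit picture of which interrupts are being serviced. */
-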
-static __init void qic_setup(void)
-{
-       if (!is_cpu_quad()) {
-               /* not a quad, no setup */
-               return;
-       }
-       outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
-       outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
-
-       if (is_cpu_extended()) {
-               /* the QIC duplicate of the VIC base register */
-               outb(VIC_DEFAULT_CPI_BASE, QIC_VIC_CPI_BASE_REGISTER);
-               outb(QIC_DEFAULT_CPI_BASE, QIC_CPI_BASE_REGISTER);
-
-               /* FIXME: should set up the QIC timer and memory parity
-                * error vectors here */
-       }
-}
-
-static __init void vic_setup_pic(void)
-{
-       outb(1, VIC_REDIRECT_REGISTER_1);
-       /* clear the claim registers for dynamic routing */
-       outb(0, VIC_CLAIM_REGISTER_0);
-       outb(0, VIC_CLAIM_REGISTER_1);
-
-       outb(0, VIC_PRIORITY_REGISTER);
-       /* Set the Primary and Secondary Microchannel vector
-        * bases to be the same as the ordinary interrupts
-        *
-        * FIXME: This would be more efficient using separate
-        * vectors. */
-       outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
-       outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
-       /* Now initialise the master PIC belonging to this CPU by
-        * sending the four ICWs */
-
-       /* ICW1: level triggered, ICW4 needed */
-       outb(0x19, 0x20);
-
-       /* ICW2: vector base */
-       outb(FIRST_EXTERNAL_VECTOR, 0x21);
-
-       /* ICW3: slave at line 2 */
-       outb(0x04, 0x21);
-
-       /* ICW4: 8086 mode */
-       outb(0x01, 0x21);
-
-       /* now the same for the slave PIC */
-
-       /* ICW1: level trigger, ICW4 needed */
-       outb(0x19, 0xA0);
-
-       /* ICW2: slave vector base */
-       outb(FIRST_EXTERNAL_VECTOR + 8, 0xA1);
-
-       /* ICW3: slave ID */
-       outb(0x02, 0xA1);
-
-       /* ICW4: 8086 mode */
-       outb(0x01, 0xA1);
-}
-
-static void do_quad_bootstrap(void)
-{
-       if (is_cpu_quad() && is_cpu_vic_boot()) {
-               int i;
-               unsigned long flags;
-               __u8 cpuid = hard_smp_processor_id();
-
-               local_irq_save(flags);
-
-               for (i = 0; i < 4; i++) {
-                       /* FIXME: this would be >> 3 & 0x7 on the 32 way */
-                       if (((cpuid >> 2) & 0x03) == i)
-                               /* don't lower our own mask! */
-                               continue;
-
-                       /* masquerade as local Quad CPU */
-                       outb(QIC_CPUID_ENABLE | i, QIC_PROCESSOR_ID);
-                       /* enable the startup CPI */
-                       outb(QIC_BOOT_CPI_MASK, QIC_MASK_REGISTER1);
-                       /* restore cpu id */
-                       outb(0, QIC_PROCESSOR_ID);
-               }
-               local_irq_restore(flags);
-       }
-}
-
-void prefill_possible_map(void)
-{
-       /* This is empty on Voyager because we need a much
-        * earlier detection, which is done in find_smp_config */
-}
-
-/* Set up all the basic stuff: read the SMP config and make all the
- * SMP information reflect only the boot cpu.  All others will be
- * brought on-line later. */
-void __init find_smp_config(void)
-{
-       int i;
-
-       boot_cpu_id = hard_smp_processor_id();
-
-       printk("VOYAGER SMP: Boot cpu is %d\n", boot_cpu_id);
-
-       /* initialize the CPU structures (moved from smp_boot_cpus) */
-       for (i = 0; i < nr_cpu_ids; i++)
-               cpu_irq_affinity[i] = ~0;
-       cpu_online_map = cpumask_of_cpu(boot_cpu_id);
-
-       /* The boot CPU must be extended */
-       voyager_extended_vic_processors = 1 << boot_cpu_id;
-       /* initially, all of the first 8 CPUs can boot */
-       voyager_allowed_boot_processors = 0xff;
-       /* set up everything for just this CPU, we can alter
-        * this as we start the other CPUs later */
-       /* now get the CPU disposition from the extended CMOS */
-       cpus_addr(voyager_phys_cpu_present_map)[0] =
-           voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK);
-       cpus_addr(voyager_phys_cpu_present_map)[0] |=
-           voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK + 1) << 8;
-       cpus_addr(voyager_phys_cpu_present_map)[0] |=
-           voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
-                                      2) << 16;
-       cpus_addr(voyager_phys_cpu_present_map)[0] |=
-           voyager_extended_cmos_read(VOYAGER_PROCESSOR_PRESENT_MASK +
-                                      3) << 24;
-       init_cpu_possible(&voyager_phys_cpu_present_map);
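-       /* i.e. the 32 bit physical presence mask is assembled from
-        * four consecutive extended CMOS bytes, low byte first;
-        * illustrative bytes { 0x0f, 0x00, 0x00, 0x00 } would mean
-        * CPUs 0-3 present. */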
-       printk("VOYAGER SMP: voyager_phys_cpu_present_map = 0x%lx\n",
-              cpus_addr(voyager_phys_cpu_present_map)[0]);
-       /* Here we set up the VIC to enable SMP */
-       /* enable the CPIs by writing the base vector to their register */
-       outb(VIC_DEFAULT_CPI_BASE, VIC_CPI_BASE_REGISTER);
-       outb(1, VIC_REDIRECT_REGISTER_1);
-       /* set the claim registers for static routing --- Boot CPU gets
-        * all interrupts until all the other CPUs have started */
-       outb(0xff, VIC_CLAIM_REGISTER_0);
-       outb(0xff, VIC_CLAIM_REGISTER_1);
-       /* Set the Primary and Secondary Microchannel vector
-        * bases to be the same as the ordinary interrupts
-        *
-        * FIXME: This would be more efficient using separate
-        * vectors. */
-       outb(FIRST_EXTERNAL_VECTOR, VIC_PRIMARY_MC_BASE);
-       outb(FIRST_EXTERNAL_VECTOR, VIC_SECONDARY_MC_BASE);
-
-       /* Finally tell the firmware that we're driving */
-       outb(inb(VOYAGER_SUS_IN_CONTROL_PORT) | VOYAGER_IN_CONTROL_FLAG,
-            VOYAGER_SUS_IN_CONTROL_PORT);
-
-       current_thread_info()->cpu = boot_cpu_id;
-       x86_write_percpu(cpu_number, boot_cpu_id);
-}
-
-/*
- *     The bootstrap kernel entry code has set these up. Save them
- *     for a given CPU; id is physical */
-void __init smp_store_cpu_info(int id)
-{
-       struct cpuinfo_x86 *c = &cpu_data(id);
-
-       *c = boot_cpu_data;
-       c->cpu_index = id;
-
-       identify_secondary_cpu(c);
-}
-
-/* Routine initially called when a non-boot CPU is brought online */
-static void __init start_secondary(void *unused)
-{
-       __u8 cpuid = hard_smp_processor_id();
-
-       cpu_init();
-
-       /* OK, we're in the routine */
-       ack_CPI(VIC_CPU_BOOT_CPI);
-
-       /* setup the 8259 master slave pair belonging to this CPU ---
-        * we won't actually receive any interrupts until the boot
-        * CPU relinquishes its static routing mask */
-       vic_setup_pic();
-
-       qic_setup();
-
-       if (is_cpu_quad() && !is_cpu_vic_boot()) {
-               /* clear the boot CPI */
-               __u8 dummy;
-
-               dummy =
-                   voyager_quad_cpi_addr[cpuid]->qic_cpi[VIC_CPU_BOOT_CPI].cpi;
-               printk("read dummy %d\n", dummy);
-       }
-
-       /* lower the mask to receive CPIs */
-       vic_enable_cpi();
-
-       VDEBUG(("VOYAGER SMP: CPU%d, stack at about %p\n", cpuid, &cpuid));
-
-       notify_cpu_starting(cpuid);
-
-       /* enable interrupts */
-       local_irq_enable();
-
-       /* get our bogomips */
-       calibrate_delay();
-
-       /* save our processor parameters */
-       smp_store_cpu_info(cpuid);
-
-       /* if we're a quad, we may need to bootstrap other CPUs */
-       do_quad_bootstrap();
-
-       /* FIXME: this is rather a poor hack to prevent the CPU
-        * activating softirqs while it's supposed to be waiting for
-        * permission to proceed.  Without this, the new per CPU stuff
-        * in the softirqs will fail */
-       local_irq_disable();
-       cpu_set(cpuid, cpu_callin_map);
-
-       /* signal that we're done */
-       cpu_booted_map = 1;
-
-       while (!cpu_isset(cpuid, smp_commenced_mask))
-               rep_nop();
-       local_irq_enable();
-
-       local_flush_tlb();
-
-       cpu_set(cpuid, cpu_online_map);
-       wmb();
-       cpu_idle();
-}
-
-/* Routine to kick start the given CPU and wait for it to report ready
- * (or timeout in startup).  When this routine returns, the requested
- * CPU is either fully running and configured or known to be dead.
- *
- * We call this routine sequentially 1 CPU at a time, so no need for
- * locking */
-
-static void __init do_boot_cpu(__u8 cpu)
-{
-       struct task_struct *idle;
-       int timeout;
-       unsigned long flags;
-       int quad_boot = (1 << cpu) & voyager_quad_processors
-           & ~(voyager_extended_vic_processors
-               & voyager_allowed_boot_processors);
-
-       /* This is the format of the CPI IDT gate (in real mode) which
-        * we're hijacking to boot the CPU */
-       union IDTFormat {
-               struct seg {
-                       __u16 Offset;
-                       __u16 Segment;
-               } idt;
-               __u32 val;
-       } hijack_source;
-
-       __u32 *hijack_vector;
-       __u32 start_phys_address = setup_trampoline();
-
-       /* There's a clever trick to this: the Linux trampoline is
-        * compiled to begin at absolute location zero, so make the
-        * address zero but have the data segment selector compensate
-        * for the actual address */
-       hijack_source.idt.Offset = start_phys_address & 0x000F;
-       hijack_source.idt.Segment = (start_phys_address >> 4) & 0xFFFF;
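-
-       /* Real mode resolves Segment:Offset as Segment * 16 + Offset,
-        * so e.g. start_phys_address = 0x9f000 becomes 0x9f00:0x0000,
-        * which still points at the trampoline even though the code
-        * itself is linked at address zero. */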
-
-       cpucount++;
-       alternatives_smp_switch(1);
-
-       idle = fork_idle(cpu);
-       if (IS_ERR(idle))
-               panic("failed fork for CPU%d", cpu);
-       idle->thread.ip = (unsigned long)start_secondary;
-       /* init_tasks (in sched.c) is indexed logically */
-       stack_start.sp = (void *)idle->thread.sp;
-
-       init_gdt(cpu);
-       per_cpu(current_task, cpu) = idle;
-       early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
-       irq_ctx_init(cpu);
-
-       /* Note: Don't modify initial ss override */
-       VDEBUG(("VOYAGER SMP: Booting CPU%d at 0x%lx[%x:%x], stack %p\n", cpu,
-               (unsigned long)hijack_source.val, hijack_source.idt.Segment,
-               hijack_source.idt.Offset, stack_start.sp));
-
-       /* init lowmem identity mapping */
-       clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
-                       min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
-       flush_tlb_all();
-
-       if (quad_boot) {
-               printk("CPU %d: non extended Quad boot\n", cpu);
-               hijack_vector =
-                   (__u32 *)
-                   phys_to_virt((VIC_CPU_BOOT_CPI + QIC_DEFAULT_CPI_BASE) * 4);
-               *hijack_vector = hijack_source.val;
-       } else {
-               printk("CPU%d: extended VIC boot\n", cpu);
-               hijack_vector =
-                   (__u32 *)
-                   phys_to_virt((VIC_CPU_BOOT_CPI + VIC_DEFAULT_CPI_BASE) * 4);
-               *hijack_vector = hijack_source.val;
-               /* VIC errata, may also receive interrupt at this address */
-               hijack_vector =
-                   (__u32 *)
-                   phys_to_virt((VIC_CPU_BOOT_ERRATA_CPI +
-                                 VIC_DEFAULT_CPI_BASE) * 4);
-               *hijack_vector = hijack_source.val;
-       }
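-       /* (The real mode IVT sits at physical address 0 with four
-        * bytes per vector, offset then segment, which is why
-        * (the boot CPI plus the CPI base) * 4 is the address
-        * patched above.) */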
-       /* All non-boot CPUs start with interrupts fully masked.  Need
-        * to lower the mask of the CPI we're about to send.  We do
-        * this in the VIC by masquerading as the processor we're
-        * about to boot and lowering its interrupt mask */
-       local_irq_save(flags);
-       if (quad_boot) {
-               send_one_QIC_CPI(cpu, VIC_CPU_BOOT_CPI);
-       } else {
-               outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
-               /* here we're altering registers belonging to `cpu' */
-
-               outb(VIC_BOOT_INTERRUPT_MASK, 0x21);
-               /* now go back to our original identity */
-               outb(boot_cpu_id, VIC_PROCESSOR_ID);
-
-               /* and boot the CPU */
-
-               send_CPI((1 << cpu), VIC_CPU_BOOT_CPI);
-       }
-       cpu_booted_map = 0;
-       local_irq_restore(flags);
-
-       /* now wait for it to become ready (or timeout) */
-       for (timeout = 0; timeout < 50000; timeout++) {
-               if (cpu_booted_map)
-                       break;
-               udelay(100);
-       }
-       /* reset the page table */
-       zap_low_mappings();
-
-       if (cpu_booted_map) {
-               VDEBUG(("CPU%d: Booted successfully, back in CPU %d\n",
-                       cpu, smp_processor_id()));
-
-               printk("CPU%d: ", cpu);
-               print_cpu_info(&cpu_data(cpu));
-               wmb();
-               cpu_set(cpu, cpu_callout_map);
-               cpu_set(cpu, cpu_present_map);
-       } else {
-               printk("CPU%d FAILED TO BOOT: ", cpu);
-               if (*((volatile unsigned char *)
-                     phys_to_virt(start_phys_address)) == 0xA5)
-                       printk("Stuck.\n");
-               else
-                       printk("Not responding.\n");
-
-               cpucount--;
-       }
-}
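
The tail of do_boot_cpu() above is a bounded-poll handshake: the boot CPU fires the CPI, then polls cpu_booted_map for up to 50000 iterations of udelay(100), roughly five seconds, before declaring the AP stuck or unresponsive. A minimal userspace sketch of the same pattern, with usleep() standing in for udelay() and all names purely illustrative:

#include <stdio.h>
#include <unistd.h>

/* hypothetical stand-in for the AP's "I'm alive" flag */
static volatile int cpu_booted_map;

static int wait_for_boot(void)
{
        int timeout;

        /* mirror the 50000 x udelay(100) budget from do_boot_cpu() */
        for (timeout = 0; timeout < 50000; timeout++) {
                if (cpu_booted_map)
                        return 0;       /* the CPU checked in */
                usleep(100);            /* stand-in for udelay(100) */
        }
        return -1;                      /* timed out */
}

int main(void)
{
        cpu_booted_map = 1;             /* pretend the AP came up */
        printf("boot %s\n", wait_for_boot() ? "failed" : "ok");
        return 0;
}
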
-
-void __init smp_boot_cpus(void)
-{
-       int i;
-
-       /* CAT BUS initialisation must be done after the memory */
-       /* FIXME: The L4 has a catbus too; it just needs to be
-        * accessed in a totally different way */
-       if (voyager_level == 5) {
-               voyager_cat_init();
-
-               /* now that the cat has probed the Voyager System Bus, sanity
-                * check the cpu map */
-               if (((voyager_quad_processors | voyager_extended_vic_processors)
-                    & cpus_addr(voyager_phys_cpu_present_map)[0]) !=
-                   cpus_addr(voyager_phys_cpu_present_map)[0]) {
-                       /* should panic */
-                       printk("\n\n***WARNING*** "
-                              "Sanity check of CPU present map FAILED\n");
-               }
-       } else if (voyager_level == 4)
-               voyager_extended_vic_processors =
-                   cpus_addr(voyager_phys_cpu_present_map)[0];
-
-       /* this sets up the idle task to run on the current cpu */
-       voyager_extended_cpus = 1;
-       /* Remove the global_irq_holder setting; it triggers a BUG() on
-        * schedule at the moment */
-       //global_irq_holder = boot_cpu_id;
-
-       /* FIXME: Need to do something about this, but it currently only
-        * works on CPUs with a TSC, which none of mine have.
-        smp_tune_scheduling();
-        */
-       smp_store_cpu_info(boot_cpu_id);
-       /* setup the jump vector */
-       initial_code = (unsigned long)initialize_secondary;
-       printk("CPU%d: ", boot_cpu_id);
-       print_cpu_info(&cpu_data(boot_cpu_id));
-
-       if (is_cpu_quad()) {
-               /* booting on a Quad CPU */
-               printk("VOYAGER SMP: Boot CPU is Quad\n");
-               qic_setup();
-               do_quad_bootstrap();
-       }
-
-       /* enable our own CPIs */
-       vic_enable_cpi();
-
-       cpu_set(boot_cpu_id, cpu_online_map);
-       cpu_set(boot_cpu_id, cpu_callout_map);
-
-       /* loop over all the extended VIC CPUs and boot them.  The
-        * Quad CPUs must be bootstrapped by their extended VIC cpu */
-       for (i = 0; i < nr_cpu_ids; i++) {
-               if (i == boot_cpu_id || !cpu_isset(i, voyager_phys_cpu_present_map))
-                       continue;
-               do_boot_cpu(i);
-               /* This udelay seems to be needed for the Quad boots;
-                * don't remove unless you know what you're doing */
-               udelay(1000);
-       }
-       /* we could compute the total bogomips here, but why bother?
-        * Code adapted from smpboot.c */
-       {
-               unsigned long bogosum = 0;
-
-               for_each_online_cpu(i)
-                       bogosum += cpu_data(i).loops_per_jiffy;
-               printk(KERN_INFO "Total of %d processors activated "
-                      "(%lu.%02lu BogoMIPS).\n",
-                      cpucount + 1, bogosum / (500000 / HZ),
-                      (bogosum / (5000 / HZ)) % 100);
-       }
-       voyager_extended_cpus = hweight32(voyager_extended_vic_processors);
-       printk("VOYAGER: Extended (interrupt handling CPUs): "
-              "%d, non-extended: %d\n", voyager_extended_cpus,
-              num_booting_cpus() - voyager_extended_cpus);
-       /* that's it, switch to symmetric mode */
-       outb(0, VIC_PRIORITY_REGISTER);
-       outb(0, VIC_CLAIM_REGISTER_0);
-       outb(0, VIC_CLAIM_REGISTER_1);
-
-       VDEBUG(("VOYAGER SMP: Booted with %d CPUs\n", num_booting_cpus()));
-}
-
-/* Reload the secondary CPU's task structure (this function does not
- * return) */
-static void __init initialize_secondary(void)
-{
-#if 0
-       // AC kernels only
-       set_current(hard_get_current());
-#endif
-
-       /*
-        * We don't actually need to load the full TSS,
-        * basically just the stack pointer and the eip.
-        */
-
-       asm volatile ("movl %0,%%esp\n\t"
-                     "jmp *%1"::"r" (current->thread.sp),
-                     "r"(current->thread.ip));
-}
-
-/* handle a Voyager SYS_INT -- if we don't, the base board will
- * panic the system.
- *
- * System interrupts occur because some problem was detected on the
- * various busses.  To find out what went wrong, you have to probe all
- * the hardware via the CAT bus.  FIXME: At the moment we do nothing. */
-void smp_vic_sys_interrupt(struct pt_regs *regs)
-{
-       ack_CPI(VIC_SYS_INT);
-       printk("Voyager SYSTEM INTERRUPT\n");
-}
-
-/* Handle a Voyager CMN_INT; these interrupts occur either because of
- * a system status change or because a single-bit memory error
- * occurred.  FIXME: At the moment, ignore all this. */
-void smp_vic_cmn_interrupt(struct pt_regs *regs)
-{
-       static __u8 in_cmn_int = 0;
-       static DEFINE_SPINLOCK(cmn_int_lock);
-
-       /* common ints are broadcast, so make sure we only do this once */
-       _raw_spin_lock(&cmn_int_lock);
-       if (in_cmn_int)
-               goto unlock_end;
-
-       in_cmn_int++;
-       _raw_spin_unlock(&cmn_int_lock);
-
-       VDEBUG(("Voyager COMMON INTERRUPT\n"));
-
-       if (voyager_level == 5)
-               voyager_cat_do_common_interrupt();
-
-       _raw_spin_lock(&cmn_int_lock);
-       in_cmn_int = 0;
-      unlock_end:
-       _raw_spin_unlock(&cmn_int_lock);
-       ack_CPI(VIC_CMN_INT);
-}
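
smp_vic_cmn_interrupt() above uses a lock-protected flag so that, although the common interrupt is broadcast to every CPU, only one CPU at a time actually services it; the others bail out after the flag check. A hypothetical userspace analogue of that dedup pattern (pthread names are stand-ins, not kernel API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cmn_lock = PTHREAD_MUTEX_INITIALIZER;
static int in_cmn_int;

static void handle_common_interrupt(long cpu)
{
        pthread_mutex_lock(&cmn_lock);
        if (in_cmn_int) {               /* another CPU is already on it */
                pthread_mutex_unlock(&cmn_lock);
                return;
        }
        in_cmn_int = 1;
        pthread_mutex_unlock(&cmn_lock);

        printf("CPU%ld services the common interrupt\n", cpu);

        pthread_mutex_lock(&cmn_lock);
        in_cmn_int = 0;                 /* done; next broadcast may service */
        pthread_mutex_unlock(&cmn_lock);
}

static void *cpu_thread(void *arg)
{
        handle_common_interrupt((long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t0, t1;

        /* both "CPUs" receive the broadcast; at most one services it
         * at a time, the other returns after the flag check */
        pthread_create(&t0, NULL, cpu_thread, (void *)0L);
        pthread_create(&t1, NULL, cpu_thread, (void *)1L);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);
        return 0;
}
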
-
-/*
- * Reschedule callback. Nothing to do; all the work is done
- * automatically when we return from the interrupt.  */
-static void smp_reschedule_interrupt(void)
-{
-       /* do nothing */
-}
-
-static struct mm_struct *flush_mm;
-static unsigned long flush_va;
-static DEFINE_SPINLOCK(tlbstate_lock);
-
-/*
- * We cannot call mmdrop() because we are in interrupt context;
- * instead we update mm->cpu_vm_mask.
- *
- * We need to reload %cr3 since the page tables may be going
- * away from under us..
- */
-static inline void voyager_leave_mm(unsigned long cpu)
-{
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
-               BUG();
-       cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
-       load_cr3(swapper_pg_dir);
-}
-
-/*
- * Invalidate call-back
- */
-static void smp_invalidate_interrupt(void)
-{
-       __u8 cpu = smp_processor_id();
-
-       if (!test_bit(cpu, &smp_invalidate_needed))
-               return;
-       /* This will flood messages.  Don't uncomment unless you see
-        * problems with cross-CPU invalidation.
-        VDEBUG(("VOYAGER SMP: CPU%d received INVALIDATE_CPI\n",
-        smp_processor_id()));
-        */
-
-       if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
-               if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
-                       if (flush_va == TLB_FLUSH_ALL)
-                               local_flush_tlb();
-                       else
-                               __flush_tlb_one(flush_va);
-               } else
-                       voyager_leave_mm(cpu);
-       }
-       smp_mb__before_clear_bit();
-       clear_bit(cpu, &smp_invalidate_needed);
-       smp_mb__after_clear_bit();
-}
-
-/* All the new flush operations for 2.4 */
-
-/* This routine is called with a physical cpu mask */
-static void
-voyager_flush_tlb_others(unsigned long cpumask, struct mm_struct *mm,
-                        unsigned long va)
-{
-       int stuck = 50000;
-
-       if (!cpumask)
-               BUG();
-       if ((cpumask & cpus_addr(cpu_online_map)[0]) != cpumask)
-               BUG();
-       if (cpumask & (1 << smp_processor_id()))
-               BUG();
-       if (!mm)
-               BUG();
-
-       spin_lock(&tlbstate_lock);
-
-       flush_mm = mm;
-       flush_va = va;
-       atomic_set_mask(cpumask, &smp_invalidate_needed);
-       /*
-        * We have to send the CPI only to
-        * CPUs affected.
-        */
-       send_CPI(cpumask, VIC_INVALIDATE_CPI);
-
-       while (smp_invalidate_needed) {
-               mb();
-               if (--stuck == 0) {
-                       printk("***WARNING*** Stuck doing invalidate CPI "
-                              "(CPU%d)\n", smp_processor_id());
-                       break;
-               }
-       }
-
-       /* Uncomment only to debug invalidation problems
-          VDEBUG(("VOYAGER SMP: Completed invalidate CPI (CPU%d)\n", cpu));
-        */
-
-       flush_mm = NULL;
-       flush_va = 0;
-       spin_unlock(&tlbstate_lock);
-}
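
voyager_flush_tlb_others() above is a classic shootdown handshake: set one bit per target CPU in smp_invalidate_needed, send the CPI, then spin with a "stuck" bound until every target clears its bit. A sketch of just that handshake using C11 atomics, with a thread playing the target CPU (illustrative, not the kernel's implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong invalidate_needed;

static void *target_cpu(void *arg)
{
        int cpu = (int)(long)arg;

        /* ...the target flushes its TLB here, then acknowledges: */
        atomic_fetch_and(&invalidate_needed, ~(1UL << cpu));
        return NULL;
}

int main(void)
{
        pthread_t t;
        long stuck = 50000000;          /* bounded spin, like the original */

        atomic_fetch_or(&invalidate_needed, 1UL << 1);  /* target CPU1 */
        pthread_create(&t, NULL, target_cpu, (void *)1L);

        /* initiator spins until all targets have cleared their bits */
        while (atomic_load(&invalidate_needed)) {
                if (--stuck == 0) {
                        puts("***WARNING*** stuck doing invalidate");
                        break;
                }
        }
        pthread_join(t, NULL);
        puts("invalidate complete");
        return 0;
}
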
-
-void flush_tlb_current_task(void)
-{
-       struct mm_struct *mm = current->mm;
-       unsigned long cpu_mask;
-
-       preempt_disable();
-
-       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
-       local_flush_tlb();
-       if (cpu_mask)
-               voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-       preempt_enable();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-       unsigned long cpu_mask;
-
-       preempt_disable();
-
-       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
-
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       local_flush_tlb();
-               else
-                       voyager_leave_mm(smp_processor_id());
-       }
-       if (cpu_mask)
-               voyager_flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
-
-       preempt_enable();
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long cpu_mask;
-
-       preempt_disable();
-
-       cpu_mask = cpus_addr(mm->cpu_vm_mask)[0] & ~(1 << smp_processor_id());
-       if (current->active_mm == mm) {
-               if (current->mm)
-                       __flush_tlb_one(va);
-               else
-                       voyager_leave_mm(smp_processor_id());
-       }
-
-       if (cpu_mask)
-               voyager_flush_tlb_others(cpu_mask, mm, va);
-
-       preempt_enable();
-}
-
-EXPORT_SYMBOL(flush_tlb_page);
-
-/* enable the requested IRQs */
-static void smp_enable_irq_interrupt(void)
-{
-       __u8 irq;
-       __u8 cpu = get_cpu();
-
-       VDEBUG(("VOYAGER SMP: CPU%d enabling irq mask 0x%x\n", cpu,
-               vic_irq_enable_mask[cpu]));
-
-       spin_lock(&vic_irq_lock);
-       for (irq = 0; irq < 16; irq++) {
-               if (vic_irq_enable_mask[cpu] & (1 << irq))
-                       enable_local_vic_irq(irq);
-       }
-       vic_irq_enable_mask[cpu] = 0;
-       spin_unlock(&vic_irq_lock);
-
-       put_cpu_no_resched();
-}
-
-/*
- *     CPU halt call-back
- */
-static void smp_stop_cpu_function(void *dummy)
-{
-       VDEBUG(("VOYAGER SMP: CPU%d is STOPPING\n", smp_processor_id()));
-       cpu_clear(smp_processor_id(), cpu_online_map);
-       local_irq_disable();
-       for (;;)
-               halt();
-}
-
-/* execute a thread on a new CPU.  The function to be called must
- * have been set up beforehand.  This is used to schedule a function for
- * execution on all CPUs - set up the function, then broadcast a
- * function_interrupt CPI to come here on each CPU */
-static void smp_call_function_interrupt(void)
-{
-       irq_enter();
-       generic_smp_call_function_interrupt();
-       __get_cpu_var(irq_stat).irq_call_count++;
-       irq_exit();
-}
-
-static void smp_call_function_single_interrupt(void)
-{
-       irq_enter();
-       generic_smp_call_function_single_interrupt();
-       __get_cpu_var(irq_stat).irq_call_count++;
-       irq_exit();
-}
-
-/* Sorry about the name.  In an APIC-based system, the APICs
- * themselves are programmed to send a timer interrupt.  This is used
- * by Linux to reschedule the processor.  Voyager doesn't have this,
- * so we use the system clock to interrupt one processor, which in
- * turn broadcasts a timer CPI to all the others --- we receive that
- * CPI here.  We don't actually use this for counting, so losing
- * ticks doesn't matter.
- *
- * FIXME: For those CPUs which actually have a local APIC, we could
- * try to use it to trigger this interrupt instead of having to
- * broadcast the timer tick.  Unfortunately, all my pentium DYADs have
- * no local APIC, so I can't do this.
- *
- * This function is currently a placeholder and is unused in the code */
-void smp_apic_timer_interrupt(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       wrapper_smp_local_timer_interrupt();
-       set_irq_regs(old_regs);
-}
-
-/* All of the QUAD interrupt GATES */
-void smp_qic_timer_interrupt(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       ack_QIC_CPI(QIC_TIMER_CPI);
-       wrapper_smp_local_timer_interrupt();
-       set_irq_regs(old_regs);
-}
-
-void smp_qic_invalidate_interrupt(struct pt_regs *regs)
-{
-       ack_QIC_CPI(QIC_INVALIDATE_CPI);
-       smp_invalidate_interrupt();
-}
-
-void smp_qic_reschedule_interrupt(struct pt_regs *regs)
-{
-       ack_QIC_CPI(QIC_RESCHEDULE_CPI);
-       smp_reschedule_interrupt();
-}
-
-void smp_qic_enable_irq_interrupt(struct pt_regs *regs)
-{
-       ack_QIC_CPI(QIC_ENABLE_IRQ_CPI);
-       smp_enable_irq_interrupt();
-}
-
-void smp_qic_call_function_interrupt(struct pt_regs *regs)
-{
-       ack_QIC_CPI(QIC_CALL_FUNCTION_CPI);
-       smp_call_function_interrupt();
-}
-
-void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
-{
-       ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
-       smp_call_function_single_interrupt();
-}
-
-void smp_vic_cpi_interrupt(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       __u8 cpu = smp_processor_id();
-
-       if (is_cpu_quad())
-               ack_QIC_CPI(VIC_CPI_LEVEL0);
-       else
-               ack_VIC_CPI(VIC_CPI_LEVEL0);
-
-       if (test_and_clear_bit(VIC_TIMER_CPI, &vic_cpi_mailbox[cpu]))
-               wrapper_smp_local_timer_interrupt();
-       if (test_and_clear_bit(VIC_INVALIDATE_CPI, &vic_cpi_mailbox[cpu]))
-               smp_invalidate_interrupt();
-       if (test_and_clear_bit(VIC_RESCHEDULE_CPI, &vic_cpi_mailbox[cpu]))
-               smp_reschedule_interrupt();
-       if (test_and_clear_bit(VIC_ENABLE_IRQ_CPI, &vic_cpi_mailbox[cpu]))
-               smp_enable_irq_interrupt();
-       if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
-               smp_call_function_interrupt();
-       if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
-               smp_call_function_single_interrupt();
-       set_irq_regs(old_regs);
-}
-
-static void do_flush_tlb_all(void *info)
-{
-       unsigned long cpu = smp_processor_id();
-
-       __flush_tlb_all();
-       if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
-               voyager_leave_mm(cpu);
-}
-
-/* flush the TLB of every active CPU in the system */
-void flush_tlb_all(void)
-{
-       on_each_cpu(do_flush_tlb_all, 0, 1);
-}
-
-/* send a reschedule CPI to one CPU by physical CPU number*/
-static void voyager_smp_send_reschedule(int cpu)
-{
-       send_one_CPI(cpu, VIC_RESCHEDULE_CPI);
-}
-
-int hard_smp_processor_id(void)
-{
-       __u8 i;
-       __u8 cpumask = inb(VIC_PROC_WHO_AM_I);
-       if ((cpumask & QUAD_IDENTIFIER) == QUAD_IDENTIFIER)
-               return cpumask & 0x1F;
-
-       for (i = 0; i < 8; i++) {
-               if (cpumask & (1 << i))
-                       return i;
-       }
-       printk("** WARNING ** Illegal cpuid returned by VIC: %d", cpumask);
-       return 0;
-}
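
hard_smp_processor_id() above decodes a one-hot 8-bit mask from the VIC "who am I" register by scanning for the lowest set bit; the loop is equivalent to ffs() minus one. A small sketch (the register value here is made up):

#include <stdio.h>
#include <strings.h>

static int cpu_from_mask(unsigned char who_am_i)
{
        int bit = ffs(who_am_i);        /* 1-based index of lowest set bit */

        if (bit == 0) {                 /* no bit set: illegal answer */
                fprintf(stderr, "illegal cpuid mask: %#x\n", who_am_i);
                return 0;
        }
        return bit - 1;
}

int main(void)
{
        printf("mask 0x08 -> CPU%d\n", cpu_from_mask(0x08));    /* CPU3 */
        return 0;
}
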
-
-int safe_smp_processor_id(void)
-{
-       return hard_smp_processor_id();
-}
-
-/* broadcast a halt to all other CPUs */
-static void voyager_smp_send_stop(void)
-{
-       smp_call_function(smp_stop_cpu_function, NULL, 1);
-}
-
-/* this function is triggered in time.c when a clock tick fires
- * we need to re-broadcast the tick to all CPUs */
-void smp_vic_timer_interrupt(void)
-{
-       send_CPI_allbutself(VIC_TIMER_CPI);
-       smp_local_timer_interrupt();
-}
-
-/* local (per CPU) timer interrupt.  It does both profiling and
- * process statistics/rescheduling.
- *
- * We do profiling in every local tick, statistics/rescheduling
- * happen only every 'profiling multiplier' ticks. The default
- * multiplier is 1 and it can be changed by writing the new multiplier
- * value into /proc/profile.
- */
-void smp_local_timer_interrupt(void)
-{
-       int cpu = smp_processor_id();
-       long weight;
-
-       profile_tick(CPU_PROFILING);
-       if (--per_cpu(prof_counter, cpu) <= 0) {
-               /*
-                * The multiplier may have changed since the last time we got
-                * to this point as a result of the user writing to
-                * /proc/profile. In this case we need to adjust the APIC
-                * timer accordingly.
-                *
-                * Interrupts are already masked off at this point.
-                */
-               per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
-               if (per_cpu(prof_counter, cpu) !=
-                   per_cpu(prof_old_multiplier, cpu)) {
-                       /* FIXME: need to update the vic timer tick here */
-                       per_cpu(prof_old_multiplier, cpu) =
-                           per_cpu(prof_counter, cpu);
-               }
-
-               update_process_times(user_mode_vm(get_irq_regs()));
-       }
-
-       if (((1 << cpu) & voyager_extended_vic_processors) == 0)
-               /* only extended VIC processors participate in
-                * interrupt distribution */
-               return;
-
-       /*
-        * We take the 'long' return path, and there every subsystem
-        * grabs the appropriate locks (kernel lock / irq lock).
-        *
-        * We might want to decouple profiling from the 'long path',
-        * and do the profiling totally in assembly.
-        *
-        * Currently this isn't too much of an issue (performance wise),
-        * we can take more than 100K local irqs per second on a 100 MHz P5.
-        */
-
-       if ((++vic_tick[cpu] & 0x7) != 0)
-               return;
-       /* get here every 16 ticks (about every 1/6 of a second) */
-
-       /* Change our priority to give someone else a chance at getting
-        * the IRQ. The algorithm goes like this:
-        *
-        * In the VIC, the dynamically routed interrupt is always
-        * handled by the lowest priority eligible (i.e. receiving
-        * interrupts) CPU.  If >1 eligible CPUs are equal lowest, the
-        * lowest processor number gets it.
-        *
-        * The priority of a CPU is controlled by a special per-CPU
-        * VIC priority register which is 3 bits wide, 0 being the lowest
-        * and 7 the highest priority.
-        *
-        * Therefore we subtract the average number of interrupts from
-        * the number we've fielded.  If this number is negative, we
-        * lower the activity count and if it is positive, we raise
-        * it.
-        *
-        * I'm afraid this still leads to odd-looking interrupt counts:
-        * the totals are all roughly equal, but the individual ones
-        * look rather skewed.
-        *
-        * FIXME: This algorithm is total crap when mixed with SMP
-        * affinity code since we now try to even up the interrupt
-        * counts when an affinity binding is keeping them on a
-        * particular CPU */
-       weight = (vic_intr_count[cpu] * voyager_extended_cpus
-                 - vic_intr_total) >> 4;
-       weight += 4;
-       if (weight > 7)
-               weight = 7;
-       if (weight < 0)
-               weight = 0;
-
-       outb((__u8) weight, VIC_PRIORITY_REGISTER);
-
-#ifdef VOYAGER_DEBUG
-       if ((vic_tick[cpu] & 0xFFF) == 0) {
-               /* print this message roughly every 25 secs */
-               printk("VOYAGER SMP: vic_tick[%d] = %lu, weight = %ld\n",
-                      cpu, vic_tick[cpu], weight);
-       }
-#endif
-}
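
The priority update at the end of smp_local_timer_interrupt() reduces to: compare this CPU's interrupt count against its share of the total, bias the result to the 3-bit register's midpoint, and clamp to 0..7. A standalone sketch of that arithmetic, mirroring the kernel expression (and, like it, assuming arithmetic right shift of negative values):

#include <stdio.h>

static unsigned char vic_priority(unsigned long my_intr_count,
                                  unsigned long total_intr_count,
                                  int nr_extended_cpus)
{
        long weight = ((long)(my_intr_count * nr_extended_cpus)
                       - (long)total_intr_count) >> 4;

        weight += 4;                    /* centre on the register midpoint */
        if (weight > 7)
                weight = 7;             /* 3-bit register: highest priority */
        if (weight < 0)
                weight = 0;             /* lowest priority */
        return (unsigned char)weight;
}

int main(void)
{
        /* a CPU that fielded fewer interrupts than average ends up with a
         * low priority value, and the VIC routes the next dynamically
         * routed interrupt to the lowest-priority eligible CPU */
        printf("weight = %u\n", vic_priority(100, 1000, 4));
        return 0;
}
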
-
-/* setup the profiling timer */
-int setup_profiling_timer(unsigned int multiplier)
-{
-       int i;
-
-       if (!multiplier)
-               return -EINVAL;
-
-       /*
-        * Set the new multiplier for each CPU. CPUs don't start using the
-        * new values until the next timer interrupt in which they do process
-        * accounting.
-        */
-       for (i = 0; i < nr_cpu_ids; ++i)
-               per_cpu(prof_multiplier, i) = multiplier;
-
-       return 0;
-}
-
-/* This is a bit of a mess, but forced on us by the genirq changes:
- * there's no genirq handler that really does what voyager wants,
- * so hack it up with the simple IRQ handler */
-static void handle_vic_irq(unsigned int irq, struct irq_desc *desc)
-{
-       before_handle_vic_irq(irq);
-       handle_simple_irq(irq, desc);
-       after_handle_vic_irq(irq);
-}
-
-/*  The CPIs are handled in the per cpu 8259s, so they must be
- *  enabled to be received: FIX: enabling the CPIs in the early
- *  boot sequence interferes with bug checking; enable them later
- *  on in smp_init */
-#define VIC_SET_GATE(cpi, vector) \
-       set_intr_gate((cpi) + VIC_DEFAULT_CPI_BASE, (vector))
-#define QIC_SET_GATE(cpi, vector) \
-       set_intr_gate((cpi) + QIC_DEFAULT_CPI_BASE, (vector))
-
-void __init voyager_smp_intr_init(void)
-{
-       int i;
-
-       /* initialize the per cpu irq mask to all disabled */
-       for (i = 0; i < nr_cpu_ids; i++)
-               vic_irq_mask[i] = 0xFFFF;
-
-       VIC_SET_GATE(VIC_CPI_LEVEL0, vic_cpi_interrupt);
-
-       VIC_SET_GATE(VIC_SYS_INT, vic_sys_interrupt);
-       VIC_SET_GATE(VIC_CMN_INT, vic_cmn_interrupt);
-
-       QIC_SET_GATE(QIC_TIMER_CPI, qic_timer_interrupt);
-       QIC_SET_GATE(QIC_INVALIDATE_CPI, qic_invalidate_interrupt);
-       QIC_SET_GATE(QIC_RESCHEDULE_CPI, qic_reschedule_interrupt);
-       QIC_SET_GATE(QIC_ENABLE_IRQ_CPI, qic_enable_irq_interrupt);
-       QIC_SET_GATE(QIC_CALL_FUNCTION_CPI, qic_call_function_interrupt);
-
-       /* now put the VIC descriptor into the first 48 IRQs
-        *
-        * This is for later: first 16 correspond to PC IRQs; next 16
-        * are Primary MC IRQs and final 16 are Secondary MC IRQs */
-       for (i = 0; i < 48; i++)
-               set_irq_chip_and_handler(i, &vic_chip, handle_vic_irq);
-}
-
-/* send a CPI at level cpi to the set of cpus in cpuset (one bit set
- * per processor that should receive the CPI) */
-static void send_CPI(__u32 cpuset, __u8 cpi)
-{
-       int cpu;
-       __u32 quad_cpuset = (cpuset & voyager_quad_processors);
-
-       if (cpi < VIC_START_FAKE_CPI) {
-               /* fake CPIs are only used for booting, so send to the
-                * extended quads as well --- quads must be VIC booted */
-               outb((__u8) (cpuset), VIC_CPI_Registers[cpi]);
-               return;
-       }
-       if (quad_cpuset)
-               send_QIC_CPI(quad_cpuset, cpi);
-       cpuset &= ~quad_cpuset;
-       cpuset &= 0xff;         /* only first 8 CPUs vaild for VIC CPI */
-       if (cpuset == 0)
-               return;
-       for_each_online_cpu(cpu) {
-               if (cpuset & (1 << cpu))
-                       set_bit(cpi, &vic_cpi_mailbox[cpu]);
-       }
-       if (cpuset)
-               outb((__u8) cpuset, VIC_CPI_Registers[VIC_CPI_LEVEL0]);
-}
-
-/* Acknowledge receipt of CPI in the QIC, clear in QIC hardware and
- * set the cache line to shared by reading it.
- *
- * DON'T make this inline, otherwise the cache line read will be
- * optimised away
- */
-static int ack_QIC_CPI(__u8 cpi)
-{
-       __u8 cpu = hard_smp_processor_id();
-
-       cpi &= 7;
-
-       outb(1 << cpi, QIC_INTERRUPT_CLEAR1);
-       return voyager_quad_cpi_addr[cpu]->qic_cpi[cpi].cpi;
-}
-
-static void ack_special_QIC_CPI(__u8 cpi)
-{
-       switch (cpi) {
-       case VIC_CMN_INT:
-               outb(QIC_CMN_INT, QIC_INTERRUPT_CLEAR0);
-               break;
-       case VIC_SYS_INT:
-               outb(QIC_SYS_INT, QIC_INTERRUPT_CLEAR0);
-               break;
-       }
-       /* also clear at the VIC, just in case (nop for non-extended proc) */
-       ack_VIC_CPI(cpi);
-}
-
-/* Acknowledge receipt of CPI in the VIC (essentially an EOI) */
-static void ack_VIC_CPI(__u8 cpi)
-{
-#ifdef VOYAGER_DEBUG
-       unsigned long flags;
-       __u16 isr;
-       __u8 cpu = smp_processor_id();
-
-       local_irq_save(flags);
-       isr = vic_read_isr();
-       if ((isr & (1 << (cpi & 7))) == 0) {
-               printk("VOYAGER SMP: CPU%d lost CPI%d\n", cpu, cpi);
-       }
-#endif
-       /* send specific EOI; the two system interrupts have
-        * bit 4 set for a separate vector but behave as the
-        * corresponding 3-bit intr */
-       outb_p(0x60 | (cpi & 7), 0x20);
-
-#ifdef VOYAGER_DEBUG
-       if ((vic_read_isr() & (1 << (cpi & 7))) != 0) {
-               printk("VOYAGER SMP: CPU%d still asserting CPI%d\n", cpu, cpi);
-       }
-       local_irq_restore(flags);
-#endif
-}
-
-/* cribbed with thanks from irq.c */
-#define __byte(x,y)    (((unsigned char *)&(y))[x])
-#define cached_21(cpu) (__byte(0,vic_irq_mask[cpu]))
-#define cached_A1(cpu) (__byte(1,vic_irq_mask[cpu]))
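
The __byte() macro aliases the two 8259 mask ports onto one 16-bit per-CPU word: byte 0 is the master PIC mask (port 0x21) and byte 1 the slave mask (port 0xA1). Note the indexing is endianness-dependent; it works as described on little-endian x86. A quick demonstration:

#include <stdio.h>

#define __byte(x, y)    (((unsigned char *)&(y))[x])

int main(void)
{
        unsigned short vic_irq_mask = 0xFFFB;   /* all masked except IRQ2 */

        /* on little-endian x86: byte 0 = low byte = master mask */
        printf("cached_21 = 0x%02x\n", __byte(0, vic_irq_mask)); /* 0xfb */
        printf("cached_A1 = 0x%02x\n", __byte(1, vic_irq_mask)); /* 0xff */
        return 0;
}
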
-
-static unsigned int startup_vic_irq(unsigned int irq)
-{
-       unmask_vic_irq(irq);
-
-       return 0;
-}
-
-/* The enable and disable routines.  This is where we run into
- * conflicting architectural philosophy.  Fundamentally, the voyager
- * architecture does not expect to have to disable interrupts globally
- * (the IRQ controllers belong to each CPU).  The processor masquerade
- * which is used to start the system shouldn't be used in a running OS
- * since it will cause great confusion if two separate CPUs drive
- * the same IRQ controller (I know, I've tried it).
- *
- * The solution is a variant on the NCR lazy SPL design:
- *
- * 1) To disable an interrupt, do nothing (other than set the
- *    IRQ_DISABLED flag).  This dares the interrupt actually to arrive.
- *
- * 2) If the interrupt dares to come in, raise the local mask against
- *    it (this will result in all the CPU masks being raised
- *    eventually).
- *
- * 3) To enable the interrupt, lower the mask on the local CPU and
- *    broadcast an Interrupt enable CPI which causes all other CPUs to
- *    adjust their masks accordingly.  */
-
-static void unmask_vic_irq(unsigned int irq)
-{
-       /* linux doesn't do processor-irq affinity, so enable on
-        * all CPUs we know about */
-       int cpu = smp_processor_id(), real_cpu;
-       __u16 mask = (1 << irq);
-       __u32 processorList = 0;
-       unsigned long flags;
-
-       VDEBUG(("VOYAGER: unmask_vic_irq(%d) CPU%d affinity 0x%lx\n",
-               irq, cpu, cpu_irq_affinity[cpu]));
-       spin_lock_irqsave(&vic_irq_lock, flags);
-       for_each_online_cpu(real_cpu) {
-               if (!(voyager_extended_vic_processors & (1 << real_cpu)))
-                       continue;
-               if (!(cpu_irq_affinity[real_cpu] & mask)) {
-                       /* irq has no affinity for this CPU, ignore */
-                       continue;
-               }
-               if (real_cpu == cpu) {
-                       enable_local_vic_irq(irq);
-               } else if (vic_irq_mask[real_cpu] & mask) {
-                       vic_irq_enable_mask[real_cpu] |= mask;
-                       processorList |= (1 << real_cpu);
-               }
-       }
-       spin_unlock_irqrestore(&vic_irq_lock, flags);
-       if (processorList)
-               send_CPI(processorList, VIC_ENABLE_IRQ_CPI);
-}
-
-static void mask_vic_irq(unsigned int irq)
-{
-       /* lazy disable, do nothing */
-}
-
-static void enable_local_vic_irq(unsigned int irq)
-{
-       __u8 cpu = smp_processor_id();
-       __u16 mask = ~(1 << irq);
-       __u16 old_mask = vic_irq_mask[cpu];
-
-       vic_irq_mask[cpu] &= mask;
-       if (vic_irq_mask[cpu] == old_mask)
-               return;
-
-       VDEBUG(("VOYAGER DEBUG: Enabling irq %d in hardware on CPU %d\n",
-               irq, cpu));
-
-       if (irq & 8) {
-               outb_p(cached_A1(cpu), 0xA1);
-               (void)inb_p(0xA1);
-       } else {
-               outb_p(cached_21(cpu), 0x21);
-               (void)inb_p(0x21);
-       }
-}
-
-static void disable_local_vic_irq(unsigned int irq)
-{
-       __u8 cpu = smp_processor_id();
-       __u16 mask = (1 << irq);
-       __u16 old_mask = vic_irq_mask[cpu];
-
-       if (irq == 7)
-               return;
-
-       vic_irq_mask[cpu] |= mask;
-       if (old_mask == vic_irq_mask[cpu])
-               return;
-
-       VDEBUG(("VOYAGER DEBUG: Disabling irq %d in hardware on CPU %d\n",
-               irq, cpu));
-
-       if (irq & 8) {
-               outb_p(cached_A1(cpu), 0xA1);
-               (void)inb_p(0xA1);
-       } else {
-               outb_p(cached_21(cpu), 0x21);
-               (void)inb_p(0x21);
-       }
-}
-
-/* The VIC is level triggered, so the ack can only be issued after the
- * interrupt completes.  However, we do Voyager lazy interrupt
- * handling here: It is an extremely expensive operation to mask an
- * interrupt in the vic, so we merely set a flag (IRQ_DISABLED).  If
- * this interrupt actually comes in, then we mask and ack here to push
- * the interrupt off to another CPU */
-static void before_handle_vic_irq(unsigned int irq)
-{
-       irq_desc_t *desc = irq_to_desc(irq);
-       __u8 cpu = smp_processor_id();
-
-       _raw_spin_lock(&vic_irq_lock);
-       vic_intr_total++;
-       vic_intr_count[cpu]++;
-
-       if (!(cpu_irq_affinity[cpu] & (1 << irq))) {
-               /* The irq is not in our affinity mask, push it off
-                * onto another CPU */
-               VDEBUG(("VOYAGER DEBUG: affinity triggered disable of irq %d "
-                       "on cpu %d\n", irq, cpu));
-               disable_local_vic_irq(irq);
-               /* set IRQ_INPROGRESS to prevent the handler in irq.c from
-                * actually calling the interrupt routine */
-               desc->status |= IRQ_REPLAY | IRQ_INPROGRESS;
-       } else if (desc->status & IRQ_DISABLED) {
-               /* Damn, the interrupt actually arrived, do the lazy
-                * disable thing. The interrupt routine in irq.c will
-                * not handle an IRQ_DISABLED interrupt, so nothing more
-                * need be done here */
-               VDEBUG(("VOYAGER DEBUG: lazy disable of irq %d on CPU %d\n",
-                       irq, cpu));
-               disable_local_vic_irq(irq);
-               desc->status |= IRQ_REPLAY;
-       } else {
-               desc->status &= ~IRQ_REPLAY;
-       }
-
-       _raw_spin_unlock(&vic_irq_lock);
-}
-
-/* Finish the VIC interrupt: basically mask */
-static void after_handle_vic_irq(unsigned int irq)
-{
-       irq_desc_t *desc = irq_to_desc(irq);
-
-       _raw_spin_lock(&vic_irq_lock);
-       {
-               unsigned int status = desc->status & ~IRQ_INPROGRESS;
-#ifdef VOYAGER_DEBUG
-               __u16 isr;
-#endif
-
-               desc->status = status;
-               if ((status & IRQ_DISABLED))
-                       disable_local_vic_irq(irq);
-#ifdef VOYAGER_DEBUG
-               /* DEBUG: before we ack, check what's in progress */
-               isr = vic_read_isr();
-               if ((isr & (1 << irq) && !(status & IRQ_REPLAY)) == 0) {
-                       int i;
-                       __u8 cpu = smp_processor_id();
-                       __u8 real_cpu;
-                       int mask;       /* Um... initialize me??? --RR */
-
-                       printk("VOYAGER SMP: CPU%d lost interrupt %d\n",
-                              cpu, irq);
-                       for_each_possible_cpu(real_cpu) {
-
-                               outb(VIC_CPU_MASQUERADE_ENABLE | real_cpu,
-                                    VIC_PROCESSOR_ID);
-                               isr = vic_read_isr();
-                               if (isr & (1 << irq)) {
-                                       printk
-                                           ("VOYAGER SMP: CPU%d ack irq %d\n",
-                                            real_cpu, irq);
-                                       ack_vic_irq(irq);
-                               }
-                               outb(cpu, VIC_PROCESSOR_ID);
-                       }
-               }
-#endif /* VOYAGER_DEBUG */
-               /* as soon as we ack, the interrupt is eligible for
-                * receipt by another CPU so everything must be in
-                * order here  */
-               ack_vic_irq(irq);
-               if (status & IRQ_REPLAY) {
-                       /* replay is set if we disable the interrupt
-                        * in the before_handle_vic_irq() routine, so
-                        * clear the in progress bit here to allow the
-                        * next CPU to handle this correctly */
-                       desc->status &= ~(IRQ_REPLAY | IRQ_INPROGRESS);
-               }
-#ifdef VOYAGER_DEBUG
-               isr = vic_read_isr();
-               if ((isr & (1 << irq)) != 0)
-                       printk("VOYAGER SMP: after_handle_vic_irq() after "
-                              "ack irq=%d, isr=0x%x\n", irq, isr);
-#endif /* VOYAGER_DEBUG */
-       }
-       _raw_spin_unlock(&vic_irq_lock);
-
-       /* All code after this point is out of the main path - the IRQ
-        * may be intercepted by another CPU if reasserted */
-}
-
-/* Linux processor - interrupt affinity manipulations.
- *
- * For each processor, we maintain a 32 bit irq affinity mask.
- * Initially it is set to all 1's so every processor accepts every
- * interrupt.  In this call, we change the processor's affinity mask:
- *
- * Change from enable to disable:
- *
- * If the interrupt ever comes in to the processor, we will disable it
- * and ack it to push it off to another CPU, so just accept the mask here.
- *
- * Change from disable to enable:
- *
- * change the mask and then do an interrupt enable CPI to re-enable on
- * the selected processors */
-
-void set_vic_irq_affinity(unsigned int irq, const struct cpumask *mask)
-{
-       /* Only extended processors handle interrupts */
-       unsigned long real_mask;
-       unsigned long irq_mask = 1 << irq;
-       int cpu;
-
-       real_mask = cpus_addr(*mask)[0] & voyager_extended_vic_processors;
-
-       if (cpus_addr(*mask)[0] == 0)
-               /* can't have no CPUs to accept the interrupt -- extremely
-                * bad things will happen */
-               return;
-
-       if (irq == 0)
-               /* can't change the affinity of the timer IRQ.  This
-                * is due to the constraint in the voyager
-                * architecture that the CPI also comes in on an IRQ
-                * line, and we have chosen IRQ0 for this.  If you
-                * raise the mask on this interrupt, the processor
-                * will no longer be able to accept VIC CPIs */
-               return;
-
-       if (irq >= 32)
-               /* You can only have 32 interrupts in a voyager system
-                * (and 32 only if you have a secondary microchannel
-                * bus) */
-               return;
-
-       for_each_online_cpu(cpu) {
-               unsigned long cpu_mask = 1 << cpu;
-
-               if (cpu_mask & real_mask) {
-                       /* enable the interrupt for this cpu */
-                       cpu_irq_affinity[cpu] |= irq_mask;
-               } else {
-                       /* disable the interrupt for this cpu */
-                       cpu_irq_affinity[cpu] &= ~irq_mask;
-               }
-       }
-       /* this is the magic: we now have the correct affinity maps, so
-        * enable the interrupt.  This will send an enable CPI to
-        * those CPUs that need to enable it in their local masks,
-        * causing them to correct for the new affinity.  If the
-        * interrupt is currently globally disabled, it will simply be
-        * disabled again as it comes in (voyager lazy disable).  If
-        * the affinity map is tightened to disable the interrupt on a
-        * cpu, it will be pushed off when it comes in */
-       unmask_vic_irq(irq);
-}
-
-static void ack_vic_irq(unsigned int irq)
-{
-       if (irq & 8) {
-               outb(0x62, 0x20);       /* Specific EOI to cascade */
-               outb(0x60 | (irq & 7), 0xA0);
-       } else {
-               outb(0x60 | (irq & 7), 0x20);
-       }
-}
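
ack_vic_irq() above issues an 8259 specific EOI: 0x60 plus the 3-bit IRQ level, written to the PIC that owns the line (0x20 master, 0xA0 slave); IRQs 8-15 additionally need EOI level 2 on the master to clear the cascade. A sketch that just prints the port/value pairs instead of doing port I/O:

#include <stdio.h>

static void sketch_ack_vic_irq(unsigned int irq)
{
        if (irq & 8) {
                printf("outb(0x62, 0x20)   /* cascade EOI on master */\n");
                printf("outb(0x%02x, 0xA0) /* specific EOI on slave */\n",
                       0x60 | (irq & 7));
        } else {
                printf("outb(0x%02x, 0x20) /* specific EOI on master */\n",
                       0x60 | (irq & 7));
        }
}

int main(void)
{
        sketch_ack_vic_irq(3);          /* master PIC line */
        sketch_ack_vic_irq(11);         /* slave PIC line */
        return 0;
}
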
-
-/* enable the CPIs.  In the VIC, the CPIs are delivered by the 8259
- * but are not vectored by it.  This means that the 8259 mask must be
- * lowered to receive them */
-static __init void vic_enable_cpi(void)
-{
-       __u8 cpu = smp_processor_id();
-
-       /* just take a copy of the current mask (nop for boot cpu) */
-       vic_irq_mask[cpu] = vic_irq_mask[boot_cpu_id];
-
-       enable_local_vic_irq(VIC_CPI_LEVEL0);
-       enable_local_vic_irq(VIC_CPI_LEVEL1);
-       /* for sys int and cmn int */
-       enable_local_vic_irq(7);
-
-       if (is_cpu_quad()) {
-               outb(QIC_DEFAULT_MASK0, QIC_MASK_REGISTER0);
-               outb(QIC_CPI_ENABLE, QIC_MASK_REGISTER1);
-               VDEBUG(("VOYAGER SMP: QIC ENABLE CPI: CPU%d: MASK 0x%x\n",
-                       cpu, QIC_CPI_ENABLE));
-       }
-
-       VDEBUG(("VOYAGER SMP: ENABLE CPI: CPU%d: MASK 0x%x\n",
-               cpu, vic_irq_mask[cpu]));
-}
-
-void voyager_smp_dump(void)
-{
-       int old_cpu = smp_processor_id(), cpu;
-
-       /* dump the interrupt masks of each processor */
-       for_each_online_cpu(cpu) {
-               __u16 imr, isr, irr;
-               unsigned long flags;
-
-               local_irq_save(flags);
-               outb(VIC_CPU_MASQUERADE_ENABLE | cpu, VIC_PROCESSOR_ID);
-               imr = (inb(0xa1) << 8) | inb(0x21);
-               outb(0x0a, 0xa0);
-               irr = inb(0xa0) << 8;
-               outb(0x0a, 0x20);
-               irr |= inb(0x20);
-               outb(0x0b, 0xa0);
-               isr = inb(0xa0) << 8;
-               outb(0x0b, 0x20);
-               isr |= inb(0x20);
-               outb(old_cpu, VIC_PROCESSOR_ID);
-               local_irq_restore(flags);
-               printk("\tCPU%d: mask=0x%x, IMR=0x%x, IRR=0x%x, ISR=0x%x\n",
-                      cpu, vic_irq_mask[cpu], imr, irr, isr);
-#if 0
-               /* These lines are put in to try to unstick an un-ack'd irq */
-               if (isr != 0) {
-                       int irq;
-                       for (irq = 0; irq < 16; irq++) {
-                               if (isr & (1 << irq)) {
-                                       printk("\tCPU%d: ack irq %d\n",
-                                              cpu, irq);
-                                       local_irq_save(flags);
-                                       outb(VIC_CPU_MASQUERADE_ENABLE | cpu,
-                                            VIC_PROCESSOR_ID);
-                                       ack_vic_irq(irq);
-                                       outb(old_cpu, VIC_PROCESSOR_ID);
-                                       local_irq_restore(flags);
-                               }
-                       }
-               }
-#endif
-       }
-}
-
-void smp_voyager_power_off(void *dummy)
-{
-       if (smp_processor_id() == boot_cpu_id)
-               voyager_power_off();
-       else
-               smp_stop_cpu_function(NULL);
-}
-
-static void __init voyager_smp_prepare_cpus(unsigned int max_cpus)
-{
-       /* FIXME: ignore max_cpus for now */
-       smp_boot_cpus();
-}
-
-static void __cpuinit voyager_smp_prepare_boot_cpu(void)
-{
-       init_gdt(smp_processor_id());
-       switch_to_new_gdt();
-
-       cpu_online_map = cpumask_of_cpu(smp_processor_id());
-       cpu_callout_map = cpumask_of_cpu(smp_processor_id());
-       cpu_callin_map = CPU_MASK_NONE;
-       cpu_present_map = cpumask_of_cpu(smp_processor_id());
-
-}
-
-static int __cpuinit voyager_cpu_up(unsigned int cpu)
-{
-       /* This only works at boot for x86.  See "rewrite" above. */
-       if (cpu_isset(cpu, smp_commenced_mask))
-               return -ENOSYS;
-
-       /* In case one didn't come up */
-       if (!cpu_isset(cpu, cpu_callin_map))
-               return -EIO;
-       /* Unleash the CPU! */
-       cpu_set(cpu, smp_commenced_mask);
-       while (!cpu_online(cpu))
-               mb();
-       return 0;
-}
-
-static void __init voyager_smp_cpus_done(unsigned int max_cpus)
-{
-       zap_low_mappings();
-}
-
-void __init smp_setup_processor_id(void)
-{
-       current_thread_info()->cpu = hard_smp_processor_id();
-       x86_write_percpu(cpu_number, hard_smp_processor_id());
-}
-
-static void voyager_send_call_func(const struct cpumask *callmask)
-{
-       __u32 mask = cpus_addr(*callmask)[0] & ~(1 << smp_processor_id());
-       send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-}
-
-static void voyager_send_call_func_single(int cpu)
-{
-       send_CPI(1 << cpu, VIC_CALL_FUNCTION_SINGLE_CPI);
-}
-
-struct smp_ops smp_ops = {
-       .smp_prepare_boot_cpu = voyager_smp_prepare_boot_cpu,
-       .smp_prepare_cpus = voyager_smp_prepare_cpus,
-       .cpu_up = voyager_cpu_up,
-       .smp_cpus_done = voyager_smp_cpus_done,
-
-       .smp_send_stop = voyager_smp_send_stop,
-       .smp_send_reschedule = voyager_smp_send_reschedule,
-
-       .send_call_func_ipi = voyager_send_call_func,
-       .send_call_func_single_ipi = voyager_send_call_func_single,
-};
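
The smp_ops structure above is a plain function-pointer dispatch table: generic x86 code calls through it and the platform supplies the implementations, which is what let Voyager coexist with the common SMP code at all. A reduced sketch of the pattern (struct and function names here are illustrative):

#include <stdio.h>

struct smp_ops_sketch {
        void (*smp_send_reschedule)(int cpu);
        void (*send_call_func_single_ipi)(int cpu);
};

static void voyager_reschedule(int cpu)
{
        printf("send VIC_RESCHEDULE_CPI to CPU%d\n", cpu);
}

static void voyager_call_single(int cpu)
{
        printf("send VIC_CALL_FUNCTION_SINGLE_CPI to CPU%d\n", cpu);
}

/* the platform fills in the table once at boot... */
static struct smp_ops_sketch ops = {
        .smp_send_reschedule            = voyager_reschedule,
        .send_call_func_single_ipi      = voyager_call_single,
};

int main(void)
{
        /* ...and generic code dispatches through it, platform-unaware */
        ops.smp_send_reschedule(2);
        ops.send_call_func_single_ipi(3);
        return 0;
}
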
diff --git a/arch/x86/mach-voyager/voyager_thread.c b/arch/x86/mach-voyager/voyager_thread.c
deleted file mode 100644 (file)
index 15464a2..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8 -*- */
-
-/* Copyright (C) 2001
- *
- * Author: J.E.J.Bottomley@HansenPartnership.com
- *
- * This module provides the machine status monitor thread for the
- * voyager architecture.  This allows us to monitor the machine
- * environment (temp, voltage, fan function) and the front panel and
- * internal UPS.  If a fault is detected, this thread takes corrective
- * action (usually just informing init)
- * */
-
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/kernel_stat.h>
-#include <linux/delay.h>
-#include <linux/mc146818rtc.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/kmod.h>
-#include <linux/completion.h>
-#include <linux/sched.h>
-#include <linux/kthread.h>
-#include <asm/desc.h>
-#include <asm/voyager.h>
-#include <asm/vic.h>
-#include <asm/mtrr.h>
-#include <asm/msr.h>
-
-struct task_struct *voyager_thread;
-static __u8 set_timeout;
-
-static int execute(const char *string)
-{
-       int ret;
-
-       char *envp[] = {
-               "HOME=/",
-               "TERM=linux",
-               "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
-               NULL,
-       };
-       char *argv[] = {
-               "/bin/bash",
-               "-c",
-               (char *)string,
-               NULL,
-       };
-
-       ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
-       if (ret != 0) {
-               printk(KERN_ERR "Voyager failed to run \"%s\": %i\n", string,
-                      ret);
-       }
-       return ret;
-}
-
-static void check_from_kernel(void)
-{
-       if (voyager_status.switch_off) {
-
-               /* FIXME: This should be configurable via proc */
-               execute("umask 600; echo 0 > /etc/initrunlvl; kill -HUP 1");
-       } else if (voyager_status.power_fail) {
-               VDEBUG(("Voyager daemon detected AC power failure\n"));
-
-               /* FIXME: This should be configurable via proc */
-               execute("umask 600; echo F > /etc/powerstatus; kill -PWR 1");
-               set_timeout = 1;
-       }
-}
-
-static void check_continuing_condition(void)
-{
-       if (voyager_status.power_fail) {
-               __u8 data;
-               voyager_cat_psi(VOYAGER_PSI_SUBREAD,
-                               VOYAGER_PSI_AC_FAIL_REG, &data);
-               if ((data & 0x1f) == 0) {
-                       /* all power restored */
-                       printk(KERN_NOTICE
-                              "VOYAGER AC power restored, cancelling shutdown\n");
-                       /* FIXME: should be user-configurable */
-                       execute
-                           ("umask 600; echo O > /etc/powerstatus; kill -PWR 1");
-                       set_timeout = 0;
-               }
-       }
-}
-
-static int thread(void *unused)
-{
-       printk(KERN_NOTICE "Voyager starting monitor thread\n");
-
-       for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(set_timeout ? HZ : MAX_SCHEDULE_TIMEOUT);
-
-               /* let kthread_stop() in voyager_thread_stop() terminate us */
-               if (kthread_should_stop())
-                       break;
-
-               VDEBUG(("Voyager Daemon awoken\n"));
-               if (voyager_status.request_from_kernel == 0) {
-                       /* probably awoken from timeout */
-                       check_continuing_condition();
-               } else {
-                       check_from_kernel();
-                       voyager_status.request_from_kernel = 0;
-               }
-       }
-       return 0;
-}
-
-static int __init voyager_thread_start(void)
-{
-       voyager_thread = kthread_run(thread, NULL, "kvoyagerd");
-       if (IS_ERR(voyager_thread)) {
-               printk(KERN_ERR
-                      "Voyager: Failed to create system monitor thread.\n");
-               return PTR_ERR(voyager_thread);
-       }
-       return 0;
-}
-
-static void __exit voyager_thread_stop(void)
-{
-       kthread_stop(voyager_thread);
-}
-
-module_init(voyager_thread_start);
-module_exit(voyager_thread_stop);
index 420b3b6e39152d86d1c81e4e6ab69f65dbc69cbf..6ef5e99380f92134ba86a6a693b5ac6d3434e6d4 100644 (file)
@@ -150,11 +150,9 @@ static long pm_address(u_char FPU_modrm, u_char segment,
 #endif /* PARANOID */
 
        switch (segment) {
-               /* gs isn't used by the kernel, so it still has its
-                  user-space value. */
        case PREFIX_GS_ - 1:
-               /* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
-               savesegment(gs, addr->selector);
+               /* user gs handling can be lazy, use special accessors */
+               addr->selector = get_user_gs(FPU_info->regs);
                break;
        default:
                addr->selector = PM_REG_(segment);
index d8cc96a2738f739a9f5dba76f77ee8c9a0885e6a..08537747cb589b92db2bdeb4c7dd75c7f3ccf01c 100644 (file)
@@ -1,6 +1,8 @@
-obj-y  :=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
+obj-y  :=  init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
            pat.o pgtable.o gup.o
 
+obj-$(CONFIG_SMP)              += tlb.o
+
 obj-$(CONFIG_X86_32)           += pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
index 7e8db53528a7618820bd338ccaaf1039a14f241f..61b41ca3b5a2ed4bbf1b9100fd870578885182ad 100644 (file)
@@ -23,6 +23,12 @@ int fixup_exception(struct pt_regs *regs)
 
        fixup = search_exception_tables(regs->ip);
        if (fixup) {
+               /* If fixup is less than 16, it means a uaccess error */
+               if (fixup->fixup < 16) {
+                       current_thread_info()->uaccess_err = -EFAULT;
+                       regs->ip += fixup->fixup;
+                       return 1;
+               }
                regs->ip = fixup->fixup;
                return 1;
        }
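
The hunk above overloads the exception-table fixup field: a value below 16 is not a landing address but a tiny instruction-skip length that additionally flags a uaccess error. A hypothetical standalone model of that convention (struct layout and names are illustrative, not the kernel's):

#include <stdio.h>

struct exception_entry {
        unsigned long insn;     /* faulting instruction address */
        unsigned long fixup;    /* landing address, or <16: skip length */
};

static unsigned long apply_fixup(const struct exception_entry *e,
                                 unsigned long ip, int *uaccess_err)
{
        if (e->fixup < 16) {            /* uaccess-error marker */
                *uaccess_err = 1;
                return ip + e->fixup;   /* skip the faulting instruction */
        }
        *uaccess_err = 0;
        return e->fixup;                /* normal fixup: jump there */
}

int main(void)
{
        struct exception_entry e = { 0x1000, 3 };
        int err;

        printf("new ip: %#lx, uaccess_err=%d\n",
               apply_fixup(&e, 0x1000, &err), err);
        return 0;
}
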
index c76ef1d701c9f48625aed06d4e7b4ec3d98e8862..a03b7279efa018850def9042339fec4af4249042 100644 (file)
@@ -1,73 +1,79 @@
 /*
  *  Copyright (C) 1995  Linus Torvalds
- *  Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
+ *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
+ *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mmiotrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
 #include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/vt_kern.h>             /* For unblank_screen() */
+#include <linux/mmiotrace.h>
+#include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/highmem.h>
-#include <linux/bootmem.h>             /* for max_low_pfn */
-#include <linux/vmalloc.h>
-#include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/vt_kern.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/module.h>
 #include <linux/kdebug.h>
+#include <linux/errno.h>
+#include <linux/magic.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mman.h>
+#include <linux/tty.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+
+#include <asm-generic/sections.h>
 
-#include <asm/system.h>
-#include <asm/desc.h>
-#include <asm/segment.h>
-#include <asm/pgalloc.h>
-#include <asm/smp.h>
 #include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/segment.h>
+#include <asm/system.h>
 #include <asm/proto.h>
-#include <asm-generic/sections.h>
 #include <asm/traps.h>
+#include <asm/desc.h>
 
 /*
- * Page fault error code bits
- *     bit 0 == 0 means no page found, 1 means protection fault
- *     bit 1 == 0 means read, 1 means write
- *     bit 2 == 0 means kernel, 1 means user-mode
- *     bit 3 == 1 means use of reserved bit detected
- *     bit 4 == 1 means fault was an instruction fetch
+ * Page fault error code bits:
+ *
+ *   bit 0 ==   0: no page found       1: protection fault
+ *   bit 1 ==   0: read access         1: write access
+ *   bit 2 ==   0: kernel-mode access  1: user-mode access
+ *   bit 3 ==                          1: use of reserved bit detected
+ *   bit 4 ==                          1: fault was an instruction fetch
  */
-#define PF_PROT                (1<<0)
-#define PF_WRITE       (1<<1)
-#define PF_USER                (1<<2)
-#define PF_RSVD                (1<<3)
-#define PF_INSTR       (1<<4)
+enum x86_pf_error_code {
 
+       PF_PROT         =               1 << 0,
+       PF_WRITE        =               1 << 1,
+       PF_USER         =               1 << 2,
+       PF_RSVD         =               1 << 3,
+       PF_INSTR        =               1 << 4,
+};
+
+/*
+ * Returns 0 if mmiotrace is disabled, or if the fault is not
+ * handled by mmiotrace:
+ */
 static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
 {
-#ifdef CONFIG_MMIOTRACE
        if (unlikely(is_kmmio_active()))
                if (kmmio_handler(regs, addr) == 1)
                        return -1;
-#endif
        return 0;
 }
 
 static inline int notify_page_fault(struct pt_regs *regs)
 {
-#ifdef CONFIG_KPROBES
        int ret = 0;
 
        /* kprobe_running() needs smp_processor_id() */
-       if (!user_mode_vm(regs)) {
+       if (kprobes_built_in() && !user_mode_vm(regs)) {
                preempt_disable();
                if (kprobe_running() && kprobe_fault_handler(regs, 14))
                        ret = 1;
@@ -75,29 +81,76 @@ static inline int notify_page_fault(struct pt_regs *regs)
        }
 
        return ret;
-#else
-       return 0;
-#endif
 }
 
 /*
- * X86_32
- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
- * Check that here and ignore it.
+ * Prefetch quirks:
  *
- * X86_64
- * Sometimes the CPU reports invalid exceptions on prefetch.
- * Check that here and ignore it.
+ * 32-bit mode:
  *
- * Opcode checker based on code by Richard Brunner
+ *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ *   Check that here and ignore it.
+ *
+ * 64-bit mode:
+ *
+ *   Sometimes the CPU reports invalid exceptions on prefetch.
+ *   Check that here and ignore it.
+ *
+ * Opcode checker based on code by Richard Brunner.
  */
-static int is_prefetch(struct pt_regs *regs, unsigned long addr,
-                      unsigned long error_code)
+static inline int
+check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
+                     unsigned char opcode, int *prefetch)
+{
+       unsigned char instr_hi = opcode & 0xf0;
+       unsigned char instr_lo = opcode & 0x0f;
+
+       switch (instr_hi) {
+       case 0x20:
+       case 0x30:
+               /*
+                * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
+                * In X86_64 long mode, the CPU will signal invalid
+                * opcode if some of these prefixes are present so
+                * X86_64 will never get here anyway
+                */
+               return ((instr_lo & 7) == 0x6);
+#ifdef CONFIG_X86_64
+       case 0x40:
+               /*
+                * In AMD64 long mode 0x40..0x4F are valid REX prefixes
+                * Need to figure out under what instruction mode the
+                * instruction was issued. Could check the LDT for lm,
+                * but for now it's good enough to assume that long
+                * mode only uses well known segments or kernel.
+                */
+               return (!user_mode(regs)) || (regs->cs == __USER_CS);
+#endif
+       case 0x60:
+               /* 0x64 thru 0x67 are valid prefixes in all modes. */
+               return (instr_lo & 0xC) == 0x4;
+       case 0xF0:
+               /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
+               return !instr_lo || (instr_lo>>1) == 1;
+       case 0x00:
+               /* Prefetch instruction is 0x0F0D or 0x0F18 */
+               if (probe_kernel_address(instr, opcode))
+                       return 0;
+
+               *prefetch = (instr_lo == 0xF) &&
+                       (opcode == 0x0D || opcode == 0x18);
+               return 0;
+       default:
+               return 0;
+       }
+}
+
+static int
+is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 {
+       unsigned char *max_instr;
        unsigned char *instr;
-       int scan_more = 1;
        int prefetch = 0;
-       unsigned char *max_instr;
 
        /*
         * If it was an exec (instruction fetch) fault on NX page, then
@@ -106,106 +159,170 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr,
        if (error_code & PF_INSTR)
                return 0;
 
-       instr = (unsigned char *)convert_ip_to_linear(current, regs);
+       instr = (void *)convert_ip_to_linear(current, regs);
        max_instr = instr + 15;
 
        if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
                return 0;
 
-       while (scan_more && instr < max_instr) {
+       while (instr < max_instr) {
                unsigned char opcode;
-               unsigned char instr_hi;
-               unsigned char instr_lo;
 
                if (probe_kernel_address(instr, opcode))
                        break;
 
-               instr_hi = opcode & 0xf0;
-               instr_lo = opcode & 0x0f;
                instr++;
 
-               switch (instr_hi) {
-               case 0x20:
-               case 0x30:
-                       /*
-                        * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
-                        * In X86_64 long mode, the CPU will signal invalid
-                        * opcode if some of these prefixes are present so
-                        * X86_64 will never get here anyway
-                        */
-                       scan_more = ((instr_lo & 7) == 0x6);
-                       break;
-#ifdef CONFIG_X86_64
-               case 0x40:
-                       /*
-                        * In AMD64 long mode 0x40..0x4F are valid REX prefixes
-                        * Need to figure out under what instruction mode the
-                        * instruction was issued. Could check the LDT for lm,
-                        * but for now it's good enough to assume that long
-                        * mode only uses well known segments or kernel.
-                        */
-                       scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
-                       break;
-#endif
-               case 0x60:
-                       /* 0x64 thru 0x67 are valid prefixes in all modes. */
-                       scan_more = (instr_lo & 0xC) == 0x4;
-                       break;
-               case 0xF0:
-                       /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
-                       scan_more = !instr_lo || (instr_lo>>1) == 1;
+               if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
                        break;
-               case 0x00:
-                       /* Prefetch instruction is 0x0F0D or 0x0F18 */
-                       scan_more = 0;
-
-                       if (probe_kernel_address(instr, opcode))
-                               break;
-                       prefetch = (instr_lo == 0xF) &&
-                               (opcode == 0x0D || opcode == 0x18);
-                       break;
-               default:
-                       scan_more = 0;
-                       break;
-               }
        }
        return prefetch;
 }
 
-static void force_sig_info_fault(int si_signo, int si_code,
-       unsigned long address, struct task_struct *tsk)
+static void
+force_sig_info_fault(int si_signo, int si_code, unsigned long address,
+                    struct task_struct *tsk)
 {
        siginfo_t info;
 
-       info.si_signo = si_signo;
-       info.si_errno = 0;
-       info.si_code = si_code;
-       info.si_addr = (void __user *)address;
+       info.si_signo   = si_signo;
+       info.si_errno   = 0;
+       info.si_code    = si_code;
+       info.si_addr    = (void __user *)address;
+
        force_sig_info(si_signo, &info, tsk);
 }
 
-#ifdef CONFIG_X86_64
-static int bad_address(void *p)
+DEFINE_SPINLOCK(pgd_lock);
+LIST_HEAD(pgd_list);
+
+#ifdef CONFIG_X86_32
+static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 {
-       unsigned long dummy;
-       return probe_kernel_address((unsigned long *)p, dummy);
+       unsigned index = pgd_index(address);
+       pgd_t *pgd_k;
+       pud_t *pud, *pud_k;
+       pmd_t *pmd, *pmd_k;
+
+       pgd += index;
+       pgd_k = init_mm.pgd + index;
+
+       if (!pgd_present(*pgd_k))
+               return NULL;
+
+       /*
+        * A set_pgd(pgd, *pgd_k) here would be useless on PAE and
+        * redundant with the set_pmd() below on non-PAE; the same
+        * goes for set_pud().
+        */
+       pud = pud_offset(pgd, address);
+       pud_k = pud_offset(pgd_k, address);
+       if (!pud_present(*pud_k))
+               return NULL;
+
+       pmd = pmd_offset(pud, address);
+       pmd_k = pmd_offset(pud_k, address);
+       if (!pmd_present(*pmd_k))
+               return NULL;
+
+       if (!pmd_present(*pmd)) {
+               set_pmd(pmd, *pmd_k);
+               arch_flush_lazy_mmu_mode();
+       } else {
+               BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+       }
+
+       return pmd_k;
+}
+
+void vmalloc_sync_all(void)
+{
+       unsigned long address;
+
+       if (SHARED_KERNEL_PMD)
+               return;
+
+       for (address = VMALLOC_START & PMD_MASK;
+            address >= TASK_SIZE && address < FIXADDR_TOP;
+            address += PMD_SIZE) {
+
+               unsigned long flags;
+               struct page *page;
+
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       if (!vmalloc_sync_one(page_address(page), address))
+                               break;
+               }
+               spin_unlock_irqrestore(&pgd_lock, flags);
+       }
+}
+
+/*
+ * 32-bit:
+ *
+ *   Handle a fault on the vmalloc or module mapping area
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+       unsigned long pgd_paddr;
+       pmd_t *pmd_k;
+       pte_t *pte_k;
+
+       /* Make sure we are in vmalloc area: */
+       if (!(address >= VMALLOC_START && address < VMALLOC_END))
+               return -1;
+
+       /*
+        * Synchronize this task's top level page-table
+        * with the 'reference' page table.
+        *
+        * Do _not_ use "current" here. We might be inside
+        * an interrupt in the middle of a task switch..
+        */
+       pgd_paddr = read_cr3();
+       pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
+       if (!pmd_k)
+               return -1;
+
+       pte_k = pte_offset_kernel(pmd_k, address);
+       if (!pte_present(*pte_k))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Did it hit the DOS screen memory VA from vm86 mode?
+ */
+static inline void
+check_v8086_mode(struct pt_regs *regs, unsigned long address,
+                struct task_struct *tsk)
+{
+       unsigned long bit;
+
+       if (!v8086_mode(regs))
+               return;
+
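+       /*
+        * Each bit of screen_bitmap covers one 4 kB page of the
+        * 0xA0000-0xBFFFF VGA window (32 pages = 128 kB); e.g. a
+        * fault at 0xB8000 sets bit 24.
+        */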
+       bit = (address - 0xA0000) >> PAGE_SHIFT;
+       if (bit < 32)
+               tsk->thread.screen_bitmap |= 1 << bit;
 }
-#endif
 
 static void dump_pagetable(unsigned long address)
 {
-#ifdef CONFIG_X86_32
        __typeof__(pte_val(__pte(0))) page;
 
        page = read_cr3();
        page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+
 #ifdef CONFIG_X86_PAE
        printk("*pdpt = %016Lx ", page);
        if ((page >> PAGE_SHIFT) < max_low_pfn
            && page & _PAGE_PRESENT) {
                page &= PAGE_MASK;
                page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
-                                                        & (PTRS_PER_PMD - 1)];
+                                                       & (PTRS_PER_PMD - 1)];
                printk(KERN_CONT "*pde = %016Lx ", page);
                page &= ~_PAGE_NX;
        }
@@ -217,19 +334,145 @@ static void dump_pagetable(unsigned long address)
         * We must not directly access the pte in the highpte
         * case if the page table is located in highmem.
         * And let's rather not kmap-atomic the pte, just in case
-        * it's allocated already.
+        * it's allocated already:
         */
        if ((page >> PAGE_SHIFT) < max_low_pfn
            && (page & _PAGE_PRESENT)
            && !(page & _PAGE_PSE)) {
+
                page &= PAGE_MASK;
                page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
-                                                        & (PTRS_PER_PTE - 1)];
+                                                       & (PTRS_PER_PTE - 1)];
                printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
        }
 
        printk("\n");
-#else /* CONFIG_X86_64 */
+}
+
+#else /* CONFIG_X86_64: */
+
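+/*
+ * On 64-bit, each process has its own pgd and new vmalloc mappings
+ * are installed only in init_mm's reference page table; they are
+ * copied into the other pgds lazily, either here or from
+ * vmalloc_fault() below.
+ */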
+void vmalloc_sync_all(void)
+{
+       unsigned long address;
+
+       for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+            address += PGDIR_SIZE) {
+
+               const pgd_t *pgd_ref = pgd_offset_k(address);
+               unsigned long flags;
+               struct page *page;
+
+               if (pgd_none(*pgd_ref))
+                       continue;
+
+               spin_lock_irqsave(&pgd_lock, flags);
+               list_for_each_entry(page, &pgd_list, lru) {
+                       pgd_t *pgd;
+                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
+                       if (pgd_none(*pgd))
+                               set_pgd(pgd, *pgd_ref);
+                       else
+                               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+               }
+               spin_unlock_irqrestore(&pgd_lock, flags);
+       }
+}
+
+/*
+ * 64-bit:
+ *
+ *   Handle a fault on the vmalloc area
+ *
+ * This assumes no large pages in there.
+ */
+static noinline int vmalloc_fault(unsigned long address)
+{
+       pgd_t *pgd, *pgd_ref;
+       pud_t *pud, *pud_ref;
+       pmd_t *pmd, *pmd_ref;
+       pte_t *pte, *pte_ref;
+
+       /* Make sure we are in vmalloc area: */
+       if (!(address >= VMALLOC_START && address < VMALLOC_END))
+               return -1;
+
+       /*
+        * Copy kernel mappings over when needed. This can also
+        * happen within a race in a page table update; in the latter
+        * case just flush:
+        */
+       pgd = pgd_offset(current->active_mm, address);
+       pgd_ref = pgd_offset_k(address);
+       if (pgd_none(*pgd_ref))
+               return -1;
+
+       if (pgd_none(*pgd))
+               set_pgd(pgd, *pgd_ref);
+       else
+               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+       /*
+        * Below here mismatches are bugs because these lower tables
+        * are shared:
+        */
+
+       pud = pud_offset(pgd, address);
+       pud_ref = pud_offset(pgd_ref, address);
+       if (pud_none(*pud_ref))
+               return -1;
+
+       if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+               BUG();
+
+       pmd = pmd_offset(pud, address);
+       pmd_ref = pmd_offset(pud_ref, address);
+       if (pmd_none(*pmd_ref))
+               return -1;
+
+       if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+               BUG();
+
+       pte_ref = pte_offset_kernel(pmd_ref, address);
+       if (!pte_present(*pte_ref))
+               return -1;
+
+       pte = pte_offset_kernel(pmd, address);
+
+       /*
+        * Don't use pte_page here, because the mappings can point
+        * outside mem_map, and the NUMA hash lookup cannot handle
+        * that:
+        */
+       if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
+               BUG();
+
+       return 0;
+}
+
+static const char errata93_warning[] =
+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
+KERN_ERR "******* Please consider a BIOS update.\n"
+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+
+/*
+ * No vm86 mode in 64-bit mode:
+ */
+static inline void
+check_v8086_mode(struct pt_regs *regs, unsigned long address,
+                struct task_struct *tsk)
+{
+}
+
+static int bad_address(void *p)
+{
+       unsigned long dummy;
+
+       return probe_kernel_address((unsigned long *)p, dummy);
+}
+
+static void dump_pagetable(unsigned long address)
+{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
@@ -238,102 +481,77 @@ static void dump_pagetable(unsigned long address)
        pgd = (pgd_t *)read_cr3();
 
        pgd = __va((unsigned long)pgd & PHYSICAL_PAGE_MASK);
+
        pgd += pgd_index(address);
-       if (bad_address(pgd)) goto bad;
+       if (bad_address(pgd))
+               goto bad;
+
        printk("PGD %lx ", pgd_val(*pgd));
-       if (!pgd_present(*pgd)) goto ret;
+
+       if (!pgd_present(*pgd))
+               goto out;
 
        pud = pud_offset(pgd, address);
-       if (bad_address(pud)) goto bad;
+       if (bad_address(pud))
+               goto bad;
+
        printk("PUD %lx ", pud_val(*pud));
        if (!pud_present(*pud) || pud_large(*pud))
-               goto ret;
+               goto out;
 
        pmd = pmd_offset(pud, address);
-       if (bad_address(pmd)) goto bad;
+       if (bad_address(pmd))
+               goto bad;
+
        printk("PMD %lx ", pmd_val(*pmd));
-       if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
+       if (!pmd_present(*pmd) || pmd_large(*pmd))
+               goto out;
 
        pte = pte_offset_kernel(pmd, address);
-       if (bad_address(pte)) goto bad;
+       if (bad_address(pte))
+               goto bad;
+
        printk("PTE %lx", pte_val(*pte));
-ret:
+out:
        printk("\n");
        return;
 bad:
        printk("BAD\n");
-#endif
 }
 
-#ifdef CONFIG_X86_32
-static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
-{
-       unsigned index = pgd_index(address);
-       pgd_t *pgd_k;
-       pud_t *pud, *pud_k;
-       pmd_t *pmd, *pmd_k;
-
-       pgd += index;
-       pgd_k = init_mm.pgd + index;
-
-       if (!pgd_present(*pgd_k))
-               return NULL;
+#endif /* CONFIG_X86_64 */
 
-       /*
-        * set_pgd(pgd, *pgd_k); here would be useless on PAE
-        * and redundant with the set_pmd() on non-PAE. As would
-        * set_pud.
-        */
-
-       pud = pud_offset(pgd, address);
-       pud_k = pud_offset(pgd_k, address);
-       if (!pud_present(*pud_k))
-               return NULL;
-
-       pmd = pmd_offset(pud, address);
-       pmd_k = pmd_offset(pud_k, address);
-       if (!pmd_present(*pmd_k))
-               return NULL;
-       if (!pmd_present(*pmd)) {
-               set_pmd(pmd, *pmd_k);
-               arch_flush_lazy_mmu_mode();
-       } else
-               BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
-       return pmd_k;
-}
-#endif
-
-#ifdef CONFIG_X86_64
-static const char errata93_warning[] =
-KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
-KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
-KERN_ERR "******* Please consider a BIOS update.\n"
-KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
-#endif
-
-/* Workaround for K8 erratum #93 & buggy BIOS.
-   BIOS SMM functions are required to use a specific workaround
-   to avoid corruption of the 64bit RIP register on C stepping K8.
-   A lot of BIOS that didn't get tested properly miss this.
-   The OS sees this as a page fault with the upper 32bits of RIP cleared.
-   Try to work around it here.
-   Note we only handle faults in kernel here.
-   Does nothing for X86_32
+/*
+ * Workaround for K8 erratum #93 & buggy BIOS.
+ *
+ * BIOS SMM functions are required to use a specific workaround
+ * to avoid corruption of the 64-bit RIP register on C-stepping K8.
+ *
+ * A lot of BIOSes that didn't get tested properly miss this.
+ *
+ * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
+ * Try to work around it here.
+ *
+ * Note we only handle faults in kernel here.
+ * Does nothing on 32-bit.
  */
 static int is_errata93(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_64
-       static int warned;
+       static int once;
+
        if (address != regs->ip)
                return 0;
+
        if ((address >> 32) != 0)
                return 0;
+
        address |= 0xffffffffUL << 32;
        if ((address >= (u64)_stext && address <= (u64)_etext) ||
            (address >= MODULES_VADDR && address <= MODULES_END)) {
-               if (!warned) {
+               if (!once) {
                        printk(errata93_warning);
-                       warned = 1;
+                       once = 1;
                }
                regs->ip = address;
                return 1;
@@ -343,16 +561,17 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
 }
 
 /*
- * Work around K8 erratum #100 K8 in compat mode occasionally jumps to illegal
- * addresses >4GB.  We catch this in the page fault handler because these
- * addresses are not reachable. Just detect this case and return.  Any code
+ * Work around K8 erratum #100: K8 in compat mode occasionally jumps
+ * to illegal addresses >4GB.
+ *
+ * We catch this in the page fault handler because these addresses
+ * are not reachable. Just detect this case and return.  Any code
  * segment in LDT is compatibility mode.
  */
 static int is_errata100(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_64
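+       /* Bit 2 of a segment selector is the Table Indicator: set means LDT. */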
-       if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
-           (address >> 32))
+       if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
                return 1;
 #endif
        return 0;
@@ -362,8 +581,9 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_F00F_BUG
        unsigned long nr;
+
        /*
-        * Pentium F0 0F C7 C8 bug workaround.
+        * Pentium F0 0F C7 C8 bug workaround:
         */
        if (boot_cpu_data.f00f_bug) {
                nr = (address - idt_descr.address) >> 3;
@@ -377,62 +597,277 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
        return 0;
 }
 
-static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
-                           unsigned long address)
+static const char nx_warning[] = KERN_CRIT
+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
+
+static void
+show_fault_oops(struct pt_regs *regs, unsigned long error_code,
+               unsigned long address)
 {
-#ifdef CONFIG_X86_32
        if (!oops_may_print())
                return;
-#endif
 
-#ifdef CONFIG_X86_PAE
        if (error_code & PF_INSTR) {
                unsigned int level;
+
                pte_t *pte = lookup_address(address, &level);
 
                if (pte && pte_present(*pte) && !pte_exec(*pte))
-                       printk(KERN_CRIT "kernel tried to execute "
-                               "NX-protected page - exploit attempt? "
-                               "(uid: %d)\n", current_uid());
+                       printk(nx_warning, current_uid());
        }
-#endif
 
        printk(KERN_ALERT "BUG: unable to handle kernel ");
        if (address < PAGE_SIZE)
                printk(KERN_CONT "NULL pointer dereference");
        else
                printk(KERN_CONT "paging request");
+
        printk(KERN_CONT " at %p\n", (void *) address);
        printk(KERN_ALERT "IP:");
        printk_address(regs->ip, 1);
+
        dump_pagetable(address);
 }
 
-#ifdef CONFIG_X86_64
-static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
-                                unsigned long error_code)
+static noinline void
+pgtable_bad(struct pt_regs *regs, unsigned long error_code,
+           unsigned long address)
 {
-       unsigned long flags = oops_begin();
-       int sig = SIGKILL;
        struct task_struct *tsk;
+       unsigned long flags;
+       int sig;
+
+       flags = oops_begin();
+       tsk = current;
+       sig = SIGKILL;
 
        printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
-              current->comm, address);
+              tsk->comm, address);
        dump_pagetable(address);
-       tsk = current;
-       tsk->thread.cr2 = address;
-       tsk->thread.trap_no = 14;
-       tsk->thread.error_code = error_code;
+
+       tsk->thread.cr2         = address;
+       tsk->thread.trap_no     = 14;
+       tsk->thread.error_code  = error_code;
+
        if (__die("Bad pagetable", regs, error_code))
                sig = 0;
+
        oops_end(flags, regs, sig);
 }
-#endif
+
+static noinline void
+no_context(struct pt_regs *regs, unsigned long error_code,
+          unsigned long address)
+{
+       struct task_struct *tsk = current;
+       unsigned long *stackend;
+       unsigned long flags;
+       int sig;
+
+       /* Are we prepared to handle this kernel fault? */
+       if (fixup_exception(regs))
+               return;
+
+       /*
+        * 32-bit:
+        *
+        *   Valid to do another page fault here, because if this fault
+        *   had been triggered by is_prefetch fixup_exception would have
+        *   handled it.
+        *
+        * 64-bit:
+        *
+        *   Hall of shame of CPU/BIOS bugs.
+        */
+       if (is_prefetch(regs, error_code, address))
+               return;
+
+       if (is_errata93(regs, address))
+               return;
+
+       /*
+        * Oops. The kernel tried to access some bad page. We'll have to
+        * terminate things with extreme prejudice:
+        */
+       flags = oops_begin();
+
+       show_fault_oops(regs, error_code, address);
+
+       stackend = end_of_stack(tsk);
+       if (*stackend != STACK_END_MAGIC)
+               printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
+
+       tsk->thread.cr2         = address;
+       tsk->thread.trap_no     = 14;
+       tsk->thread.error_code  = error_code;
+
+       sig = SIGKILL;
+       if (__die("Oops", regs, error_code))
+               sig = 0;
+
+       /* Executive summary in case the body of the oops scrolled away */
+       printk(KERN_EMERG "CR2: %016lx\n", address);
+
+       oops_end(flags, regs, sig);
+}
+
+/*
+ * Print out info about fatal segfaults, if the show_unhandled_signals
+ * sysctl is set:
+ */
+static inline void
+show_signal_msg(struct pt_regs *regs, unsigned long error_code,
+               unsigned long address, struct task_struct *tsk)
+{
+       if (!unhandled_signal(tsk, SIGSEGV))
+               return;
+
+       if (!printk_ratelimit())
+               return;
+
+       printk(KERN_CONT "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
+               task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+               tsk->comm, task_pid_nr(tsk), address,
+               (void *)regs->ip, (void *)regs->sp, error_code);
+
+       print_vma_addr(KERN_CONT " in ", regs->ip);
+
+       printk(KERN_CONT "\n");
+}
+
+static void
+__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+                      unsigned long address, int si_code)
+{
+       struct task_struct *tsk = current;
+
+       /* User mode accesses just cause a SIGSEGV */
+       if (error_code & PF_USER) {
+               /*
+                * It's possible to have interrupts off here:
+                */
+               local_irq_enable();
+
+               /*
+                * Valid to do another page fault here because this one came
+                * from user space:
+                */
+               if (is_prefetch(regs, error_code, address))
+                       return;
+
+               if (is_errata100(regs, address))
+                       return;
+
+               if (unlikely(show_unhandled_signals))
+                       show_signal_msg(regs, error_code, address, tsk);
+
+               /* Kernel addresses are always protection faults: */
+               tsk->thread.cr2         = address;
+               tsk->thread.error_code  = error_code | (address >= TASK_SIZE);
+               tsk->thread.trap_no     = 14;
+
+               force_sig_info_fault(SIGSEGV, si_code, address, tsk);
+
+               return;
+       }
+
+       if (is_f00f_bug(regs, address))
+               return;
+
+       no_context(regs, error_code, address);
+}
+
+static noinline void
+bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+                    unsigned long address)
+{
+       __bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
+}
+
+static void
+__bad_area(struct pt_regs *regs, unsigned long error_code,
+          unsigned long address, int si_code)
+{
+       struct mm_struct *mm = current->mm;
+
+       /*
+        * Something tried to access memory that isn't in our memory map..
+        * Fix it, but check if it's kernel or user first..
+        */
+       up_read(&mm->mmap_sem);
+
+       __bad_area_nosemaphore(regs, error_code, address, si_code);
+}
+
+static noinline void
+bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+{
+       __bad_area(regs, error_code, address, SEGV_MAPERR);
+}
+
+static noinline void
+bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
+                     unsigned long address)
+{
+       __bad_area(regs, error_code, address, SEGV_ACCERR);
+}
+
+/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
+static void
+out_of_memory(struct pt_regs *regs, unsigned long error_code,
+             unsigned long address)
+{
+       /*
+        * We ran out of memory; call the OOM killer and return to userspace
+        * (which will retry the fault, or kill us if we got oom-killed):
+        */
+       up_read(&current->mm->mmap_sem);
+
+       pagefault_out_of_memory();
+}
+
+static void
+do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm = tsk->mm;
+
+       up_read(&mm->mmap_sem);
+
+       /* Kernel mode? Handle exceptions or die: */
+       if (!(error_code & PF_USER))
+               no_context(regs, error_code, address);
+
+       /* User-space => ok to do another page fault: */
+       if (is_prefetch(regs, error_code, address))
+               return;
+
+       tsk->thread.cr2         = address;
+       tsk->thread.error_code  = error_code;
+       tsk->thread.trap_no     = 14;
+
+       force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
+}
+
+static noinline void
+mm_fault_error(struct pt_regs *regs, unsigned long error_code,
+              unsigned long address, unsigned int fault)
+{
+       if (fault & VM_FAULT_OOM) {
+               out_of_memory(regs, error_code, address);
+       } else {
+               if (fault & VM_FAULT_SIGBUS)
+                       do_sigbus(regs, error_code, address);
+               else
+                       BUG();
+       }
+}
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
        if ((error_code & PF_WRITE) && !pte_write(*pte))
                return 0;
+
        if ((error_code & PF_INSTR) && !pte_exec(*pte))
                return 0;
 
@@ -440,21 +875,25 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 }
 
 /*
- * Handle a spurious fault caused by a stale TLB entry.  This allows
- * us to lazily refresh the TLB when increasing the permissions of a
- * kernel page (RO -> RW or NX -> X).  Doing it eagerly is very
- * expensive since that implies doing a full cross-processor TLB
- * flush, even if no stale TLB entries exist on other processors.
+ * Handle a spurious fault caused by a stale TLB entry.
+ *
+ * This allows us to lazily refresh the TLB when increasing the
+ * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
+ * eagerly is very expensive since that implies doing a full
+ * cross-processor TLB flush, even if no stale TLB entries exist
+ * on other processors.
+ *
  * There are no security implications to leaving a stale TLB when
  * increasing the permissions on a page.
  */
-static int spurious_fault(unsigned long address,
-                         unsigned long error_code)
+static noinline int
+spurious_fault(unsigned long error_code, unsigned long address)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
+       int ret;
 
        /* Reserved-bit violation or user access to kernel space? */
        if (error_code & (PF_USER | PF_RSVD))
@@ -482,127 +921,71 @@ static int spurious_fault(unsigned long address,
        if (!pte_present(*pte))
                return 0;
 
-       return spurious_fault_check(error_code, pte);
-}
-
-/*
- * X86_32
- * Handle a fault on the vmalloc or module mapping area
- *
- * X86_64
- * Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
- */
-static int vmalloc_fault(unsigned long address)
-{
-#ifdef CONFIG_X86_32
-       unsigned long pgd_paddr;
-       pmd_t *pmd_k;
-       pte_t *pte_k;
-
-       /* Make sure we are in vmalloc area */
-       if (!(address >= VMALLOC_START && address < VMALLOC_END))
-               return -1;
+       ret = spurious_fault_check(error_code, pte);
+       if (!ret)
+               return 0;
 
        /*
-        * Synchronize this task's top level page-table
-        * with the 'reference' page table.
-        *
-        * Do _not_ use "current" here. We might be inside
-        * an interrupt in the middle of a task switch..
+        * Make sure we have permissions in the PMD.
+        * If not, there's a bug in the page tables:
         */
-       pgd_paddr = read_cr3();
-       pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
-       if (!pmd_k)
-               return -1;
-       pte_k = pte_offset_kernel(pmd_k, address);
-       if (!pte_present(*pte_k))
-               return -1;
-       return 0;
-#else
-       pgd_t *pgd, *pgd_ref;
-       pud_t *pud, *pud_ref;
-       pmd_t *pmd, *pmd_ref;
-       pte_t *pte, *pte_ref;
+       ret = spurious_fault_check(error_code, (pte_t *) pmd);
+       WARN_ONCE(!ret, "PMD has incorrect permission bits\n");
 
-       /* Make sure we are in vmalloc area */
-       if (!(address >= VMALLOC_START && address < VMALLOC_END))
-               return -1;
+       return ret;
+}
 
-       /* Copy kernel mappings over when needed. This can also
-          happen within a race in page table update. In the later
-          case just flush. */
+int show_unhandled_signals = 1;
 
-       pgd = pgd_offset(current->active_mm, address);
-       pgd_ref = pgd_offset_k(address);
-       if (pgd_none(*pgd_ref))
-               return -1;
-       if (pgd_none(*pgd))
-               set_pgd(pgd, *pgd_ref);
-       else
-               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
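+/*
+ * Check whether the type of access (read vs. write) is permitted by
+ * the vma's protection flags; returns 1 if the access was invalid.
+ */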
+static inline int
+access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
+{
+       if (write) {
+               /* write, present and write, not present: */
+               if (unlikely(!(vma->vm_flags & VM_WRITE)))
+                       return 1;
+               return 0;
+       }
 
-       /* Below here mismatches are bugs because these lower tables
-          are shared */
+       /* read, present: */
+       if (unlikely(error_code & PF_PROT))
+               return 1;
+
+       /* read, not present: */
+       if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+               return 1;
 
-       pud = pud_offset(pgd, address);
-       pud_ref = pud_offset(pgd_ref, address);
-       if (pud_none(*pud_ref))
-               return -1;
-       if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
-               BUG();
-       pmd = pmd_offset(pud, address);
-       pmd_ref = pmd_offset(pud_ref, address);
-       if (pmd_none(*pmd_ref))
-               return -1;
-       if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
-               BUG();
-       pte_ref = pte_offset_kernel(pmd_ref, address);
-       if (!pte_present(*pte_ref))
-               return -1;
-       pte = pte_offset_kernel(pmd, address);
-       /* Don't use pte_page here, because the mappings can point
-          outside mem_map, and the NUMA hash lookup cannot handle
-          that. */
-       if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-               BUG();
        return 0;
-#endif
 }
 
-int show_unhandled_signals = 1;
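+/*
+ * TASK_SIZE_MAX marks the end of the user address range on both
+ * 32-bit (where it equals TASK_SIZE) and 64-bit kernels, so any
+ * address at or above it belongs to kernel space.
+ */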
+static int fault_in_kernel_space(unsigned long address)
+{
+       return address >= TASK_SIZE_MAX;
+}
 
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-#ifdef CONFIG_X86_64
-asmlinkage
-#endif
-void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
+dotraplinkage void __kprobes
+do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
-       struct task_struct *tsk;
-       struct mm_struct *mm;
        struct vm_area_struct *vma;
+       struct task_struct *tsk;
        unsigned long address;
-       int write, si_code;
+       struct mm_struct *mm;
+       int write;
        int fault;
-#ifdef CONFIG_X86_64
-       unsigned long flags;
-       int sig;
-#endif
 
        tsk = current;
        mm = tsk->mm;
+
        prefetchw(&mm->mmap_sem);
 
-       /* get the address */
+       /* Get the faulting address: */
        address = read_cr2();
 
-       si_code = SEGV_MAPERR;
-
        if (unlikely(kmmio_fault(regs, address)))
                return;
 
@@ -619,319 +1002,147 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
         * (error_code & 4) == 0, and that the fault was not a
         * protection error (error_code & 9) == 0.
         */
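+       /*
+        * error_code bit layout: PF_PROT(1) protection fault,
+        * PF_WRITE(2) write access, PF_USER(4) user-mode access,
+        * PF_RSVD(8) reserved-bit set, PF_INSTR(16) instruction fetch.
+        */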
-#ifdef CONFIG_X86_32
-       if (unlikely(address >= TASK_SIZE)) {
-#else
-       if (unlikely(address >= TASK_SIZE64)) {
-#endif
+       if (unlikely(fault_in_kernel_space(address))) {
                if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
                    vmalloc_fault(address) >= 0)
                        return;
 
-               /* Can handle a stale RO->RW TLB */
-               if (spurious_fault(address, error_code))
+               /* Can handle a stale RO->RW TLB: */
+               if (spurious_fault(error_code, address))
                        return;
 
-               /* kprobes don't want to hook the spurious faults. */
+               /* kprobes don't want to hook the spurious faults: */
                if (notify_page_fault(regs))
                        return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
-                * fault we could otherwise deadlock.
+                * fault we could otherwise deadlock:
                 */
-               goto bad_area_nosemaphore;
-       }
+               bad_area_nosemaphore(regs, error_code, address);
 
-       /* kprobes don't want to hook the spurious faults. */
-       if (notify_page_fault(regs))
                return;
+       }
 
+       /* kprobes don't want to hook the spurious faults: */
+       if (unlikely(notify_page_fault(regs)))
+               return;
        /*
         * It's safe to allow irq's after cr2 has been saved and the
         * vmalloc fault has been handled.
         *
         * User-mode registers count as a user access even for any
-        * potential system fault or CPU buglet.
+        * potential system fault or CPU buglet:
         */
        if (user_mode_vm(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
-       } else if (regs->flags & X86_EFLAGS_IF)
-               local_irq_enable();
+       } else {
+               if (regs->flags & X86_EFLAGS_IF)
+                       local_irq_enable();
+       }
 
-#ifdef CONFIG_X86_64
        if (unlikely(error_code & PF_RSVD))
-               pgtable_bad(address, regs, error_code);
-#endif
+               pgtable_bad(regs, error_code, address);
 
        /*
-        * If we're in an interrupt, have no user context or are running in an
-        * atomic region then we must not take the fault.
+        * If we're in an interrupt, have no user context or are running
+        * in an atomic region then we must not take the fault:
         */
-       if (unlikely(in_atomic() || !mm))
-               goto bad_area_nosemaphore;
+       if (unlikely(in_atomic() || !mm)) {
+               bad_area_nosemaphore(regs, error_code, address);
+               return;
+       }
 
        /*
         * When running in the kernel we expect faults to occur only to
-        * addresses in user space.  All other faults represent errors in the
-        * kernel and should generate an OOPS.  Unfortunately, in the case of an
-        * erroneous fault occurring in a code path which already holds mmap_sem
-        * we will deadlock attempting to validate the fault against the
-        * address space.  Luckily the kernel only validly references user
-        * space from well defined areas of code, which are listed in the
-        * exceptions table.
+        * addresses in user space.  All other faults represent errors in
+        * the kernel and should generate an OOPS.  Unfortunately, in the
+        * case of an erroneous fault occurring in a code path which already
+        * holds mmap_sem we will deadlock attempting to validate the fault
+        * against the address space.  Luckily the kernel only validly
+        * references user space from well defined areas of code, which are
+        * listed in the exceptions table.
         *
         * As the vast majority of faults will be valid we will only perform
-        * the source reference check when there is a possibility of a deadlock.
-        * Attempt to lock the address space, if we cannot we then validate the
-        * source.  If this is invalid we can skip the address space check,
-        * thus avoiding the deadlock.
+        * the source reference check when there is a possibility of a
+        * deadlock. Attempt to lock the address space, if we cannot we then
+        * validate the source. If this is invalid we can skip the address
+        * space check, thus avoiding the deadlock:
         */
-       if (!down_read_trylock(&mm->mmap_sem)) {
+       if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
                if ((error_code & PF_USER) == 0 &&
-                   !search_exception_tables(regs->ip))
-                       goto bad_area_nosemaphore;
+                   !search_exception_tables(regs->ip)) {
+                       bad_area_nosemaphore(regs, error_code, address);
+                       return;
+               }
                down_read(&mm->mmap_sem);
+       } else {
+               /*
+                * The above down_read_trylock() might have succeeded in
+                * which case we'll have missed the might_sleep() from
+                * down_read():
+                */
+               might_sleep();
        }
 
        vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
+       if (unlikely(!vma)) {
+               bad_area(regs, error_code, address);
+               return;
+       }
+       if (likely(vma->vm_start <= address))
                goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
+       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+               bad_area(regs, error_code, address);
+               return;
+       }
        if (error_code & PF_USER) {
                /*
                 * Accessing the stack below %sp is always a bug.
                 * The large cushion allows instructions like enter
-                * and pusha to work.  ("enter $65535,$31" pushes
+                * and pusha to work. ("enter $65535, $31" pushes
                 * 32 pointers and then decrements %sp by 65535.)
                 */
-               if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
-                       goto bad_area;
+               if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
+                       bad_area(regs, error_code, address);
+                       return;
+               }
        }
-       if (expand_stack(vma, address))
-               goto bad_area;
-/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
- */
+       if (unlikely(expand_stack(vma, address))) {
+               bad_area(regs, error_code, address);
+               return;
+       }
+
+       /*
+        * Ok, we have a good vm_area for this memory access, so
+        * we can handle it..
+        */
 good_area:
-       si_code = SEGV_ACCERR;
-       write = 0;
-       switch (error_code & (PF_PROT|PF_WRITE)) {
-       default:        /* 3: write, present */
-               /* fall through */
-       case PF_WRITE:          /* write, not present */
-               if (!(vma->vm_flags & VM_WRITE))
-                       goto bad_area;
-               write++;
-               break;
-       case PF_PROT:           /* read, present */
-               goto bad_area;
-       case 0:                 /* read, not present */
-               if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
-                       goto bad_area;
+       write = error_code & PF_WRITE;
+
+       if (unlikely(access_error(error_code, write, vma))) {
+               bad_area_access_error(regs, error_code, address);
+               return;
        }
 
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
-        * the fault.
+        * the fault:
         */
        fault = handle_mm_fault(mm, vma, address, write);
+
        if (unlikely(fault & VM_FAULT_ERROR)) {
-               if (fault & VM_FAULT_OOM)
-                       goto out_of_memory;
-               else if (fault & VM_FAULT_SIGBUS)
-                       goto do_sigbus;
-               BUG();
+               mm_fault_error(regs, error_code, address, fault);
+               return;
        }
+
        if (fault & VM_FAULT_MAJOR)
                tsk->maj_flt++;
        else
                tsk->min_flt++;
 
-#ifdef CONFIG_X86_32
-       /*
-        * Did it hit the DOS screen memory VA from vm86 mode?
-        */
-       if (v8086_mode(regs)) {
-               unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
-               if (bit < 32)
-                       tsk->thread.screen_bitmap |= 1 << bit;
-       }
-#endif
-       up_read(&mm->mmap_sem);
-       return;
-
-/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
- */
-bad_area:
-       up_read(&mm->mmap_sem);
-
-bad_area_nosemaphore:
-       /* User mode accesses just cause a SIGSEGV */
-       if (error_code & PF_USER) {
-               /*
-                * It's possible to have interrupts off here.
-                */
-               local_irq_enable();
-
-               /*
-                * Valid to do another page fault here because this one came
-                * from user space.
-                */
-               if (is_prefetch(regs, address, error_code))
-                       return;
-
-               if (is_errata100(regs, address))
-                       return;
-
-               if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
-                   printk_ratelimit()) {
-                       printk(
-                       "%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
-                       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-                       tsk->comm, task_pid_nr(tsk), address,
-                       (void *) regs->ip, (void *) regs->sp, error_code);
-                       print_vma_addr(" in ", regs->ip);
-                       printk("\n");
-               }
-
-               tsk->thread.cr2 = address;
-               /* Kernel addresses are always protection faults */
-               tsk->thread.error_code = error_code | (address >= TASK_SIZE);
-               tsk->thread.trap_no = 14;
-               force_sig_info_fault(SIGSEGV, si_code, address, tsk);
-               return;
-       }
-
-       if (is_f00f_bug(regs, address))
-               return;
-
-no_context:
-       /* Are we prepared to handle this kernel fault?  */
-       if (fixup_exception(regs))
-               return;
-
-       /*
-        * X86_32
-        * Valid to do another page fault here, because if this fault
-        * had been triggered by is_prefetch fixup_exception would have
-        * handled it.
-        *
-        * X86_64
-        * Hall of shame of CPU/BIOS bugs.
-        */
-       if (is_prefetch(regs, address, error_code))
-               return;
-
-       if (is_errata93(regs, address))
-               return;
-
-/*
- * Oops. The kernel tried to access some bad page. We'll have to
- * terminate things with extreme prejudice.
- */
-#ifdef CONFIG_X86_32
-       bust_spinlocks(1);
-#else
-       flags = oops_begin();
-#endif
-
-       show_fault_oops(regs, error_code, address);
-
-       tsk->thread.cr2 = address;
-       tsk->thread.trap_no = 14;
-       tsk->thread.error_code = error_code;
-
-#ifdef CONFIG_X86_32
-       die("Oops", regs, error_code);
-       bust_spinlocks(0);
-       do_exit(SIGKILL);
-#else
-       sig = SIGKILL;
-       if (__die("Oops", regs, error_code))
-               sig = 0;
-       /* Executive summary in case the body of the oops scrolled away */
-       printk(KERN_EMERG "CR2: %016lx\n", address);
-       oops_end(flags, regs, sig);
-#endif
-
-out_of_memory:
-       /*
-        * We ran out of memory, call the OOM killer, and return the userspace
-        * (which will retry the fault, or kill us if we got oom-killed).
-        */
-       up_read(&mm->mmap_sem);
-       pagefault_out_of_memory();
-       return;
+       check_v8086_mode(regs, address, tsk);
 
-do_sigbus:
        up_read(&mm->mmap_sem);
-
-       /* Kernel mode? Handle exceptions or die */
-       if (!(error_code & PF_USER))
-               goto no_context;
-#ifdef CONFIG_X86_32
-       /* User space => ok to do another page fault */
-       if (is_prefetch(regs, address, error_code))
-               return;
-#endif
-       tsk->thread.cr2 = address;
-       tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = 14;
-       force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
-}
-
-DEFINE_SPINLOCK(pgd_lock);
-LIST_HEAD(pgd_list);
-
-void vmalloc_sync_all(void)
-{
-       unsigned long address;
-
-#ifdef CONFIG_X86_32
-       if (SHARED_KERNEL_PMD)
-               return;
-
-       for (address = VMALLOC_START & PMD_MASK;
-            address >= TASK_SIZE && address < FIXADDR_TOP;
-            address += PMD_SIZE) {
-               unsigned long flags;
-               struct page *page;
-
-               spin_lock_irqsave(&pgd_lock, flags);
-               list_for_each_entry(page, &pgd_list, lru) {
-                       if (!vmalloc_sync_one(page_address(page),
-                                             address))
-                               break;
-               }
-               spin_unlock_irqrestore(&pgd_lock, flags);
-       }
-#else /* CONFIG_X86_64 */
-       for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
-            address += PGDIR_SIZE) {
-               const pgd_t *pgd_ref = pgd_offset_k(address);
-               unsigned long flags;
-               struct page *page;
-
-               if (pgd_none(*pgd_ref))
-                       continue;
-               spin_lock_irqsave(&pgd_lock, flags);
-               list_for_each_entry(page, &pgd_list, lru) {
-                       pgd_t *pgd;
-                       pgd = (pgd_t *)page_address(page) + pgd_index(address);
-                       if (pgd_none(*pgd))
-                               set_pgd(pgd, *pgd_ref);
-                       else
-                               BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-               }
-               spin_unlock_irqrestore(&pgd_lock, flags);
-       }
-#endif
 }
index bcc079c282dd33112a48b250ec227610c39697cb..522db5e3d0bf973e54ca71e33201454c4aadefda 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/swap.h> /* for totalram_pages */
 
 void *kmap(struct page *page)
 {
@@ -120,22 +121,13 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
        pagefault_enable();
 }
 
-/* This is the same as kmap_atomic() but can map memory that doesn't
+/*
+ * This is the same as kmap_atomic() but can map memory that doesn't
  * have a struct page associated with it.
  */
 void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 {
-       enum fixed_addresses idx;
-       unsigned long vaddr;
-
-       pagefault_disable();
-
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
-       arch_flush_lazy_mmu_mode();
-
-       return (void*) vaddr;
+       return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
 }
 EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
@@ -156,3 +148,27 @@ EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
+
+void __init set_highmem_pages_init(void)
+{
+       struct zone *zone;
+       int nid;
+
+       for_each_zone(zone) {
+               unsigned long zone_start_pfn, zone_end_pfn;
+
+               if (!is_highmem(zone))
+                       continue;
+
+               zone_start_pfn = zone->zone_start_pfn;
+               zone_end_pfn = zone_start_pfn + zone->spanned_pages;
+
+               nid = zone_to_nid(zone);
+               printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
+                               zone->name, nid, zone_start_pfn, zone_end_pfn);
+
+               add_highpages_with_active_regions(nid, zone_start_pfn,
+                                zone_end_pfn);
+       }
+       totalram_pages += totalhigh_pages;
+}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
new file mode 100644 (file)
index 0000000..fd3da1d
--- /dev/null
@@ -0,0 +1,393 @@
+#include <linux/ioport.h>
+#include <linux/swap.h>
+
+#include <asm/cacheflush.h>
+#include <asm/e820.h>
+#include <asm/init.h>
+#include <asm/page.h>
+#include <asm/page_types.h>
+#include <asm/sections.h>
+#include <asm/system.h>
+#include <asm/tlbflush.h>
+
+unsigned long __initdata e820_table_start;
+unsigned long __meminitdata e820_table_end;
+unsigned long __meminitdata e820_table_top;
+
+int after_bootmem;
+
+int direct_gbpages
+#ifdef CONFIG_DIRECT_GBPAGES
+                               = 1
+#endif
+;
+
+static void __init find_early_table_space(unsigned long end, int use_pse,
+                                         int use_gbpages)
+{
+       unsigned long puds, pmds, ptes, tables, start;
+
+       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
+       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+
+       if (use_gbpages) {
+               unsigned long extra;
+
+               extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
+               pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+       } else
+               pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+
+       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+
+       if (use_pse) {
+               unsigned long extra;
+
+               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+#ifdef CONFIG_X86_32
+               extra += PMD_SIZE;
+#endif
+               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       } else
+               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
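+
+       /*
+        * Worked example (64-bit, 2 MB pages): mapping 4 GB needs
+        * 4 pud_t and 2048 pmd_t entries, which round up to 4 kB +
+        * 16 kB of page-table space; ptes are only needed for an
+        * unaligned tail.
+        */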
+
+#ifdef CONFIG_X86_32
+       /* for fixmap */
+       tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
+#endif
+
+       /*
+        * RED-PEN putting page tables only on node 0 could
+        * cause a hotspot and fill up ZONE_DMA. The page tables
+        * need roughly 0.5KB per GB.
+        */
+#ifdef CONFIG_X86_32
+       start = 0x7000;
+       e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
+                                       tables, PAGE_SIZE);
+#else /* CONFIG_X86_64 */
+       start = 0x8000;
+       e820_table_start = find_e820_area(start, end, tables, PAGE_SIZE);
+#endif
+       if (e820_table_start == -1UL)
+               panic("Cannot find space for the kernel page tables");
+
+       e820_table_start >>= PAGE_SHIFT;
+       e820_table_end = e820_table_start;
+       e820_table_top = e820_table_start + (tables >> PAGE_SHIFT);
+
+       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
+               end, e820_table_start << PAGE_SHIFT, e820_table_top << PAGE_SHIFT);
+}
+
+struct map_range {
+       unsigned long start;
+       unsigned long end;
+       unsigned page_size_mask;
+};
+
+#ifdef CONFIG_X86_32
+#define NR_RANGE_MR 3
+#else /* CONFIG_X86_64 */
+#define NR_RANGE_MR 5
+#endif
+
+static int __meminit save_mr(struct map_range *mr, int nr_range,
+                            unsigned long start_pfn, unsigned long end_pfn,
+                            unsigned long page_size_mask)
+{
+       if (start_pfn < end_pfn) {
+               if (nr_range >= NR_RANGE_MR)
+                       panic("run out of range for init_memory_mapping\n");
+               mr[nr_range].start = start_pfn<<PAGE_SHIFT;
+               mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
+               mr[nr_range].page_size_mask = page_size_mask;
+               nr_range++;
+       }
+
+       return nr_range;
+}
+
+#ifdef CONFIG_X86_64
+static void __init init_gbpages(void)
+{
+       if (direct_gbpages && cpu_has_gbpages)
+               printk(KERN_INFO "Using GB pages for direct mapping\n");
+       else
+               direct_gbpages = 0;
+}
+#else
+static inline void init_gbpages(void)
+{
+}
+#endif
+
+/*
+ * Set up the direct mapping of the physical memory at PAGE_OFFSET.
+ * This runs before bootmem is initialized and gets pages directly from
+ * the physical memory. To access them, they are temporarily mapped.
+ */
+unsigned long __init_refok init_memory_mapping(unsigned long start,
+                                              unsigned long end)
+{
+       unsigned long page_size_mask = 0;
+       unsigned long start_pfn, end_pfn;
+       unsigned long ret = 0;
+       unsigned long pos;
+
+       struct map_range mr[NR_RANGE_MR];
+       int nr_range, i;
+       int use_pse, use_gbpages;
+
+       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
+
+       if (!after_bootmem)
+               init_gbpages();
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       /*
+        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+        * This will simplify cpa(), which otherwise needs to support splitting
+        * large pages into small in interrupt context, etc.
+        */
+       use_pse = use_gbpages = 0;
+#else
+       use_pse = cpu_has_pse;
+       use_gbpages = direct_gbpages;
+#endif
+
+#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_PAE
+       set_nx();
+       if (nx_enabled)
+               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+#endif
+
+       /* Enable PSE if available */
+       if (cpu_has_pse)
+               set_in_cr4(X86_CR4_PSE);
+
+       /* Enable PGE if available */
+       if (cpu_has_pge) {
+               set_in_cr4(X86_CR4_PGE);
+               __supported_pte_mask |= _PAGE_GLOBAL;
+       }
+#endif
+
+       if (use_gbpages)
+               page_size_mask |= 1 << PG_LEVEL_1G;
+       if (use_pse)
+               page_size_mask |= 1 << PG_LEVEL_2M;
+
+       memset(mr, 0, sizeof(mr));
+       nr_range = 0;
+
+       /* Head chunk, in case start is not big-page aligned: */
+       start_pfn = start >> PAGE_SHIFT;
+       pos = start_pfn << PAGE_SHIFT;
+#ifdef CONFIG_X86_32
+       /*
+        * Don't use a large page for the first 2/4MB of memory
+        * because there are often fixed size MTRRs in there
+        * and overlapping MTRRs into large pages can cause
+        * slowdowns.
+        */
+       if (pos == 0)
+               end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
+       else
+               end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                                << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+       end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
+                       << (PMD_SHIFT - PAGE_SHIFT);
+#endif
+       if (end_pfn > (end >> PAGE_SHIFT))
+               end_pfn = end >> PAGE_SHIFT;
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+               pos = end_pfn << PAGE_SHIFT;
+       }
+
+       /* big page (2M) range */
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+#ifdef CONFIG_X86_32
+       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+#else /* CONFIG_X86_64 */
+       end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+       if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+               end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+#endif
+
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
+
+#ifdef CONFIG_X86_64
+       /* big page (1G) range */
+       start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+                        << (PUD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask &
+                                ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+               pos = end_pfn << PAGE_SHIFT;
+       }
+
+       /* Tail chunk that is not 1G-aligned: */
+       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+                        << (PMD_SHIFT - PAGE_SHIFT);
+       end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+       if (start_pfn < end_pfn) {
+               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+                               page_size_mask & (1<<PG_LEVEL_2M));
+               pos = end_pfn << PAGE_SHIFT;
+       }
+#endif
+
+       /* Tail chunk that is not 2M-aligned: */
+       start_pfn = pos>>PAGE_SHIFT;
+       end_pfn = end>>PAGE_SHIFT;
+       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+
+       /* Try to merge contiguous ranges with the same page size: */
+       for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
+               unsigned long old_start;
+               if (mr[i].end != mr[i+1].start ||
+                   mr[i].page_size_mask != mr[i+1].page_size_mask)
+                       continue;
+               /* move it */
+               old_start = mr[i].start;
+               memmove(&mr[i], &mr[i+1],
+                       (nr_range - 1 - i) * sizeof(struct map_range));
+               mr[i--].start = old_start;
+               nr_range--;
+       }
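+
+       /*
+        * E.g. two adjacent ranges that both map with 2 MB pages are
+        * collapsed into a single entry by the loop above.
+        */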
+
+       for (i = 0; i < nr_range; i++)
+               printk(KERN_DEBUG " %010lx - %010lx page %s\n",
+                               mr[i].start, mr[i].end,
+                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
+                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+
+       /*
+        * Find space for the kernel direct mapping tables.
+        *
+        * Later we should allocate these tables in the node local to the
+        * memory being mapped. Unfortunately this is currently done before
+        * the nodes are discovered.
+        */
+       if (!after_bootmem)
+               find_early_table_space(end, use_pse, use_gbpages);
+
+#ifdef CONFIG_X86_32
+       for (i = 0; i < nr_range; i++)
+               kernel_physical_mapping_init(mr[i].start, mr[i].end,
+                                            mr[i].page_size_mask);
+       ret = end;
+#else /* CONFIG_X86_64 */
+       for (i = 0; i < nr_range; i++)
+               ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
+                                                  mr[i].page_size_mask);
+#endif
+
+#ifdef CONFIG_X86_32
+       early_ioremap_page_table_range_init();
+
+       load_cr3(swapper_pg_dir);
+#endif
+
+#ifdef CONFIG_X86_64
+       if (!after_bootmem)
+               mmu_cr4_features = read_cr4();
+#endif
+       __flush_tlb_all();
+
+       if (!after_bootmem && e820_table_end > e820_table_start)
+               reserve_early(e820_table_start << PAGE_SHIFT,
+                                e820_table_end << PAGE_SHIFT, "PGTABLE");
+
+       if (!after_bootmem)
+               early_memtest(start, end);
+
+       return ret >> PAGE_SHIFT;
+}
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain address
+ * is valid. The argument is a physical page number.
+ *
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area contains BIOS code and data regions used by X and dosemu and similar
+ * apps. Access has to be given to non-kernel-RAM areas as well; these
+ * contain the PCI mmio resources as well as potential BIOS/ACPI data
+ * regions.
+ */
+int devmem_is_allowed(unsigned long pagenr)
+{
+       if (pagenr <= 256)
+               return 1;
+       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+               return 0;
+       if (!page_is_ram(pagenr))
+               return 1;
+       return 0;
+}
+
+void free_init_pages(char *what, unsigned long begin, unsigned long end)
+{
+       unsigned long addr = begin;
+
+       if (addr >= end)
+               return;
+
+       /*
+        * If debugging page accesses, then do not free this memory but
+        * mark it not present - any buggy init-section access will
+        * create a kernel page fault:
+        */
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
+               begin, PAGE_ALIGN(end));
+       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
+#else
+       /*
+        * We just marked the kernel text read-only above; now that
+        * we are going to free part of it, we need to make it
+        * writable first.
+        */
+       set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
+
+       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
+
+       for (; addr < end; addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               init_page_count(virt_to_page(addr));
+               memset((void *)(addr & ~(PAGE_SIZE-1)),
+                       POISON_FREE_INITMEM, PAGE_SIZE);
+               free_page(addr);
+               totalram_pages++;
+       }
+#endif
+}
+
+void free_initmem(void)
+{
+       free_init_pages("unused kernel memory",
+                       (unsigned long)(&__init_begin),
+                       (unsigned long)(&__init_end));
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       free_init_pages("initrd memory", start, end);
+}
+#endif
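
The head/2M/tail split performed by init_memory_mapping() above is pure pfn
arithmetic, so it can be exercised on its own. What follows is a minimal,
hypothetical userspace sketch of that split - it omits the 64-bit 1G tier and
the merge pass, the PAGE_SHIFT/PMD_SHIFT values are the usual x86 ones, and
this save_mr() is a stand-in that only records ranges:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21
#define PMD_SIZE   (1UL << PMD_SHIFT)

struct map_range { unsigned long start, end; int big; };

static int save_mr(struct map_range *mr, int nr,
                   unsigned long start_pfn, unsigned long end_pfn, int big)
{
        if (start_pfn < end_pfn) {
                mr[nr].start = start_pfn << PAGE_SHIFT;
                mr[nr].end   = end_pfn << PAGE_SHIFT;
                mr[nr].big   = big;
                nr++;
        }
        return nr;
}

int main(void)
{
        unsigned long start = 0x1000, end = 0x800000, pos = start;
        unsigned long end_pfn;
        struct map_range mr[5];
        int i, nr = 0;

        /* head: 4k mappings up to the first 2M boundary */
        end_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT)
                        << (PMD_SHIFT - PAGE_SHIFT);
        if (end_pfn > (end >> PAGE_SHIFT))
                end_pfn = end >> PAGE_SHIFT;
        nr = save_mr(mr, nr, pos >> PAGE_SHIFT, end_pfn, 0);
        if (nr)
                pos = mr[nr - 1].end;

        /* middle: whole 2M pages (pos is now 2M-aligned or equal to end) */
        end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        nr = save_mr(mr, nr, pos >> PAGE_SHIFT, end_pfn, 1);
        if (nr)
                pos = mr[nr - 1].end;

        /* tail: whatever is left below end gets 4k mappings again */
        nr = save_mr(mr, nr, pos >> PAGE_SHIFT, end >> PAGE_SHIFT, 0);

        for (i = 0; i < nr; i++)
                printf(" %010lx - %010lx page %s\n",
                       mr[i].start, mr[i].end, mr[i].big ? "2M" : "4k");
        return 0;
}

For start = 0x1000 and end = 0x800000 the sketch prints a 4k head range
0x1000-0x200000 followed by a 2M range 0x200000-0x800000, the same shape as
the KERN_DEBUG output produced by the kernel loop above.
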
index 2cef050744131698ff7239192b04a7ba136d9153..db81e9a8556b3b0bba854aeba5740bd0cc039039 100644 (file)
@@ -49,9 +49,7 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 #include <asm/cacheflush.h>
-#include <asm/smp.h>
-
-unsigned int __VMALLOC_RESERVE = 128 << 20;
+#include <asm/init.h>
 
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
@@ -61,19 +59,14 @@ unsigned long highstart_pfn, highend_pfn;
 
 static noinline int do_test_wp_bit(void);
 
-
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
-static int __initdata after_init_bootmem;
+bool __read_mostly __vmalloc_start_set = false;
 
 static __init void *alloc_low_page(void)
 {
-       unsigned long pfn = table_end++;
+       unsigned long pfn = e820_table_end++;
        void *adr;
 
-       if (pfn >= table_top)
+       if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");
 
        adr = __va(pfn * PAGE_SIZE);
@@ -93,7 +86,7 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 
 #ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
-               if (after_init_bootmem)
+               if (after_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
@@ -120,7 +113,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;
 
-               if (after_init_bootmem) {
+               if (after_bootmem) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
 #endif
@@ -138,6 +131,23 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
        return pte_offset_kernel(pmd, 0);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+       int pgd_idx = pgd_index(vaddr);
+       int pmd_idx = pmd_index(vaddr);
+
+       return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+       int pte_idx = pte_index(vaddr);
+       pmd_t *pmd;
+
+       pmd = populate_extra_pmd(vaddr);
+       return one_page_table_init(pmd) + pte_idx;
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
 {
@@ -154,12 +164,12 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-           && ((__pa(pte) >> PAGE_SHIFT) < table_start
-               || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+           && ((__pa(pte) >> PAGE_SHIFT) < e820_table_start
+               || (__pa(pte) >> PAGE_SHIFT) >= e820_table_end)) {
                pte_t *newpte;
                int i;
 
-               BUG_ON(after_init_bootmem);
+               BUG_ON(after_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
@@ -228,11 +238,14 @@ static inline int is_kernel_text(unsigned long addr)
  * of max_low_pfn pages, by creating page tables starting from address
  * PAGE_OFFSET:
  */
-static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-                                               unsigned long start_pfn,
-                                               unsigned long end_pfn,
-                                               int use_pse)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask)
 {
+       int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
+       unsigned long start_pfn, end_pfn;
+       pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
@@ -241,6 +254,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
        unsigned pages_2m, pages_4k;
        int mapping_iter;
 
+       start_pfn = start >> PAGE_SHIFT;
+       end_pfn = end >> PAGE_SHIFT;
+
        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
@@ -355,26 +371,6 @@ repeat:
                mapping_iter = 2;
                goto repeat;
        }
-}
-
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-       if (pagenr <= 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-               return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
        return 0;
 }
 
@@ -470,22 +466,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
        work_with_active_regions(nid, add_highpages_work_fn, &data);
 }
 
-#ifndef CONFIG_NUMA
-static void __init set_highmem_pages_init(void)
-{
-       add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
-
-       totalram_pages += totalhigh_pages;
-}
-#endif /* !CONFIG_NUMA */
-
 #else
 static inline void permanent_kmaps_init(pgd_t *pgd_base)
 {
 }
-static inline void set_highmem_pages_init(void)
-{
-}
 #endif /* CONFIG_HIGHMEM */
 
 void __init native_pagetable_setup_start(pgd_t *base)
@@ -543,8 +527,9 @@ void __init native_pagetable_setup_done(pgd_t *base)
  * be partially populated, and so it avoids stomping on any existing
  * mappings.
  */
-static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
+void __init early_ioremap_page_table_range_init(void)
 {
+       pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;
 
        /*
@@ -639,7 +624,7 @@ static int __init noexec_setup(char *str)
 }
 early_param("noexec", noexec_setup);
 
-static void __init set_nx(void)
+void __init set_nx(void)
 {
        unsigned int v[4], l, h;
 
@@ -675,75 +660,97 @@ static int __init parse_highmem(char *arg)
 }
 early_param("highmem", parse_highmem);
 
+#define MSG_HIGHMEM_TOO_BIG \
+       "highmem size (%luMB) is bigger than pages available (%luMB)!\n"
+
+#define MSG_LOWMEM_TOO_SMALL \
+       "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
 /*
- * Determine low and high memory ranges:
+ * All of RAM fits into lowmem - but if the user wants highmem
+ * artificially, via the highmem=x boot parameter, then create
+ * it:
  */
-void __init find_low_pfn_range(void)
+void __init lowmem_pfn_init(void)
 {
-       /* it could update max_pfn */
-
        /* max_low_pfn is 0, we already have early_res support */
-
        max_low_pfn = max_pfn;
-       if (max_low_pfn > MAXMEM_PFN) {
-               if (highmem_pages == -1)
-                       highmem_pages = max_pfn - MAXMEM_PFN;
-               if (highmem_pages + MAXMEM_PFN < max_pfn)
-                       max_pfn = MAXMEM_PFN + highmem_pages;
-               if (highmem_pages + MAXMEM_PFN > max_pfn) {
-                       printk(KERN_WARNING "only %luMB highmem pages "
-                               "available, ignoring highmem size of %uMB.\n",
-                               pages_to_mb(max_pfn - MAXMEM_PFN),
+
+       if (highmem_pages == -1)
+               highmem_pages = 0;
+#ifdef CONFIG_HIGHMEM
+       if (highmem_pages >= max_pfn) {
+               printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
+                       pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
+               highmem_pages = 0;
+       }
+       if (highmem_pages) {
+               if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
+                       printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
-               max_low_pfn = MAXMEM_PFN;
+               max_low_pfn -= highmem_pages;
+       }
+#else
+       if (highmem_pages)
+               printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
+#endif
+}
+
+#define MSG_HIGHMEM_TOO_SMALL \
+       "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"
+
+#define MSG_HIGHMEM_TRIMMED \
+       "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
+/*
+ * We have more RAM than fits into lowmem - we try to put it into
+ * highmem, also taking the highmem=x boot parameter into account:
+ */
+void __init highmem_pfn_init(void)
+{
+       max_low_pfn = MAXMEM_PFN;
+
+       if (highmem_pages == -1)
+               highmem_pages = max_pfn - MAXMEM_PFN;
+
+       if (highmem_pages + MAXMEM_PFN < max_pfn)
+               max_pfn = MAXMEM_PFN + highmem_pages;
+
+       if (highmem_pages + MAXMEM_PFN > max_pfn) {
+               printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
+                       pages_to_mb(max_pfn - MAXMEM_PFN),
+                       pages_to_mb(highmem_pages));
+               highmem_pages = 0;
+       }
 #ifndef CONFIG_HIGHMEM
-               /* Maximum memory usable is what is directly addressable */
-               printk(KERN_WARNING "Warning only %ldMB will be used.\n",
-                                       MAXMEM>>20);
-               if (max_pfn > MAX_NONPAE_PFN)
-                       printk(KERN_WARNING
-                                "Use a HIGHMEM64G enabled kernel.\n");
-               else
-                       printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
-               max_pfn = MAXMEM_PFN;
+       /* Maximum memory usable is what is directly addressable */
+       printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
+       if (max_pfn > MAX_NONPAE_PFN)
+               printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
+       else
+               printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+       max_pfn = MAXMEM_PFN;
 #else /* !CONFIG_HIGHMEM */
 #ifndef CONFIG_HIGHMEM64G
-               if (max_pfn > MAX_NONPAE_PFN) {
-                       max_pfn = MAX_NONPAE_PFN;
-                       printk(KERN_WARNING "Warning only 4GB will be used."
-                               "Use a HIGHMEM64G enabled kernel.\n");
-               }
+       if (max_pfn > MAX_NONPAE_PFN) {
+               max_pfn = MAX_NONPAE_PFN;
+               printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
+       }
 #endif /* !CONFIG_HIGHMEM64G */
 #endif /* !CONFIG_HIGHMEM */
-       } else {
-               if (highmem_pages == -1)
-                       highmem_pages = 0;
-#ifdef CONFIG_HIGHMEM
-               if (highmem_pages >= max_pfn) {
-                       printk(KERN_ERR "highmem size specified (%uMB) is "
-                               "bigger than pages available (%luMB)!.\n",
-                               pages_to_mb(highmem_pages),
-                               pages_to_mb(max_pfn));
-                       highmem_pages = 0;
-               }
-               if (highmem_pages) {
-                       if (max_low_pfn - highmem_pages <
-                           64*1024*1024/PAGE_SIZE){
-                               printk(KERN_ERR "highmem size %uMB results in "
-                               "smaller than 64MB lowmem, ignoring it.\n"
-                                       , pages_to_mb(highmem_pages));
-                               highmem_pages = 0;
-                       }
-                       max_low_pfn -= highmem_pages;
-               }
-#else
-               if (highmem_pages)
-                       printk(KERN_ERR "ignoring highmem size on non-highmem"
-                                       " kernel!\n");
-#endif
-       }
+}
+
+/*
+ * Determine low and high memory ranges:
+ */
+void __init find_low_pfn_range(void)
+{
+       /* it could update max_pfn */
+
+       if (max_pfn <= MAXMEM_PFN)
+               lowmem_pfn_init();
+       else
+               highmem_pfn_init();
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -769,6 +776,8 @@ void __init initmem_init(unsigned long start_pfn,
 #ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
 #endif
+       __vmalloc_start_set = true;
+
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));
 
@@ -790,176 +799,66 @@ static void __init zone_sizes_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
 
+static unsigned long __init setup_node_bootmem(int nodeid,
+                                unsigned long start_pfn,
+                                unsigned long end_pfn,
+                                unsigned long bootmap)
+{
+       unsigned long bootmap_size;
+
+       /* don't touch min_low_pfn */
+       bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
+                                        bootmap >> PAGE_SHIFT,
+                                        start_pfn, end_pfn);
+       printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
+               nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+       printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
+                nodeid, bootmap, bootmap + bootmap_size);
+       free_bootmem_with_active_regions(nodeid, end_pfn);
+       early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+
+       return bootmap + bootmap_size;
+}
+
 void __init setup_bootmem_allocator(void)
 {
-       int i;
+       int nodeid;
        unsigned long bootmap_size, bootmap;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
-       bootmap = find_e820_area(min_low_pfn<<PAGE_SHIFT,
-                                max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
+       bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
 
-       /* don't touch min_low_pfn */
-       bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
-                                        min_low_pfn, max_low_pfn);
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
-       printk(KERN_INFO "  low ram: %08lx - %08lx\n",
-                min_low_pfn<<PAGE_SHIFT, max_low_pfn<<PAGE_SHIFT);
-       printk(KERN_INFO "  bootmap %08lx - %08lx\n",
-                bootmap, bootmap + bootmap_size);
-       for_each_online_node(i)
-               free_bootmem_with_active_regions(i, max_low_pfn);
-       early_res_to_bootmem(0, max_low_pfn<<PAGE_SHIFT);
-
-       after_init_bootmem = 1;
-}
-
-static void __init find_early_table_space(unsigned long end, int use_pse)
-{
-       unsigned long puds, pmds, ptes, tables, start;
-
-       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = PAGE_ALIGN(puds * sizeof(pud_t));
-
-       pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
-
-       if (use_pse) {
-               unsigned long extra;
+       printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
 
-               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-               extra += PMD_SIZE;
-               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       } else
-               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       for_each_online_node(nodeid) {
+               unsigned long start_pfn, end_pfn;
 
-       tables += PAGE_ALIGN(ptes * sizeof(pte_t));
-
-       /* for fixmap */
-       tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
-
-       /*
-        * RED-PEN putting page tables only on node 0 could
-        * cause a hotspot and fill up ZONE_DMA. The page tables
-        * need roughly 0.5KB per GB.
-        */
-       start = 0x7000;
-       table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
-                                       tables, PAGE_SIZE);
-       if (table_start == -1UL)
-               panic("Cannot find space for the kernel page tables");
-
-       table_start >>= PAGE_SHIFT;
-       table_end = table_start;
-       table_top = table_start + (tables>>PAGE_SHIFT);
-
-       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, table_start << PAGE_SHIFT,
-               (table_start << PAGE_SHIFT) + tables);
-}
-
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-                                               unsigned long end)
-{
-       pgd_t *pgd_base = swapper_pg_dir;
-       unsigned long start_pfn, end_pfn;
-       unsigned long big_page_start;
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /*
-        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-        * This will simplify cpa(), which otherwise needs to support splitting
-        * large pages into small in interrupt context, etc.
-        */
-       int use_pse = 0;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+               start_pfn = node_start_pfn[nodeid];
+               end_pfn = node_end_pfn[nodeid];
+               if (start_pfn > max_low_pfn)
+                       continue;
+               if (end_pfn > max_low_pfn)
+                       end_pfn = max_low_pfn;
 #else
-       int use_pse = cpu_has_pse;
-#endif
-
-       /*
-        * Find space for the kernel direct mapping tables.
-        */
-       if (!after_init_bootmem)
-               find_early_table_space(end, use_pse);
-
-#ifdef CONFIG_X86_PAE
-       set_nx();
-       if (nx_enabled)
-               printk(KERN_INFO "NX (Execute Disable) protection: active\n");
+               start_pfn = 0;
+               end_pfn = max_low_pfn;
 #endif
-
-       /* Enable PSE if available */
-       if (cpu_has_pse)
-               set_in_cr4(X86_CR4_PSE);
-
-       /* Enable PGE if available */
-       if (cpu_has_pge) {
-               set_in_cr4(X86_CR4_PGE);
-               __supported_pte_mask |= _PAGE_GLOBAL;
-       }
-
-       /*
-        * Don't use a large page for the first 2/4MB of memory
-        * because there are often fixed size MTRRs in there
-        * and overlapping MTRRs into large pages can cause
-        * slowdowns.
-        */
-       big_page_start = PMD_SIZE;
-
-       if (start < big_page_start) {
-               start_pfn = start >> PAGE_SHIFT;
-               end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
-       } else {
-               /* head is not big page alignment ? */
-               start_pfn = start >> PAGE_SHIFT;
-               end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
-                                << (PMD_SHIFT - PAGE_SHIFT);
+               bootmap = setup_node_bootmem(nodeid, start_pfn, end_pfn,
+                                                bootmap);
        }
-       if (start_pfn < end_pfn)
-               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
-
-       /* big page range */
-       start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < (big_page_start >> PAGE_SHIFT))
-               start_pfn =  big_page_start >> PAGE_SHIFT;
-       end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < end_pfn)
-               kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
-                                            use_pse);
-
-       /* tail is not big page alignment ? */
-       start_pfn = end_pfn;
-       if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
-               end_pfn = end >> PAGE_SHIFT;
-               if (start_pfn < end_pfn)
-                       kernel_physical_mapping_init(pgd_base, start_pfn,
-                                                        end_pfn, 0);
-       }
-
-       early_ioremap_page_table_range_init(pgd_base);
-
-       load_cr3(swapper_pg_dir);
-
-       __flush_tlb_all();
 
-       if (!after_init_bootmem)
-               reserve_early(table_start << PAGE_SHIFT,
-                                table_end << PAGE_SHIFT, "PGTABLE");
-
-       if (!after_init_bootmem)
-               early_memtest(start, end);
-
-       return end >> PAGE_SHIFT;
+       after_bootmem = 1;
 }
 
-
 /*
  * paging_init() sets up the page tables - note that the first 8MB are
  * already mapped by head.S.
@@ -1193,52 +1092,6 @@ void mark_rodata_ro(void)
 }
 #endif
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /*
-        * If debugging page accesses then do not free this memory but
-        * mark them not present - any buggy init-section access will
-        * create a kernel page fault:
-        */
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
-       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-       unsigned long addr;
-
-       /*
-        * We just marked the kernel text read only above, now that
-        * we are going to free part of that, we need to make that
-        * writeable first.
-        */
-       set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
-
-       for (addr = begin; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
-               totalram_pages++;
-       }
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-#endif
-}
-
-void free_initmem(void)
-{
-       free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
-}
-
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {
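
find_low_pfn_range() above now reduces to one comparison that picks between
the two helpers. Below is a hedged userspace sketch of that decision and of
the simplest path through each helper - the MAXMEM_PFN value is an
illustrative stand-in, and the 64MB-lowmem safeguard is omitted:

#include <stdio.h>

#define MAXMEM_PFN 0x37bfe      /* assumption: roughly 891MB of lowmem */

static unsigned long max_pfn, max_low_pfn, highmem_pages = -1UL;

static void lowmem_pfn_init(void)       /* all RAM fits into lowmem */
{
        max_low_pfn = max_pfn;
        if (highmem_pages == -1UL)
                highmem_pages = 0;
        if (highmem_pages && highmem_pages < max_low_pfn)
                max_low_pfn -= highmem_pages;   /* carve out highmem=x */
}

static void highmem_pfn_init(void)      /* more RAM than lowmem can hold */
{
        max_low_pfn = MAXMEM_PFN;
        if (highmem_pages == -1UL)
                highmem_pages = max_pfn - MAXMEM_PFN;
}

int main(void)
{
        max_pfn = 0x100000;             /* 4GB of RAM, in 4k pages */

        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();

        printf("max_low_pfn=%#lx highmem=%#lx pages\n",
               max_low_pfn, max_pfn - max_low_pfn);
        return 0;
}

Splitting one deeply nested if/else into two flat helpers, as the patch does,
is what makes each policy readable and independently testable like this.
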
index b1352250096e0d734ea4a03650e7d92ae8c2d8ba..54efa57d1c039b648d6eb77bef1878c10e9cdb0b 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/kdebug.h>
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
+#include <asm/init.h>
 
 /*
  * end_pfn only includes RAM, while max_pfn_mapped includes all e820 entries.
@@ -61,12 +62,6 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages
-#ifdef CONFIG_DIRECT_GBPAGES
-                               = 1
-#endif
-;
-
 static int __init parse_direct_gbpages_off(char *arg)
 {
        direct_gbpages = 0;
@@ -87,12 +82,10 @@ early_param("gbpages", parse_direct_gbpages_on);
  * around without checking the pgd every time.
  */
 
-int after_bootmem;
-
 pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
 EXPORT_SYMBOL_GPL(__supported_pte_mask);
 
-static int do_not_nx __cpuinitdata;
+static int disable_nx __cpuinitdata;
 
 /*
  * noexec=on|off
@@ -107,9 +100,9 @@ static int __init nonx_setup(char *str)
                return -EINVAL;
        if (!strncmp(str, "on", 2)) {
                __supported_pte_mask |= _PAGE_NX;
-               do_not_nx = 0;
+               disable_nx = 0;
        } else if (!strncmp(str, "off", 3)) {
-               do_not_nx = 1;
+               disable_nx = 1;
                __supported_pte_mask &= ~_PAGE_NX;
        }
        return 0;
@@ -121,7 +114,7 @@ void __cpuinit check_efer(void)
        unsigned long efer;
 
        rdmsrl(MSR_EFER, efer);
-       if (!(efer & EFER_NX) || do_not_nx)
+       if (!(efer & EFER_NX) || disable_nx)
                __supported_pte_mask &= ~_PAGE_NX;
 }
 
@@ -168,34 +161,51 @@ static __ref void *spp_getpage(void)
        return ptr;
 }
 
-void
-set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
 {
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
+       if (pgd_none(*pgd)) {
+               pud_t *pud = (pud_t *)spp_getpage();
+               pgd_populate(&init_mm, pgd, pud);
+               if (pud != pud_offset(pgd, 0))
+                       printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
+                              pud, pud_offset(pgd, 0));
+       }
+       return pud_offset(pgd, vaddr);
+}
 
-       pud = pud_page + pud_index(vaddr);
+static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
+{
        if (pud_none(*pud)) {
-               pmd = (pmd_t *) spp_getpage();
+               pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
-               if (pmd != pmd_offset(pud, 0)) {
+               if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
-                               pmd, pmd_offset(pud, 0));
-                       return;
-               }
+                              pmd, pmd_offset(pud, 0));
        }
-       pmd = pmd_offset(pud, vaddr);
+       return pmd_offset(pud, vaddr);
+}
+
+static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
+{
        if (pmd_none(*pmd)) {
-               pte = (pte_t *) spp_getpage();
+               pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
-               if (pte != pte_offset_kernel(pmd, 0)) {
+               if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
-                       return;
-               }
        }
+       return pte_offset_kernel(pmd, vaddr);
+}
+
+void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
+{
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+
+       pud = pud_page + pud_index(vaddr);
+       pmd = fill_pmd(pud, vaddr);
+       pte = fill_pte(pmd, vaddr);
 
-       pte = pte_offset_kernel(pmd, vaddr);
        set_pte(pte, new_pte);
 
        /*
@@ -205,8 +215,7 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
        __flush_tlb_one(vaddr);
 }
 
-void
-set_pte_vaddr(unsigned long vaddr, pte_t pteval)
+void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
 {
        pgd_t *pgd;
        pud_t *pud_page;
@@ -223,6 +232,24 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval)
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
 }
 
+pmd_t * __init populate_extra_pmd(unsigned long vaddr)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+
+       pgd = pgd_offset_k(vaddr);
+       pud = fill_pud(pgd, vaddr);
+       return fill_pmd(pud, vaddr);
+}
+
+pte_t * __init populate_extra_pte(unsigned long vaddr)
+{
+       pmd_t *pmd;
+
+       pmd = populate_extra_pmd(vaddr);
+       return fill_pte(pmd, vaddr);
+}
+
 /*
  * Create large page table mappings for a range of physical addresses.
  */
@@ -291,13 +318,9 @@ void __init cleanup_highmap(void)
        }
 }
 
-static unsigned long __initdata table_start;
-static unsigned long __meminitdata table_end;
-static unsigned long __meminitdata table_top;
-
 static __ref void *alloc_low_page(unsigned long *phys)
 {
-       unsigned long pfn = table_end++;
+       unsigned long pfn = e820_table_end++;
        void *adr;
 
        if (after_bootmem) {
@@ -307,7 +330,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
                return adr;
        }
 
-       if (pfn >= table_top)
+       if (pfn >= e820_table_top)
                panic("alloc_low_page: ran out of memory");
 
        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
@@ -547,58 +570,10 @@ phys_pud_update(pgd_t *pgd, unsigned long addr, unsigned long end,
        return phys_pud_init(pud, addr, end, page_size_mask);
 }
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-                                         int use_gbpages)
-{
-       unsigned long puds, pmds, ptes, tables, start;
-
-       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-       if (use_gbpages) {
-               unsigned long extra;
-               extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-               pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-       } else
-               pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
-
-       if (use_pse) {
-               unsigned long extra;
-               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
-               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       } else
-               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
-
-       /*
-        * RED-PEN putting page tables only on node 0 could
-        * cause a hotspot and fill up ZONE_DMA. The page tables
-        * need roughly 0.5KB per GB.
-        */
-       start = 0x8000;
-       table_start = find_e820_area(start, end, tables, PAGE_SIZE);
-       if (table_start == -1UL)
-               panic("Cannot find space for the kernel page tables");
-
-       table_start >>= PAGE_SHIFT;
-       table_end = table_start;
-       table_top = table_start + (tables >> PAGE_SHIFT);
-
-       printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
-               end, table_start << PAGE_SHIFT, table_top << PAGE_SHIFT);
-}
-
-static void __init init_gbpages(void)
-{
-       if (direct_gbpages && cpu_has_gbpages)
-               printk(KERN_INFO "Using GB pages for direct mapping\n");
-       else
-               direct_gbpages = 0;
-}
-
-static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
-                                               unsigned long end,
-                                               unsigned long page_size_mask)
+unsigned long __init
+kernel_physical_mapping_init(unsigned long start,
+                            unsigned long end,
+                            unsigned long page_size_mask)
 {
 
        unsigned long next, last_map_addr = end;
@@ -635,176 +610,6 @@ static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
        return last_map_addr;
 }
 
-struct map_range {
-       unsigned long start;
-       unsigned long end;
-       unsigned page_size_mask;
-};
-
-#define NR_RANGE_MR 5
-
-static int save_mr(struct map_range *mr, int nr_range,
-                  unsigned long start_pfn, unsigned long end_pfn,
-                  unsigned long page_size_mask)
-{
-
-       if (start_pfn < end_pfn) {
-               if (nr_range >= NR_RANGE_MR)
-                       panic("run out of range for init_memory_mapping\n");
-               mr[nr_range].start = start_pfn<<PAGE_SHIFT;
-               mr[nr_range].end   = end_pfn<<PAGE_SHIFT;
-               mr[nr_range].page_size_mask = page_size_mask;
-               nr_range++;
-       }
-
-       return nr_range;
-}
-
-/*
- * Setup the direct mapping of the physical memory at PAGE_OFFSET.
- * This runs before bootmem is initialized and gets pages directly from
- * the physical memory. To access them they are temporarily mapped.
- */
-unsigned long __init_refok init_memory_mapping(unsigned long start,
-                                              unsigned long end)
-{
-       unsigned long last_map_addr = 0;
-       unsigned long page_size_mask = 0;
-       unsigned long start_pfn, end_pfn;
-       unsigned long pos;
-
-       struct map_range mr[NR_RANGE_MR];
-       int nr_range, i;
-       int use_pse, use_gbpages;
-
-       printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);
-
-       /*
-        * Find space for the kernel direct mapping tables.
-        *
-        * Later we should allocate these tables in the local node of the
-        * memory mapped. Unfortunately this is done currently before the
-        * nodes are discovered.
-        */
-       if (!after_bootmem)
-               init_gbpages();
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       /*
-        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
-        * This will simplify cpa(), which otherwise needs to support splitting
-        * large pages into small in interrupt context, etc.
-        */
-       use_pse = use_gbpages = 0;
-#else
-       use_pse = cpu_has_pse;
-       use_gbpages = direct_gbpages;
-#endif
-
-       if (use_gbpages)
-               page_size_mask |= 1 << PG_LEVEL_1G;
-       if (use_pse)
-               page_size_mask |= 1 << PG_LEVEL_2M;
-
-       memset(mr, 0, sizeof(mr));
-       nr_range = 0;
-
-       /* head if not big page alignment ?*/
-       start_pfn = start >> PAGE_SHIFT;
-       pos = start_pfn << PAGE_SHIFT;
-       end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
-                       << (PMD_SHIFT - PAGE_SHIFT);
-       if (end_pfn > (end >> PAGE_SHIFT))
-               end_pfn = end >> PAGE_SHIFT;
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* big page (2M) range*/
-       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-       end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                        << (PUD_SHIFT - PAGE_SHIFT);
-       if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
-               end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                               page_size_mask & (1<<PG_LEVEL_2M));
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* big page (1G) range */
-       start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
-                        << (PUD_SHIFT - PAGE_SHIFT);
-       end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                               page_size_mask &
-                                ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* tail is not big page (1G) alignment */
-       start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
-                        << (PMD_SHIFT - PAGE_SHIFT);
-       end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-       if (start_pfn < end_pfn) {
-               nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-                               page_size_mask & (1<<PG_LEVEL_2M));
-               pos = end_pfn << PAGE_SHIFT;
-       }
-
-       /* tail is not big page (2M) alignment */
-       start_pfn = pos>>PAGE_SHIFT;
-       end_pfn = end>>PAGE_SHIFT;
-       nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-
-       /* try to merge same page size and continuous */
-       for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
-               unsigned long old_start;
-               if (mr[i].end != mr[i+1].start ||
-                   mr[i].page_size_mask != mr[i+1].page_size_mask)
-                       continue;
-               /* move it */
-               old_start = mr[i].start;
-               memmove(&mr[i], &mr[i+1],
-                        (nr_range - 1 - i) * sizeof (struct map_range));
-               mr[i--].start = old_start;
-               nr_range--;
-       }
-
-       for (i = 0; i < nr_range; i++)
-               printk(KERN_DEBUG " %010lx - %010lx page %s\n",
-                               mr[i].start, mr[i].end,
-                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
-
-       if (!after_bootmem)
-               find_early_table_space(end, use_pse, use_gbpages);
-
-       for (i = 0; i < nr_range; i++)
-               last_map_addr = kernel_physical_mapping_init(
-                                       mr[i].start, mr[i].end,
-                                       mr[i].page_size_mask);
-
-       if (!after_bootmem)
-               mmu_cr4_features = read_cr4();
-       __flush_tlb_all();
-
-       if (!after_bootmem && table_end > table_start)
-               reserve_early(table_start << PAGE_SHIFT,
-                                table_end << PAGE_SHIFT, "PGTABLE");
-
-       printk(KERN_INFO "last_map_addr: %lx end: %lx\n",
-                        last_map_addr, end);
-
-       if (!after_bootmem)
-               early_memtest(start, end);
-
-       return last_map_addr >> PAGE_SHIFT;
-}
-
 #ifndef CONFIG_NUMA
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -876,28 +681,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/*
- * devmem_is_allowed() checks to see if /dev/mem access to a certain address
- * is valid. The argument is a physical page number.
- *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
- */
-int devmem_is_allowed(unsigned long pagenr)
-{
-       if (pagenr <= 256)
-               return 1;
-       if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
-               return 0;
-       if (!page_is_ram(pagenr))
-               return 1;
-       return 0;
-}
-
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
                         kcore_modules, kcore_vsyscall;
 
@@ -947,43 +730,6 @@ void __init mem_init(void)
                initsize >> 10);
 }
 
-void free_init_pages(char *what, unsigned long begin, unsigned long end)
-{
-       unsigned long addr = begin;
-
-       if (addr >= end)
-               return;
-
-       /*
-        * If debugging page accesses then do not free this memory but
-        * mark them not present - any buggy init-section access will
-        * create a kernel page fault:
-        */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-               begin, PAGE_ALIGN(end));
-       set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
-#else
-       printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
-
-       for (; addr < end; addr += PAGE_SIZE) {
-               ClearPageReserved(virt_to_page(addr));
-               init_page_count(virt_to_page(addr));
-               memset((void *)(addr & ~(PAGE_SIZE-1)),
-                       POISON_FREE_INITMEM, PAGE_SIZE);
-               free_page(addr);
-               totalram_pages++;
-       }
-#endif
-}
-
-void free_initmem(void)
-{
-       free_init_pages("unused kernel memory",
-                       (unsigned long)(&__init_begin),
-                       (unsigned long)(&__init_end));
-}
-
 #ifdef CONFIG_DEBUG_RODATA
 const int rodata_test_data = 0xC3;
 EXPORT_SYMBOL_GPL(rodata_test_data);
@@ -1022,13 +768,6 @@ void mark_rodata_ro(void)
 
 #endif
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-       free_init_pages("initrd memory", start, end);
-}
-#endif
-
 int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
 {
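
fill_pud(), fill_pmd() and fill_pte() above all share one shape: allocate the
next-level table if it is absent, then index into it. The following is a
hypothetical two-level toy table showing the same allocate-if-absent walk
outside the kernel; the shifts and table size are illustrative stand-ins:

#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512

struct level1 { long pte[ENTRIES]; };
struct level0 { struct level1 *dir[ENTRIES]; };

static struct level1 *fill_l1(struct level0 *top, unsigned long vaddr)
{
        unsigned long idx = (vaddr >> 21) & (ENTRIES - 1);

        if (!top->dir[idx]) {           /* absent: allocate the table */
                top->dir[idx] = calloc(1, sizeof(struct level1));
                if (!top->dir[idx])
                        exit(1);        /* allocation failure: bail */
        }
        return top->dir[idx];           /* then index into it */
}

static long *fill_pte(struct level0 *top, unsigned long vaddr)
{
        struct level1 *l1 = fill_l1(top, vaddr);

        return &l1->pte[(vaddr >> 12) & (ENTRIES - 1)];
}

int main(void)
{
        static struct level0 top;

        *fill_pte(&top, 0xdeadb000) = 42;       /* tables appear on demand */
        printf("%ld\n", *fill_pte(&top, 0xdeadb000));
        return 0;
}

With the helpers factored out this way, set_pte_vaddr_pud() in the hunk above
becomes a straight-line composition of them, which is the point of the
refactor.
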
index 04102d42ff4250873a519620958855e9b50770ff..6e60ba698cee3af9da346d6442726d22cde16b4e 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <asm/iomap.h>
 #include <asm/pat.h>
+#include <asm/highmem.h>
 #include <linux/module.h>
 
 int is_io_mapping_possible(resource_size_t base, unsigned long size)
@@ -31,16 +32,27 @@ int is_io_mapping_possible(resource_size_t base, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(is_io_mapping_possible);
 
-/* Map 'pfn' using fixed map 'type' and protections 'prot'
- */
-void *
-iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 {
        enum fixed_addresses idx;
        unsigned long vaddr;
 
        pagefault_disable();
 
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+       arch_flush_lazy_mmu_mode();
+
+       return (void *)vaddr;
+}
+
+/*
+ * Map 'pfn' using fixed map 'type' and protections 'prot'
+ */
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
        /*
         * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
         * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
@@ -50,12 +62,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
        if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
                prot = PAGE_KERNEL_UC_MINUS;
 
-       idx = type + KM_TYPE_NR*smp_processor_id();
-       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-       set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
-       arch_flush_lazy_mmu_mode();
-
-       return (void*) vaddr;
+       return kmap_atomic_prot_pfn(pfn, type, prot);
 }
 EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
 
index f45d5e29a72edf4e15616d48c0d174f7905ac417..55e127f71ed9014348e4b5935274849ddd264fff 100644 (file)
 #include <asm/pgalloc.h>
 #include <asm/pat.h>
 
-#ifdef CONFIG_X86_64
-
-static inline int phys_addr_valid(unsigned long addr)
+static inline int phys_addr_valid(resource_size_t addr)
 {
-       return addr < (1UL << boot_cpu_data.x86_phys_bits);
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+       return !(addr >> boot_cpu_data.x86_phys_bits);
+#else
+       return 1;
+#endif
 }
 
+#ifdef CONFIG_X86_64
+
 unsigned long __phys_addr(unsigned long x)
 {
        if (x >= __START_KERNEL_map) {
@@ -38,8 +42,7 @@ unsigned long __phys_addr(unsigned long x)
        } else {
                VIRTUAL_BUG_ON(x < PAGE_OFFSET);
                x -= PAGE_OFFSET;
-               VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
-                                       !phys_addr_valid(x));
+               VIRTUAL_BUG_ON(!phys_addr_valid(x));
        }
        return x;
 }
@@ -56,10 +59,8 @@ bool __virt_addr_valid(unsigned long x)
                if (x < PAGE_OFFSET)
                        return false;
                x -= PAGE_OFFSET;
-               if (system_state == SYSTEM_BOOTING ?
-                               x > MAXMEM : !phys_addr_valid(x)) {
+               if (!phys_addr_valid(x))
                        return false;
-               }
        }
 
        return pfn_valid(x >> PAGE_SHIFT);
@@ -68,18 +69,12 @@ EXPORT_SYMBOL(__virt_addr_valid);
 
 #else
 
-static inline int phys_addr_valid(unsigned long addr)
-{
-       return 1;
-}
-
 #ifdef CONFIG_DEBUG_VIRTUAL
 unsigned long __phys_addr(unsigned long x)
 {
-       /* VMALLOC_* aren't constants; not available at the boot time */
+       /* VMALLOC_* aren't constants */
        VIRTUAL_BUG_ON(x < PAGE_OFFSET);
-       VIRTUAL_BUG_ON(system_state != SYSTEM_BOOTING &&
-               is_vmalloc_addr((void *) x));
+       VIRTUAL_BUG_ON(__vmalloc_start_set && is_vmalloc_addr((void *) x));
        return x - PAGE_OFFSET;
 }
 EXPORT_SYMBOL(__phys_addr);
@@ -89,7 +84,9 @@ bool __virt_addr_valid(unsigned long x)
 {
        if (x < PAGE_OFFSET)
                return false;
-       if (system_state != SYSTEM_BOOTING && is_vmalloc_addr((void *) x))
+       if (__vmalloc_start_set && is_vmalloc_addr((void *) x))
+               return false;
+       if (x >= FIXADDR_START)
                return false;
        return pfn_valid((x - PAGE_OFFSET) >> PAGE_SHIFT);
 }
@@ -348,7 +345,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
@@ -490,7 +487,12 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
+#define __FIXADDR_TOP (-PAGE_SIZE)
+static pte_t bm_pte[(__fix_to_virt(FIX_DBGP_BASE)
+                    ^ __fix_to_virt(FIX_BTMAP_BEGIN)) >> PMD_SHIFT
+                   ? PAGE_SIZE / sizeof(pte_t) : 0] __page_aligned_bss;
+#undef __FIXADDR_TOP
+static __initdata pte_t *bm_ptep;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -505,19 +507,33 @@ static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 
 static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 {
+       if (!sizeof(bm_pte))
+               return &bm_ptep[pte_index(addr)];
        return &bm_pte[pte_index(addr)];
 }
 
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
 void __init early_ioremap_init(void)
 {
        pmd_t *pmd;
+       int i;
 
        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");
 
+       for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+               slot_virt[i] = fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+
        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-       memset(bm_pte, 0, sizeof(bm_pte));
-       pmd_populate_kernel(&init_mm, pmd, bm_pte);
+       if (sizeof(bm_pte)) {
+               memset(bm_pte, 0, sizeof(bm_pte));
+               pmd_populate_kernel(&init_mm, pmd, bm_pte);
+       } else {
+               bm_ptep = pte_offset_kernel(pmd, 0);
+               if (early_ioremap_debug)
+                       printk(KERN_INFO "bm_ptep=%p\n", bm_ptep);
+       }
 
        /*
         * The boot-ioremap range spans multiple pmds, for which
@@ -581,6 +597,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 
 static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+
 static int __init check_early_ioremap_leak(void)
 {
        int count = 0;
@@ -602,7 +619,8 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);
 
-static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *
+__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
        unsigned long offset, last_addr;
        unsigned int nrpages;
@@ -668,9 +686,9 @@ static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned lo
                --nrpages;
        }
        if (early_ioremap_debug)
-               printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+               printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
 
-       prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
+       prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
        return prev_map[slot];
 }
 
@@ -738,8 +756,3 @@ void __init early_iounmap(void __iomem *addr, unsigned long size)
        }
        prev_map[slot] = NULL;
 }
-
-void __this_fixmap_does_not_exist(void)
-{
-       WARN_ON(1);
-}
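
The new bm_pte declaration above uses a compile-time condition to size the
array either to a full page of ptes or to zero, and early_ioremap_pte() then
branches on sizeof(bm_pte), which the compiler folds to a constant. Here is a
hypothetical standalone sketch of that zero-length-array dispatch
(zero-length arrays are a GNU C extension, as in the kernel; SAME_PMD is an
illustrative stand-in for the fixmap pmd comparison):

#include <stdio.h>

#define SAME_PMD 0      /* assumed compile-time condition */

/* sized 16 or 0 entirely at compile time */
static long static_table[SAME_PMD ? 16 : 0];
static long *runtime_table;             /* fallback set up at runtime */

static long *lookup(int i)
{
        if (!sizeof(static_table))      /* constant: dead branch folded */
                return &runtime_table[i];
        return &static_table[i];        /* unreachable when sized 0 */
}

int main(void)
{
        static long fallback[16];

        runtime_table = fallback;
        *lookup(5) = 99;
        printf("%ld\n", *lookup(5));
        return 0;
}

The benefit mirrors the patch: when the static table is usable it costs one
page of bss and no runtime setup, and when it is not, every sizeof() test
compiles down to the fallback path with no branch left at runtime.
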
index 9cab18b0b857e2d199d54d402c4a9dd110a58d5c..605c8be06217b0da36344abd5f23d06326bcd87a 100644 (file)
@@ -9,44 +9,44 @@
 
 #include <asm/e820.h>
 
-static void __init memtest(unsigned long start_phys, unsigned long size,
-                                unsigned pattern)
+static u64 patterns[] __initdata = {
+       0,
+       0xffffffffffffffffULL,
+       0x5555555555555555ULL,
+       0xaaaaaaaaaaaaaaaaULL,
+       0x1111111111111111ULL,
+       0x2222222222222222ULL,
+       0x4444444444444444ULL,
+       0x8888888888888888ULL,
+       0x3333333333333333ULL,
+       0x6666666666666666ULL,
+       0x9999999999999999ULL,
+       0xccccccccccccccccULL,
+       0x7777777777777777ULL,
+       0xbbbbbbbbbbbbbbbbULL,
+       0xddddddddddddddddULL,
+       0xeeeeeeeeeeeeeeeeULL,
+       0x7a6c7258554e494cULL, /* yeah ;-) */
+};
+
+static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 {
-       unsigned long i;
-       unsigned long *start;
-       unsigned long start_bad;
-       unsigned long last_bad;
-       unsigned long val;
-       unsigned long start_phys_aligned;
-       unsigned long count;
-       unsigned long incr;
-
-       switch (pattern) {
-       case 0:
-               val = 0UL;
-               break;
-       case 1:
-               val = -1UL;
-               break;
-       case 2:
-#ifdef CONFIG_X86_64
-               val = 0x5555555555555555UL;
-#else
-               val = 0x55555555UL;
-#endif
-               break;
-       case 3:
-#ifdef CONFIG_X86_64
-               val = 0xaaaaaaaaaaaaaaaaUL;
-#else
-               val = 0xaaaaaaaaUL;
-#endif
-               break;
-       default:
-               return;
-       }
+       printk(KERN_INFO "  %016llx bad mem addr %010llx - %010llx reserved\n",
+              (unsigned long long) pattern,
+              (unsigned long long) start_bad,
+              (unsigned long long) end_bad);
+       reserve_early(start_bad, end_bad, "BAD RAM");
+}
+
+static void __init memtest(u64 pattern, u64 start_phys, u64 size)
+{
+       u64 i, count;
+       u64 *start;
+       u64 start_bad, last_bad;
+       u64 start_phys_aligned;
+       size_t incr;
 
-       incr = sizeof(unsigned long);
+       incr = sizeof(pattern);
        start_phys_aligned = ALIGN(start_phys, incr);
        count = (size - (start_phys_aligned - start_phys))/incr;
        start = __va(start_phys_aligned);
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size,
        last_bad = 0;
 
        for (i = 0; i < count; i++)
-               start[i] = val;
+               start[i] = pattern;
        for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
-               if (*start != val) {
-                       if (start_phys_aligned == last_bad + incr) {
-                               last_bad += incr;
-                       } else {
-                               if (start_bad) {
-                                       printk(KERN_CONT "\n  %016lx bad mem addr %010lx - %010lx reserved",
-                                               val, start_bad, last_bad + incr);
-                                       reserve_early(start_bad, last_bad + incr, "BAD RAM");
-                               }
-                               start_bad = last_bad = start_phys_aligned;
-                       }
+               if (*start == pattern)
+                       continue;
+               if (start_phys_aligned == last_bad + incr) {
+                       last_bad += incr;
+                       continue;
                }
+               if (start_bad)
+                       reserve_bad_mem(pattern, start_bad, last_bad + incr);
+               start_bad = last_bad = start_phys_aligned;
        }
-       if (start_bad) {
-               printk(KERN_CONT "\n  %016lx bad mem addr %010lx - %010lx reserved",
-                       val, start_bad, last_bad + incr);
-               reserve_early(start_bad, last_bad + incr, "BAD RAM");
+       if (start_bad)
+               reserve_bad_mem(pattern, start_bad, last_bad + incr);
+}
+
+static void __init do_one_pass(u64 pattern, u64 start, u64 end)
+{
+       u64 size = 0;
+
+       while (start < end) {
+               start = find_e820_area_size(start, &size, 1);
+
+               /* done ? */
+               if (start >= end)
+                       break;
+               if (start + size > end)
+                       size = end - start;
+
+               printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
+                      (unsigned long long) start,
+                      (unsigned long long) start + size,
+                      (unsigned long long) cpu_to_be64(pattern));
+               memtest(pattern, start, size);
+
+               start += size;
        }
 }
 
@@ -83,6 +100,9 @@ static int __init parse_memtest(char *arg)
 {
        if (arg)
                memtest_pattern = simple_strtoul(arg, NULL, 0);
+       else
+               memtest_pattern = ARRAY_SIZE(patterns);
+
        return 0;
 }
 
@@ -90,33 +110,22 @@ early_param("memtest", parse_memtest);
 
 void __init early_memtest(unsigned long start, unsigned long end)
 {
-       u64 t_start, t_size;
-       unsigned pattern;
+       unsigned int i;
+       unsigned int idx = 0;
 
        if (!memtest_pattern)
                return;
 
-       printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
-       for (pattern = 0; pattern < memtest_pattern; pattern++) {
-               t_start = start;
-               t_size = 0;
-               while (t_start < end) {
-                       t_start = find_e820_area_size(t_start, &t_size, 1);
-
-                       /* done ? */
-                       if (t_start >= end)
-                               break;
-                       if (t_start + t_size > end)
-                               t_size = end - t_start;
-
-                       printk(KERN_CONT "\n  %010llx - %010llx pattern %d",
-                               (unsigned long long)t_start,
-                               (unsigned long long)t_start + t_size, pattern);
-
-                       memtest(t_start, t_size, pattern);
+       printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
+       for (i = 0; i < memtest_pattern; i++) {
+               idx = i % ARRAY_SIZE(patterns);
+               do_one_pass(patterns[idx], start, end);
+       }
 
-                       t_start += t_size;
-               }
+       if (idx > 0) {
+               printk(KERN_INFO "early_memtest: wipe out "
+                      "test pattern from memory\n");
+               /* additional test with pattern 0 will do this */
+               do_one_pass(0, start, end);
        }
-       printk(KERN_CONT "\n");
 }
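
The rewritten loop above coalesces consecutive failing cells into a single
reserved range and only reports a range once it is closed. A minimal hosted
model of that coalescing (hypothetical names such as memtest_model and
report_bad; not the kernel code):

	#include <stdio.h>

	static void report_bad(unsigned long pattern,
			       unsigned long start, unsigned long end)
	{
		printf("  %016lx bad mem addr %010lx - %010lx reserved\n",
		       pattern, start, end);
	}

	static void memtest_model(const unsigned long *mem, int count,
				  unsigned long base, unsigned long incr,
				  unsigned long pattern)
	{
		unsigned long addr = base;	/* base must be non-zero:   */
		unsigned long start_bad = 0;	/* 0 doubles as the "no     */
		unsigned long last_bad = 0;	/* open run" sentinel       */
		int i;

		for (i = 0; i < count; i++, addr += incr) {
			if (mem[i] == pattern)		/* cell kept the pattern */
				continue;
			if (addr == last_bad + incr) {	/* extends the open run */
				last_bad += incr;
				continue;
			}
			if (start_bad)			/* close the previous run */
				report_bad(pattern, start_bad, last_bad + incr);
			start_bad = last_bad = addr;	/* open a new run */
		}
		if (start_bad)
			report_bad(pattern, start_bad, last_bad + incr);
	}

	int main(void)
	{
		/* cells 2, 3 and 5 "fail": expect two reserved ranges */
		unsigned long mem[8] = { ~0UL, ~0UL, 0, 0, ~0UL, 0, ~0UL, ~0UL };

		memtest_model(mem, 8, 0x1000, sizeof(unsigned long), ~0UL);
		return 0;
	}

Run against the sample array, this prints one range covering cells 2-3 and a
second covering cell 5, mirroring how reserve_bad_mem() is invoked above.
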
index 56fe7124fbec932c0b14f8d9dbe9b448fcdd88b7..1658296005668d7cb1013751d45cac1e4a22b15d 100644 (file)
@@ -4,7 +4,7 @@
  * Based on code by Ingo Molnar and Andi Kleen, copyrighted
  * as follows:
  *
- * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
+ * Copyright 2003-2009 Red Hat Inc.
  * All Rights Reserved.
  * Copyright 2005 Andi Kleen, SUSE Labs.
  * Copyright 2007 Jiri Kosina, SUSE Labs.
index d1f7439d173c2ba10791d6358dd90b4d50325442..3daefa04ace535f693b07e5700e0470e17ca93b8 100644 (file)
@@ -194,7 +194,7 @@ void *alloc_remap(int nid, unsigned long size)
        size = ALIGN(size, L1_CACHE_BYTES);
 
        if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
-               return 0;
+               return NULL;
 
        node_remap_alloc_vaddr[nid] += size;
        memset(allocation, 0, size);
@@ -416,39 +416,14 @@ void __init initmem_init(unsigned long start_pfn,
        for_each_online_node(nid)
                propagate_e820_map_node(nid);
 
-       for_each_online_node(nid)
+       for_each_online_node(nid) {
                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+               NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
+       }
 
-       NODE_DATA(0)->bdata = &bootmem_node_data[0];
        setup_bootmem_allocator();
 }
 
-void __init set_highmem_pages_init(void)
-{
-#ifdef CONFIG_HIGHMEM
-       struct zone *zone;
-       int nid;
-
-       for_each_zone(zone) {
-               unsigned long zone_start_pfn, zone_end_pfn;
-
-               if (!is_highmem(zone))
-                       continue;
-
-               zone_start_pfn = zone->zone_start_pfn;
-               zone_end_pfn = zone_start_pfn + zone->spanned_pages;
-
-               nid = zone_to_nid(zone);
-               printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
-                               zone->name, nid, zone_start_pfn, zone_end_pfn);
-
-               add_highpages_with_active_regions(nid, zone_start_pfn,
-                                zone_end_pfn);
-       }
-       totalram_pages += totalhigh_pages;
-#endif
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static int paddr_to_nid(u64 addr)
 {
index f3516da035d141947d04ddfbd61bd56f74623fd3..64c9cf043cddf77a20474c371f68ed485a3352c2 100644 (file)
 #include <asm/acpi.h>
 #include <asm/k8.h>
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
 struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
@@ -33,6 +39,21 @@ int numa_off __initdata;
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
+DEFINE_PER_CPU(int, node_number) = 0;
+EXPORT_PER_CPU_SYMBOL(node_number);
+
+/*
+ * Map cpu index to node index
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+/*
+ * Which logical CPUs are on which nodes
+ */
+cpumask_t *node_to_cpumask_map;
+EXPORT_SYMBOL(node_to_cpumask_map);
+
 /*
  * Given a shift value, try to populate memnodemap[]
  * Returns :
@@ -640,3 +661,199 @@ void __init init_cpu_to_node(void)
 #endif
 
 
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
+ */
+void __init setup_node_to_cpumask_map(void)
+{
+       unsigned int node, num = 0;
+       cpumask_t *map;
+
+       /* setup nr_node_ids if not done yet */
+       if (nr_node_ids == MAX_NUMNODES) {
+               for_each_node_mask(node, node_possible_map)
+                       num = node;
+               nr_node_ids = num + 1;
+       }
+
+       /* allocate the map */
+       map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+       DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
+
+       pr_debug("Node to cpumask map at %p for %d nodes\n",
+                map, nr_node_ids);
+
+       /* node_to_cpumask() will now work */
+       node_to_cpumask_map = map;
+}
+
+void __cpuinit numa_set_node(int cpu, int node)
+{
+       int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+       /* early setting, no percpu area yet */
+       if (cpu_to_node_map) {
+               cpu_to_node_map[cpu] = node;
+               return;
+       }
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+       if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+               printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+               dump_stack();
+               return;
+       }
+#endif
+       per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+       if (node != NUMA_NO_NODE)
+               per_cpu(node_number, cpu) = node;
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+       numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+       int node = early_cpu_to_node(cpu);
+       cpumask_t *mask;
+       char buf[64];
+
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_ERR "node_to_cpumask_map NULL\n");
+               dump_stack();
+               return;
+       }
+
+       mask = &node_to_cpumask_map[node];
+       if (enable)
+               cpu_set(cpu, *mask);
+       else
+               cpu_clear(cpu, *mask);
+
+       cpulist_scnprintf(buf, sizeof(buf), mask);
+       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+               enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 0);
+}
+
+int cpu_to_node(int cpu)
+{
+       if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+               printk(KERN_WARNING
+                       "cpu_to_node(%d): usage too early!\n", cpu);
+               dump_stack();
+               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+       }
+       return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(cpu_to_node);
+
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
+int early_cpu_to_node(int cpu)
+{
+       if (early_per_cpu_ptr(x86_cpu_to_node_map))
+               return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+       if (!cpu_possible(cpu)) {
+               printk(KERN_WARNING
+                       "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+               dump_stack();
+               return NUMA_NO_NODE;
+       }
+       return per_cpu(x86_cpu_to_node_map, cpu);
+}
+
+
+/* empty cpumask */
+static const cpumask_t cpu_mask_none;
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+const cpumask_t *cpumask_of_node(int node)
+{
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_WARNING
+                       "cpumask_of_node(%d): no node_to_cpumask_map!\n",
+                       node);
+               dump_stack();
+               return (const cpumask_t *)&cpu_online_map;
+       }
+       if (node >= nr_node_ids) {
+               printk(KERN_WARNING
+                       "cpumask_of_node(%d): node > nr_node_ids(%d)\n",
+                       node, nr_node_ids);
+               dump_stack();
+               return &cpu_mask_none;
+       }
+       return &node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(cpumask_of_node);
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ *
+ * Side note: this function creates the returned cpumask on the stack,
+ * so with a high NR_CPUS count it uses excessive stack space.  The
+ * node_to_cpumask_ptr function should be used whenever possible.
+ */
+cpumask_t node_to_cpumask(int node)
+{
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_WARNING
+                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+               dump_stack();
+               return cpu_online_map;
+       }
+       if (node >= nr_node_ids) {
+               printk(KERN_WARNING
+                       "node_to_cpumask(%d): node > nr_node_ids(%d)\n",
+                       node, nr_node_ids);
+               dump_stack();
+               return cpu_mask_none;
+       }
+       return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(node_to_cpumask);
+
+/*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
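
setup_node_to_cpumask_map() above sizes the map by the highest possible node
id plus one, not by the number of possible nodes, so sparse node numbering
leaves cheap holes instead of breaking direct indexing. A hosted sketch of
that derivation (hypothetical stand-ins: a 64-bit word for node_possible_map,
one unsigned long per node for cpumask_t):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		unsigned long long node_possible = 0x13;  /* nodes 0, 1, 4 */
		unsigned int node, num = 0, nr_node_ids;
		unsigned long *map;

		/* highest possible node id + 1, as in the kernel loop */
		for (node = 0; node < 64; node++)
			if (node_possible & (1ULL << node))
				num = node;
		nr_node_ids = num + 1;			/* 5, not 3 */

		/* one empty mask per node id, unused holes included */
		map = calloc(nr_node_ids, sizeof(*map));
		printf("node_to_cpumask_map at %p for %u nodes\n",
		       (void *)map, nr_node_ids);
		free(map);
		return 0;
	}
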
index 7233bd7e357bbccf2e6f091322606b083ca6f9d1..9c4294986af779ed62ab088af0dae0f0048eb0c9 100644 (file)
@@ -482,6 +482,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+       /*
+        * If we ever want to utilize the PAT bit, we need to
+        * update this function to make sure it's converted from
+        * bit 12 to bit 7 when we cross from the 2MB level to
+        * the 4K level:
+        */
+       WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
 
 #ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
index 21bc1f787ae2cd75e50bc2a58a9ff1f9b5f8ed3b..640339ee4fb2ae7ab79c02302cf26d0647966344 100644 (file)
@@ -31,7 +31,7 @@
 #ifdef CONFIG_X86_PAT
 int __read_mostly pat_enabled = 1;
 
-void __cpuinit pat_disable(char *reason)
+void __cpuinit pat_disable(const char *reason)
 {
        pat_enabled = 0;
        printk(KERN_INFO "%s\n", reason);
@@ -43,6 +43,11 @@ static int __init nopat(char *str)
        return 0;
 }
 early_param("nopat", nopat);
+#else
+static inline void pat_disable(const char *reason)
+{
+       (void)reason;
+}
 #endif
 
 
@@ -79,16 +84,20 @@ void pat_init(void)
        if (!pat_enabled)
                return;
 
-       /* Paranoia check. */
-       if (!cpu_has_pat && boot_pat_state) {
-               /*
-                * If this happens we are on a secondary CPU, but
-                * switched to PAT on the boot CPU. We have no way to
-                * undo PAT.
-                */
-               printk(KERN_ERR "PAT enabled, "
-                      "but not supported by secondary CPU\n");
-               BUG();
+       if (!cpu_has_pat) {
+               if (!boot_pat_state) {
+                       pat_disable("PAT not supported by CPU.");
+                       return;
+               } else {
+                       /*
+                        * If this happens we are on a secondary CPU, but
+                        * switched to PAT on the boot CPU. We have no way to
+                        * undo PAT.
+                        */
+                       printk(KERN_ERR "PAT enabled, "
+                              "but not supported by secondary CPU\n");
+                       BUG();
+               }
        }
 
        /* Set PWT to Write-Combining. All other bits stay the same */
@@ -625,6 +634,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
        free_memtype(addr, addr + size);
 }
 
+/*
+ * Change the memory type for the physical address range in kernel identity
+ * mapping space if that range is a part of identity map.
+ */
+int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
+{
+       unsigned long id_sz;
+
+       if (!pat_enabled || base >= __pa(high_memory))
+               return 0;
+
+       id_sz = (__pa(high_memory) < base + size) ?
+                               __pa(high_memory) - base :
+                               size;
+
+       if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
+               printk(KERN_INFO
+                       "%s:%d ioremap_change_attr failed %s "
+                       "for %Lx-%Lx\n",
+                       current->comm, current->pid,
+                       cattr_name(flags),
+                       base, (unsigned long long)(base + size));
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /*
  * Internal interface to reserve a range of physical memory with prot.
  * Reserved non RAM regions only and after successful reserve_memtype,
@@ -634,7 +670,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                int strict_prot)
 {
        int is_ram = 0;
-       int id_sz, ret;
+       int ret;
        unsigned long flags;
        unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
@@ -672,23 +708,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                                     flags);
        }
 
-       /* Need to keep identity mapping in sync */
-       if (paddr >= __pa(high_memory))
-               return 0;
-
-       id_sz = (__pa(high_memory) < paddr + size) ?
-                               __pa(high_memory) - paddr :
-                               size;
-
-       if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+       if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
                free_memtype(paddr, paddr + size);
-               printk(KERN_ERR
-                       "%s:%d reserve_pfn_range ioremap_change_attr failed %s "
-                       "for %Lx-%Lx\n",
-                       current->comm, current->pid,
-                       cattr_name(flags),
-                       (unsigned long long)paddr,
-                       (unsigned long long)(paddr + size));
                return -EINVAL;
        }
        return 0;
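
The only geometry in kernel_map_sync_memtype() is the clamp of the sync
length to the portion of the range that the kernel identity mapping actually
covers. Isolated as a pure function (a sketch under the hypothetical name
identity_sync_size, with __pa(high_memory) passed in as a plain value):

	static unsigned long long
	identity_sync_size(unsigned long long base, unsigned long size,
			   unsigned long long pa_high_memory)
	{
		if (base >= pa_high_memory)
			return 0;	/* entirely above the identity map */
		return (pa_high_memory < base + size) ?
			pa_high_memory - base :	/* tail sticks out: clamp */
			size;			/* fully covered: sync all */
	}
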
index 86f2ffc43c3d8b7cc28d2e019f570f1ecc6128c8..5b7c7c8464fe9f9dfeb36310a569b440a2bc5e38 100644 (file)
@@ -313,6 +313,24 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
        return young;
 }
 
+/**
+ * reserve_top_address - reserves a hole in the top of kernel address space
+ * @reserve: size of hole to reserve
+ *
+ * Can be used to relocate the fixmap area and poke a hole in the top
+ * of kernel address space to make room for a hypervisor.
+ */
+void __init reserve_top_address(unsigned long reserve)
+{
+#ifdef CONFIG_X86_32
+       BUG_ON(fixmaps_set > 0);
+       printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
+              (int)-reserve);
+       __FIXADDR_TOP = -reserve - PAGE_SIZE;
+       __VMALLOC_RESERVE += reserve;
+#endif
+}
+
 int fixmaps_set;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
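
On 32-bit, reserve_top_address() computes the new fixmap ceiling with plain
unsigned wraparound. A worked example assuming a 16 MB hole and 4 KB pages
(hosted sketch; uint32_t stands in for the 32-bit unsigned long):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t reserve = 16u << 20;	/* 16 MB hole at the top */
		uint32_t page_size = 4096;
		uint32_t fixaddr_top = -reserve - page_size;

		/* prints 0xff000000: space above it goes to the hypervisor */
		printf("Reserving virtual address space above 0x%08x\n",
		       -reserve);
		/* prints 0xfefff000: fixmap top drops below a one-page gap */
		printf("__FIXADDR_TOP = 0x%08x\n", fixaddr_top);
		return 0;
	}
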
index 0951db9ee5190b351a843a71a755109a6fd1586e..f2e477c91c1b85d72bb3d7ec0659e647c67c968c 100644 (file)
@@ -20,6 +20,8 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
+unsigned int __VMALLOC_RESERVE = 128 << 20;
+
 /*
  * Associate a virtual page frame with a given physical page frame 
  * and protection flags for that frame.
@@ -97,22 +99,6 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 unsigned long __FIXADDR_TOP = 0xfffff000;
 EXPORT_SYMBOL(__FIXADDR_TOP);
 
-/**
- * reserve_top_address - reserves a hole in the top of kernel address space
- * @reserve - size of hole to reserve
- *
- * Can be used to relocate the fixmap area and poke a hole in the top
- * of kernel address space to make room for a hypervisor.
- */
-void __init reserve_top_address(unsigned long reserve)
-{
-       BUG_ON(fixmaps_set > 0);
-       printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
-              (int)-reserve);
-       __FIXADDR_TOP = -reserve - PAGE_SIZE;
-       __VMALLOC_RESERVE += reserve;
-}
-
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
  * bytes. This can be used to increase (or decrease) the
index 09737c8af07479020e7183fef611c376e4fc1dad..574c8bc95ef05c9253214fc71d34371818f1c78e 100644 (file)
@@ -20,7 +20,8 @@
 #include <asm/proto.h>
 #include <asm/numa.h>
 #include <asm/e820.h>
-#include <asm/genapic.h>
+#include <asm/apic.h>
+#include <asm/uv/uv.h>
 
 int acpi_numa __initdata;
 
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
new file mode 100644 (file)
index 0000000..a654d59
--- /dev/null
@@ -0,0 +1,295 @@
+#include <linux/init.h>
+
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+
+#include <asm/tlbflush.h>
+#include <asm/mmu_context.h>
+#include <asm/apic.h>
+#include <asm/uv/uv.h>
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
+                       = { &init_mm, 0, };
+
+/*
+ *     Smarter SMP flushing macros.
+ *             c/o Linus Torvalds.
+ *
+ *     These mean you can really definitely utterly forget about
+ *	writing to user space from interrupts. (It's not allowed anyway).
+ *
+ *     Optimizations Manfred Spraul <manfred@colorfullife.com>
+ *
+ *     More scalable flush, from Andi Kleen
+ *
+ *     To avoid global state use 8 different call vectors.
+ *     Each CPU uses a specific vector to trigger flushes on other
+ *     CPUs. Depending on the received vector the target CPUs look into
+ *     the right array slot for the flush data.
+ *
+ *     With more than 8 CPUs they are hashed to the 8 available
+ *     vectors. The limited global vector space forces us to this right now.
+ *     In future when interrupts are split into per CPU domains this could be
+ *     fixed, at the cost of triggering multiple IPIs in some cases.
+ */
+
+union smp_flush_state {
+       struct {
+               struct mm_struct *flush_mm;
+               unsigned long flush_va;
+               spinlock_t tlbstate_lock;
+               DECLARE_BITMAP(flush_cpumask, NR_CPUS);
+       };
+       char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
+} ____cacheline_internodealigned_in_smp;
+
+/* State is put into the per CPU data section, but padded
+   to a full cache line because other CPUs can access it and we don't
+   want false sharing in the per cpu data segment. */
+static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
+
+/*
+ * We cannot call mmdrop() because we are in interrupt context,
+ * instead update mm->cpu_vm_mask.
+ */
+void leave_mm(int cpu)
+{
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
+               BUG();
+       cpu_clear(cpu, percpu_read(cpu_tlbstate.active_mm)->cpu_vm_mask);
+       load_cr3(swapper_pg_dir);
+}
+EXPORT_SYMBOL_GPL(leave_mm);
+
+/*
+ *
+ * The flush IPI assumes that a thread switch happens in this order:
+ * [cpu0: the cpu that switches]
+ * 1) switch_mm() either 1a) or 1b)
+ * 1a) thread switch to a different mm
+ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
+ *     Stop ipi delivery for the old mm. This is not synchronized with
+ *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
+ *     for the wrong mm, and in the worst case we perform a superfluous
+ *     tlb flush.
+ * 1a2) set cpu mmu_state to TLBSTATE_OK
+ *     Now the smp_invalidate_interrupt won't call leave_mm if cpu0
+ *     was in lazy tlb mode.
+ * 1a3) update cpu active_mm
+ *     Now cpu0 accepts tlb flushes for the new mm.
+ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
+ *     Now the other cpus will send tlb flush ipis.
+ * 1a5) change cr3.
+ * 1b) thread switch without mm change
+ *     cpu active_mm is correct, cpu0 already handles
+ *     flush ipis.
+ * 1b1) set cpu mmu_state to TLBSTATE_OK
+ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
+ *     Atomically set the bit [other cpus will start sending flush ipis],
+ *     and test the bit.
+ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
+ * 2) switch %%esp, ie current
+ *
+ * The interrupt must handle 2 special cases:
+ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
+ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
+ *   runs in kernel space, the cpu could load tlb entries for user space
+ *   pages.
+ *
+ * The good news is that cpu mmu_state is local to each cpu, no
+ * write/read ordering problems.
+ */
+
+/*
+ * TLB flush IPI:
+ *
+ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
+ * 2) Leave the mm if we are in the lazy tlb mode.
+ *
+ * Interrupts are disabled.
+ */
+
+/*
+ * FIXME: use of asmlinkage is not consistent.  On x86_64 it's a noop,
+ * kept there only for documentation purposes.  On x86_32, asmlinkage
+ * is regparm(0) but interrupt entry calls in with the first parameter
+ * in %eax.  Maybe define
+ * intrlinkage?
+ */
+#ifdef CONFIG_X86_64
+asmlinkage
+#endif
+void smp_invalidate_interrupt(struct pt_regs *regs)
+{
+       unsigned int cpu;
+       unsigned int sender;
+       union smp_flush_state *f;
+
+       cpu = smp_processor_id();
+       /*
+        * orig_rax contains the negated interrupt vector.
+        * Use that to determine where the sender put the data.
+        */
+       sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
+       f = &flush_state[sender];
+
+       if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
+               goto out;
+               /*
+                * This was a BUG() but until someone can quote me the
+                * line from the intel manual that guarantees an IPI to
+                * multiple CPUs is retried _only_ on the erroring CPUs
+		 * it's staying as a return
+                *
+                * BUG();
+                */
+
+       if (f->flush_mm == percpu_read(cpu_tlbstate.active_mm)) {
+               if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
+                       if (f->flush_va == TLB_FLUSH_ALL)
+                               local_flush_tlb();
+                       else
+                               __flush_tlb_one(f->flush_va);
+               } else
+                       leave_mm(cpu);
+       }
+out:
+       ack_APIC_irq();
+       smp_mb__before_clear_bit();
+       cpumask_clear_cpu(cpu, to_cpumask(f->flush_cpumask));
+       smp_mb__after_clear_bit();
+       inc_irq_stat(irq_tlb_count);
+}
+
+static void flush_tlb_others_ipi(const struct cpumask *cpumask,
+                                struct mm_struct *mm, unsigned long va)
+{
+       unsigned int sender;
+       union smp_flush_state *f;
+
+       /* Caller has disabled preemption */
+       sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
+       f = &flush_state[sender];
+
+       /*
+        * Could avoid this lock when
+        * num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
+        * probably not worth checking this for a cache-hot lock.
+        */
+       spin_lock(&f->tlbstate_lock);
+
+       f->flush_mm = mm;
+       f->flush_va = va;
+       cpumask_andnot(to_cpumask(f->flush_cpumask),
+                      cpumask, cpumask_of(smp_processor_id()));
+
+       /*
+        * Make the above memory operations globally visible before
+        * sending the IPI.
+        */
+       smp_mb();
+       /*
+        * We have to send the IPI only to
+        * CPUs affected.
+        */
+       apic->send_IPI_mask(to_cpumask(f->flush_cpumask),
+                     INVALIDATE_TLB_VECTOR_START + sender);
+
+       while (!cpumask_empty(to_cpumask(f->flush_cpumask)))
+               cpu_relax();
+
+       f->flush_mm = NULL;
+       f->flush_va = 0;
+       spin_unlock(&f->tlbstate_lock);
+}
+
+void native_flush_tlb_others(const struct cpumask *cpumask,
+                            struct mm_struct *mm, unsigned long va)
+{
+       if (is_uv_system()) {
+               unsigned int cpu;
+
+               cpu = get_cpu();
+               cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
+               if (cpumask)
+                       flush_tlb_others_ipi(cpumask, mm, va);
+               put_cpu();
+               return;
+       }
+       flush_tlb_others_ipi(cpumask, mm, va);
+}
+
+static int __cpuinit init_smp_flush(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(flush_state); i++)
+               spin_lock_init(&flush_state[i].tlbstate_lock);
+
+       return 0;
+}
+core_initcall(init_smp_flush);
+
+void flush_tlb_current_task(void)
+{
+       struct mm_struct *mm = current->mm;
+
+       preempt_disable();
+
+       local_flush_tlb();
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+       preempt_enable();
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       preempt_disable();
+
+       if (current->active_mm == mm) {
+               if (current->mm)
+                       local_flush_tlb();
+               else
+                       leave_mm(smp_processor_id());
+       }
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, TLB_FLUSH_ALL);
+
+       preempt_enable();
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+       struct mm_struct *mm = vma->vm_mm;
+
+       preempt_disable();
+
+       if (current->active_mm == mm) {
+               if (current->mm)
+                       __flush_tlb_one(va);
+               else
+                       leave_mm(smp_processor_id());
+       }
+
+       if (cpumask_any_but(&mm->cpu_vm_mask, smp_processor_id()) < nr_cpu_ids)
+               flush_tlb_others(&mm->cpu_vm_mask, mm, va);
+
+       preempt_enable();
+}
+
+static void do_flush_tlb_all(void *info)
+{
+       unsigned long cpu = smp_processor_id();
+
+       __flush_tlb_all();
+       if (percpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
+               leave_mm(cpu);
+}
+
+void flush_tlb_all(void)
+{
+       on_each_cpu(do_flush_tlb_all, NULL, 1);
+}
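
The comment block at the top of the new tlb.c describes hashing senders onto
the eight invalidate vectors. In isolation the hash is a plain modulo, which
also shows why each flush_state slot needs its tlbstate_lock once there are
more CPUs than vectors (hosted sketch):

	#include <stdio.h>

	#define NUM_INVALIDATE_TLB_VECTORS 8

	int main(void)
	{
		int cpu;

		/* cpus 0 and 8, 1 and 9, ... contend for the same slot */
		for (cpu = 0; cpu < 16; cpu++)
			printf("cpu %2d sends on vector slot %d\n",
			       cpu, cpu % NUM_INVALIDATE_TLB_VECTORS);
		return 0;
	}
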
index 82d22fc601ae3acc0ebb83df9d116e95ec66ea96..8c362b96b644953f018b2001541714dbbfab0c29 100644 (file)
@@ -90,7 +90,7 @@ static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
        return 0;
 }
 
-static struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitdata = {
+static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
 /*
  * Systems where PCI IO resource ISA alignment can be skipped
  * when the ISA enable bit in the bridge control is not set
@@ -183,7 +183,7 @@ static int __devinit assign_all_busses(const struct dmi_system_id *d)
 }
 #endif
 
-static struct dmi_system_id __devinitdata pciprobe_dmi_table[] = {
+static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
 #ifdef __i386__
 /*
  * Laptops which need pci=assign-busses to see Cardbus cards
index 7d388d5cf54852136da06d0ca08b04c646fc7199..9c49919e4d1c08d6d098aa34feb8cd46fb97e623 100644 (file)
@@ -356,7 +356,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video);
 
 
-static struct dmi_system_id __devinitdata msi_k8t_dmi_table[] = {
+static const struct dmi_system_id __devinitconst msi_k8t_dmi_table[] = {
        {
                .ident = "MSI-K8T-Neo2Fir",
                .matches = {
@@ -413,7 +413,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
  */
 static u16 toshiba_line_size;
 
-static struct dmi_system_id __devinitdata toshiba_ohci1394_dmi_table[] = {
+static const struct dmi_system_id __devinitconst toshiba_ohci1394_dmi_table[] = {
        {
                .ident = "Toshiba PS5 based laptop",
                .matches = {
index 2089354968a21f67ed3fe59e1e1cf26e777a1095..8eb295e116f626293e7219477f280b52b6aa765e 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/nodemask.h>
-#include <mach_apic.h>
+#include <asm/apic.h>
 #include <asm/mpspec.h>
 #include <asm/pci_x86.h>
 
 
 #define QUADLOCAL2BUS(quad,local) (quad_local_to_mp_bus_id[quad][local])
 
-/* Where the IO area was mapped on multiquad, always 0 otherwise */
-void *xquad_portio;
-EXPORT_SYMBOL(xquad_portio);
-
 #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port)
 
 #define PCI_CONF1_MQ_ADDRESS(bus, devfn, reg) \
index b82cae970dfd6d1d2dc9d5bbef3801841f57d1a8..1c975cc9839e0617e3b08d245e91277ec10450e3 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <asm/pci_x86.h>
-#include <asm/mach-default/pci-functions.h>
+#include <asm/pci-functions.h>
 
 /* BIOS32 signature: "_32_" */
 #define BIOS32_SIGNATURE       (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
index d1e9b53f9d3315dce9f618c08acce0e3ded959de..b641388d8286acfcf73bc6f7ca850609270739b3 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
 
index 000415947d93590e3f3230371c1d81fa35d2b600..9356547d8c012458971b527897946d2c6d4aafae 100644 (file)
@@ -18,7 +18,7 @@
        .text
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor-flags.h>
 
index 4d6ef0a336d6d3499580b32958d59c93639bc647..16a9020c8f11f74c14929a12e9d9f817fa075152 100644 (file)
@@ -38,7 +38,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS))
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector)
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
index 9c98cc6ba9783ce1c1caac812341924e43ae98aa..7133cdf9098b7ef0366871a92c29ef8aa5445209 100644 (file)
@@ -85,8 +85,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
-       if (end >= TASK_SIZE64)
-               end = TASK_SIZE64;
+       if (end >= TASK_SIZE_MAX)
+               end = TASK_SIZE_MAX;
        end -= len;
        /* This loses some more bits than a modulo, but is cheaper */
        offset = get_random_int() & (PTRS_PER_PTE - 1);
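
The "loses some more bits than a modulo" remark means the offset keeps only
the low 9 random bits (PTRS_PER_PTE is 512 on x86_64) instead of ranging
over the whole [start, end) window; for a power-of-two bound the AND is an
exact and cheaper modulo. A hosted sketch of that equivalence:

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		const uint32_t N = 512;		/* PTRS_PER_PTE on x86_64 */
		uint32_t x = 0xdeadbeef;	/* stand-in for get_random_int() */

		assert((x & (N - 1)) == (x % N));	/* holds: N is 2^9 */
		return 0;
	}
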
index 87b9ab166423bca3e46e27e3fa8518de89c30466..b83e119fbeb0d29a832d68d67409ae72bb3a7400 100644 (file)
@@ -6,7 +6,7 @@ config XEN
        bool "Xen guest support"
        select PARAVIRT
        select PARAVIRT_CLOCK
-       depends on X86_64 || (X86_32 && X86_PAE && !(X86_VISWS || X86_VOYAGER))
+       depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS)
        depends on X86_CMPXCHG && X86_TSC
        help
          This is the Linux Xen port.  Enabling this will allow the
index 6dcefba7836ff477decab964acb003dc7a35fe98..3b767d03fd6add7d6b12ae208e0f5e33ba25db4d 100644 (file)
@@ -6,7 +6,8 @@ CFLAGS_REMOVE_irq.o = -pg
 endif
 
 obj-y          := enlighten.o setup.o multicalls.o mmu.o irq.o \
-                       time.o xen-asm_$(BITS).o grant-table.o suspend.o
+                       time.o xen-asm.o xen-asm_$(BITS).o \
+                       grant-table.o suspend.o
 
 obj-$(CONFIG_SMP)              += smp.o spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)     += debugfs.o
\ No newline at end of file
index b58e9633814923aa5e13cd2f491086830528db6a..82cd39a6cbd3a116b83c5ce1cf30cae0214991f6 100644 (file)
@@ -61,40 +61,13 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 enum xen_domain_type xen_domain_type = XEN_NATIVE;
 EXPORT_SYMBOL_GPL(xen_domain_type);
 
-/*
- * Identity map, in addition to plain kernel map.  This needs to be
- * large enough to allocate page table pages to allocate the rest.
- * Each page can map 2MB.
- */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
-
-#ifdef CONFIG_X86_64
-/* l3 pud for userspace vsyscall mapping */
-static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
-#endif /* CONFIG_X86_64 */
-
-/*
- * Note about cr3 (pagetable base) values:
- *
- * xen_cr3 contains the current logical cr3 value; it contains the
- * last set cr3.  This may not be the current effective cr3, because
- * its update may be being lazily deferred.  However, a vcpu looking
- * at its own cr3 can use this value knowing that it everything will
- * be self-consistent.
- *
- * xen_current_cr3 contains the actual vcpu cr3; it is set once the
- * hypercall to set the vcpu cr3 is complete (so it may be a little
- * out of date, but it will never be set early).  If one vcpu is
- * looking at another vcpu's cr3 value, it should use this variable.
- */
-DEFINE_PER_CPU(unsigned long, xen_cr3);         /* cr3 stored as physaddr */
-DEFINE_PER_CPU(unsigned long, xen_current_cr3);         /* actual vcpu cr3 */
-
 struct start_info *xen_start_info;
 EXPORT_SYMBOL_GPL(xen_start_info);
 
 struct shared_info xen_dummy_shared_info;
 
+void *xen_initial_gdt;
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -114,14 +87,7 @@ struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;
  *
  * 0: not available, 1: available
  */
-static int have_vcpu_info_placement =
-#ifdef CONFIG_X86_32
-       1
-#else
-       0
-#endif
-       ;
-
+static int have_vcpu_info_placement = 1;
 
 static void xen_vcpu_setup(int cpu)
 {
@@ -137,7 +103,7 @@ static void xen_vcpu_setup(int cpu)
 
        vcpup = &per_cpu(xen_vcpu_info, cpu);
 
-       info.mfn = virt_to_mfn(vcpup);
+       info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);
 
        printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
@@ -237,7 +203,7 @@ static unsigned long xen_get_debugreg(int reg)
        return HYPERVISOR_get_debugreg(reg);
 }
 
-static void xen_leave_lazy(void)
+void xen_leave_lazy(void)
 {
        paravirt_leave_lazy(paravirt_get_lazy_mode());
        xen_mc_flush();
@@ -335,8 +301,10 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
        frames = mcs.args;
 
        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-               frames[f] = virt_to_mfn(va);
+               frames[f] = arbitrary_virt_to_mfn((void *)va);
+
                make_lowmem_page_readonly((void *)va);
+               make_lowmem_page_readonly(mfn_to_virt(frames[f]));
        }
 
        MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));
@@ -348,7 +316,7 @@ static void load_TLS_descriptor(struct thread_struct *t,
                                unsigned int cpu, unsigned int i)
 {
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
-       xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
+       xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
        struct multicall_space mc = __xen_mc_entry(0);
 
        MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
@@ -357,13 +325,14 @@ static void load_TLS_descriptor(struct thread_struct *t,
 static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
 {
        /*
-        * XXX sleazy hack: If we're being called in a lazy-cpu zone,
-        * it means we're in a context switch, and %gs has just been
-        * saved.  This means we can zero it out to prevent faults on
-        * exit from the hypervisor if the next process has no %gs.
-        * Either way, it has been saved, and the new value will get
-        * loaded properly.  This will go away as soon as Xen has been
-        * modified to not save/restore %gs for normal hypercalls.
+        * XXX sleazy hack: If we're being called in a lazy-cpu zone
+        * and lazy gs handling is enabled, it means we're in a
+        * context switch, and %gs has just been saved.  This means we
+        * can zero it out to prevent faults on exit from the
+        * hypervisor if the next process has no %gs.  Either way, it
+        * has been saved, and the new value will get loaded properly.
+        * This will go away as soon as Xen has been modified to not
+        * save/restore %gs for normal hypercalls.
         *
         * On x86_64, this hack is not used for %gs, because gs points
         * to KERNEL_GS_BASE (and uses it for PDA references), so we
@@ -375,7 +344,7 @@ static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
         */
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
 #ifdef CONFIG_X86_32
-               loadsegment(gs, 0);
+               lazy_load_gs(0);
 #else
                loadsegment(fs, 0);
 #endif
@@ -521,7 +490,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
                break;
 
        default: {
-               xmaddr_t maddr = virt_to_machine(&dt[entry]);
+               xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);
 
                xen_mc_flush();
                if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
@@ -587,94 +556,18 @@ static u32 xen_safe_apic_wait_icr_idle(void)
         return 0;
 }
 
-static struct apic_ops xen_basic_apic_ops = {
-       .read = xen_apic_read,
-       .write = xen_apic_write,
-       .icr_read = xen_apic_icr_read,
-       .icr_write = xen_apic_icr_write,
-       .wait_icr_idle = xen_apic_wait_icr_idle,
-       .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle,
-};
-
-#endif
-
-static void xen_flush_tlb(void)
-{
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-
-       preempt_disable();
-
-       mcs = xen_mc_entry(sizeof(*op));
-
-       op = mcs.args;
-       op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-       preempt_enable();
-}
-
-static void xen_flush_tlb_single(unsigned long addr)
+static void set_xen_basic_apic_ops(void)
 {
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-
-       preempt_disable();
-
-       mcs = xen_mc_entry(sizeof(*op));
-       op = mcs.args;
-       op->cmd = MMUEXT_INVLPG_LOCAL;
-       op->arg1.linear_addr = addr & PAGE_MASK;
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-
-       preempt_enable();
+       apic->read = xen_apic_read;
+       apic->write = xen_apic_write;
+       apic->icr_read = xen_apic_icr_read;
+       apic->icr_write = xen_apic_icr_write;
+       apic->wait_icr_idle = xen_apic_wait_icr_idle;
+       apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
 }
 
-static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
-                                unsigned long va)
-{
-       struct {
-               struct mmuext_op op;
-               cpumask_t mask;
-       } *args;
-       cpumask_t cpumask = *cpus;
-       struct multicall_space mcs;
-
-       /*
-        * A couple of (to be removed) sanity checks:
-        *
-        * - current CPU must not be in mask
-        * - mask must exist :)
-        */
-       BUG_ON(cpus_empty(cpumask));
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
-       BUG_ON(!mm);
-
-       /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (cpus_empty(cpumask))
-               return;
-
-       mcs = xen_mc_entry(sizeof(*args));
-       args = mcs.args;
-       args->mask = cpumask;
-       args->op.arg2.vcpumask = &args->mask;
-
-       if (va == TLB_FLUSH_ALL) {
-               args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-       } else {
-               args->op.cmd = MMUEXT_INVLPG_MULTI;
-               args->op.arg1.linear_addr = va;
-       }
-
-       MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+#endif
 
-       xen_mc_issue(PARAVIRT_LAZY_MMU);
-}
 
 static void xen_clts(void)
 {
@@ -700,21 +593,6 @@ static void xen_write_cr0(unsigned long cr0)
        xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-static void xen_write_cr2(unsigned long cr2)
-{
-       x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
-}
-
-static unsigned long xen_read_cr2(void)
-{
-       return x86_read_percpu(xen_vcpu)->arch.cr2;
-}
-
-static unsigned long xen_read_cr2_direct(void)
-{
-       return x86_read_percpu(xen_vcpu_info.arch.cr2);
-}
-
 static void xen_write_cr4(unsigned long cr4)
 {
        cr4 &= ~X86_CR4_PGE;
@@ -723,71 +601,6 @@ static void xen_write_cr4(unsigned long cr4)
        native_write_cr4(cr4);
 }
 
-static unsigned long xen_read_cr3(void)
-{
-       return x86_read_percpu(xen_cr3);
-}
-
-static void set_current_cr3(void *v)
-{
-       x86_write_percpu(xen_current_cr3, (unsigned long)v);
-}
-
-static void __xen_write_cr3(bool kernel, unsigned long cr3)
-{
-       struct mmuext_op *op;
-       struct multicall_space mcs;
-       unsigned long mfn;
-
-       if (cr3)
-               mfn = pfn_to_mfn(PFN_DOWN(cr3));
-       else
-               mfn = 0;
-
-       WARN_ON(mfn == 0 && kernel);
-
-       mcs = __xen_mc_entry(sizeof(*op));
-
-       op = mcs.args;
-       op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
-       op->arg1.mfn = mfn;
-
-       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
-
-       if (kernel) {
-               x86_write_percpu(xen_cr3, cr3);
-
-               /* Update xen_current_cr3 once the batch has actually
-                  been submitted. */
-               xen_mc_callback(set_current_cr3, (void *)cr3);
-       }
-}
-
-static void xen_write_cr3(unsigned long cr3)
-{
-       BUG_ON(preemptible());
-
-       xen_mc_batch();  /* disables interrupts */
-
-       /* Update while interrupts are disabled, so its atomic with
-          respect to ipis */
-       x86_write_percpu(xen_cr3, cr3);
-
-       __xen_write_cr3(true, cr3);
-
-#ifdef CONFIG_X86_64
-       {
-               pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
-               if (user_pgd)
-                       __xen_write_cr3(false, __pa(user_pgd));
-               else
-                       __xen_write_cr3(false, 0);
-       }
-#endif
-
-       xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
-}
-
 static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
 {
        int ret;
@@ -829,185 +642,6 @@ static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
        return ret;
 }
 
-/* Early in boot, while setting up the initial pagetable, assume
-   everything is pinned. */
-static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
-{
-#ifdef CONFIG_FLATMEM
-       BUG_ON(mem_map);        /* should only be used early */
-#endif
-       make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
-}
-
-/* Early release_pte assumes that all pts are pinned, since there's
-   only init_mm and anything attached to that is pinned. */
-static void xen_release_pte_init(unsigned long pfn)
-{
-       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-}
-
-static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
-{
-       struct mmuext_op op;
-       op.cmd = cmd;
-       op.arg1.mfn = pfn_to_mfn(pfn);
-       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
-               BUG();
-}
-
-/* This needs to make sure the new pte page is pinned iff its being
-   attached to a pinned pagetable. */
-static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
-{
-       struct page *page = pfn_to_page(pfn);
-
-       if (PagePinned(virt_to_page(mm->pgd))) {
-               SetPagePinned(page);
-
-               vm_unmap_aliases();
-               if (!PageHighMem(page)) {
-                       make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
-                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
-               } else {
-                       /* make sure there are no stray mappings of
-                          this page */
-                       kmap_flush_unused();
-               }
-       }
-}
-
-static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
-{
-       xen_alloc_ptpage(mm, pfn, PT_PTE);
-}
-
-static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
-{
-       xen_alloc_ptpage(mm, pfn, PT_PMD);
-}
-
-static int xen_pgd_alloc(struct mm_struct *mm)
-{
-       pgd_t *pgd = mm->pgd;
-       int ret = 0;
-
-       BUG_ON(PagePinned(virt_to_page(pgd)));
-
-#ifdef CONFIG_X86_64
-       {
-               struct page *page = virt_to_page(pgd);
-               pgd_t *user_pgd;
-
-               BUG_ON(page->private != 0);
-
-               ret = -ENOMEM;
-
-               user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-               page->private = (unsigned long)user_pgd;
-
-               if (user_pgd != NULL) {
-                       user_pgd[pgd_index(VSYSCALL_START)] =
-                               __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
-                       ret = 0;
-               }
-
-               BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
-       }
-#endif
-
-       return ret;
-}
-
-static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-#ifdef CONFIG_X86_64
-       pgd_t *user_pgd = xen_get_user_pgd(pgd);
-
-       if (user_pgd)
-               free_page((unsigned long)user_pgd);
-#endif
-}
-
-/* This should never happen until we're OK to use struct page */
-static void xen_release_ptpage(unsigned long pfn, unsigned level)
-{
-       struct page *page = pfn_to_page(pfn);
-
-       if (PagePinned(page)) {
-               if (!PageHighMem(page)) {
-                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
-                               pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
-                       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
-               }
-               ClearPagePinned(page);
-       }
-}
-
-static void xen_release_pte(unsigned long pfn)
-{
-       xen_release_ptpage(pfn, PT_PTE);
-}
-
-static void xen_release_pmd(unsigned long pfn)
-{
-       xen_release_ptpage(pfn, PT_PMD);
-}
-
-#if PAGETABLE_LEVELS == 4
-static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
-{
-       xen_alloc_ptpage(mm, pfn, PT_PUD);
-}
-
-static void xen_release_pud(unsigned long pfn)
-{
-       xen_release_ptpage(pfn, PT_PUD);
-}
-#endif
-
-#ifdef CONFIG_HIGHPTE
-static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
-{
-       pgprot_t prot = PAGE_KERNEL;
-
-       if (PagePinned(page))
-               prot = PAGE_KERNEL_RO;
-
-       if (0 && PageHighMem(page))
-               printk("mapping highpte %lx type %d prot %s\n",
-                      page_to_pfn(page), type,
-                      (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
-
-       return kmap_atomic_prot(page, type, prot);
-}
-#endif
-
-#ifdef CONFIG_X86_32
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                              pte_val_ma(pte));
-
-       return pte;
-}
-
-/* Init-time set_pte while constructing initial pagetables, which
-   doesn't allow RO pagetable pages to be remapped RW */
-static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
-{
-       pte = mask_rw_pte(ptep, pte);
-
-       xen_set_pte(ptep, pte);
-}
-#endif
-
-static __init void xen_pagetable_setup_start(pgd_t *base)
-{
-}
-
 void xen_setup_shared_info(void)
 {
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
@@ -1028,37 +662,6 @@ void xen_setup_shared_info(void)
        xen_setup_mfn_list_list();
 }
 
-static __init void xen_pagetable_setup_done(pgd_t *base)
-{
-       xen_setup_shared_info();
-}
-
-static __init void xen_post_allocator_init(void)
-{
-       pv_mmu_ops.set_pte = xen_set_pte;
-       pv_mmu_ops.set_pmd = xen_set_pmd;
-       pv_mmu_ops.set_pud = xen_set_pud;
-#if PAGETABLE_LEVELS == 4
-       pv_mmu_ops.set_pgd = xen_set_pgd;
-#endif
-
-       /* This will work as long as patching hasn't happened yet
-          (which it hasn't) */
-       pv_mmu_ops.alloc_pte = xen_alloc_pte;
-       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
-       pv_mmu_ops.release_pte = xen_release_pte;
-       pv_mmu_ops.release_pmd = xen_release_pmd;
-#if PAGETABLE_LEVELS == 4
-       pv_mmu_ops.alloc_pud = xen_alloc_pud;
-       pv_mmu_ops.release_pud = xen_release_pud;
-#endif
-
-#ifdef CONFIG_X86_64
-       SetPagePinned(virt_to_page(level3_user_vsyscall));
-#endif
-       xen_mark_init_mm_pinned();
-}
-
 /* This is called once we have the cpu_possible_map */
 void xen_setup_vcpu_info_placement(void)
 {
@@ -1072,10 +675,10 @@ void xen_setup_vcpu_info_placement(void)
        if (have_vcpu_info_placement) {
                printk(KERN_INFO "Xen: using vcpu_info placement\n");
 
-               pv_irq_ops.save_fl = xen_save_fl_direct;
-               pv_irq_ops.restore_fl = xen_restore_fl_direct;
-               pv_irq_ops.irq_disable = xen_irq_disable_direct;
-               pv_irq_ops.irq_enable = xen_irq_enable_direct;
+               pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
+               pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
+               pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
+               pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
        }
 }
@@ -1133,49 +736,6 @@ static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
        return ret;
 }
 
-static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
-{
-       pte_t pte;
-
-       phys >>= PAGE_SHIFT;
-
-       switch (idx) {
-       case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
-#ifdef CONFIG_X86_F00F_BUG
-       case FIX_F00F_IDT:
-#endif
-#ifdef CONFIG_X86_32
-       case FIX_WP_TEST:
-       case FIX_VDSO:
-# ifdef CONFIG_HIGHMEM
-       case FIX_KMAP_BEGIN ... FIX_KMAP_END:
-# endif
-#else
-       case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
-#endif
-#ifdef CONFIG_X86_LOCAL_APIC
-       case FIX_APIC_BASE:     /* maps dummy local APIC */
-#endif
-               pte = pfn_pte(phys, prot);
-               break;
-
-       default:
-               pte = mfn_pte(phys, prot);
-               break;
-       }
-
-       __native_set_fixmap(idx, pte);
-
-#ifdef CONFIG_X86_64
-       /* Replicate changes to map the vsyscall page into the user
-          pagetable vsyscall mapping. */
-       if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
-               unsigned long vaddr = __fix_to_virt(idx);
-               set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
-       }
-#endif
-}
-
 static const struct pv_info xen_info __initdata = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,
@@ -1271,87 +831,6 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
 #endif
 };
 
-static const struct pv_mmu_ops xen_mmu_ops __initdata = {
-       .pagetable_setup_start = xen_pagetable_setup_start,
-       .pagetable_setup_done = xen_pagetable_setup_done,
-
-       .read_cr2 = xen_read_cr2,
-       .write_cr2 = xen_write_cr2,
-
-       .read_cr3 = xen_read_cr3,
-       .write_cr3 = xen_write_cr3,
-
-       .flush_tlb_user = xen_flush_tlb,
-       .flush_tlb_kernel = xen_flush_tlb,
-       .flush_tlb_single = xen_flush_tlb_single,
-       .flush_tlb_others = xen_flush_tlb_others,
-
-       .pte_update = paravirt_nop,
-       .pte_update_defer = paravirt_nop,
-
-       .pgd_alloc = xen_pgd_alloc,
-       .pgd_free = xen_pgd_free,
-
-       .alloc_pte = xen_alloc_pte_init,
-       .release_pte = xen_release_pte_init,
-       .alloc_pmd = xen_alloc_pte_init,
-       .alloc_pmd_clone = paravirt_nop,
-       .release_pmd = xen_release_pte_init,
-
-#ifdef CONFIG_HIGHPTE
-       .kmap_atomic_pte = xen_kmap_atomic_pte,
-#endif
-
-#ifdef CONFIG_X86_64
-       .set_pte = xen_set_pte,
-#else
-       .set_pte = xen_set_pte_init,
-#endif
-       .set_pte_at = xen_set_pte_at,
-       .set_pmd = xen_set_pmd_hyper,
-
-       .ptep_modify_prot_start = __ptep_modify_prot_start,
-       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
-
-       .pte_val = xen_pte_val,
-       .pte_flags = native_pte_flags,
-       .pgd_val = xen_pgd_val,
-
-       .make_pte = xen_make_pte,
-       .make_pgd = xen_make_pgd,
-
-#ifdef CONFIG_X86_PAE
-       .set_pte_atomic = xen_set_pte_atomic,
-       .set_pte_present = xen_set_pte_at,
-       .pte_clear = xen_pte_clear,
-       .pmd_clear = xen_pmd_clear,
-#endif /* CONFIG_X86_PAE */
-       .set_pud = xen_set_pud_hyper,
-
-       .make_pmd = xen_make_pmd,
-       .pmd_val = xen_pmd_val,
-
-#if PAGETABLE_LEVELS == 4
-       .pud_val = xen_pud_val,
-       .make_pud = xen_make_pud,
-       .set_pgd = xen_set_pgd_hyper,
-
-       .alloc_pud = xen_alloc_pte_init,
-       .release_pud = xen_release_pte_init,
-#endif /* PAGETABLE_LEVELS == 4 */
-
-       .activate_mm = xen_activate_mm,
-       .dup_mmap = xen_dup_mmap,
-       .exit_mmap = xen_exit_mmap,
-
-       .lazy_mode = {
-               .enter = paravirt_enter_lazy_mmu,
-               .leave = xen_leave_lazy,
-       },
-
-       .set_fixmap = xen_set_fixmap,
-};
-
 static void xen_reboot(int reason)
 {
        struct sched_shutdown r = { .reason = reason };
@@ -1394,223 +873,6 @@ static const struct machine_ops __initdata xen_machine_ops = {
 };
 
 
-static void __init xen_reserve_top(void)
-{
-#ifdef CONFIG_X86_32
-       unsigned long top = HYPERVISOR_VIRT_START;
-       struct xen_platform_parameters pp;
-
-       if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
-               top = pp.virt_start;
-
-       reserve_top_address(-top);
-#endif /* CONFIG_X86_32 */
-}
-
-/*
- * Like __va(), but returns address in the kernel mapping (which is
- * all we have until the physical memory mapping has been set up).
- */
-static void *__ka(phys_addr_t paddr)
-{
-#ifdef CONFIG_X86_64
-       return (void *)(paddr + __START_KERNEL_map);
-#else
-       return __va(paddr);
-#endif
-}
-
-/* Convert a machine address to physical address */
-static unsigned long m2p(phys_addr_t maddr)
-{
-       phys_addr_t paddr;
-
-       maddr &= PTE_PFN_MASK;
-       paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
-
-       return paddr;
-}
-
-/* Convert a machine address to kernel virtual */
-static void *m2v(phys_addr_t maddr)
-{
-       return __ka(m2p(maddr));
-}
-
-static void set_page_prot(void *addr, pgprot_t prot)
-{
-       unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
-       pte_t pte = pfn_pte(pfn, prot);
-
-       if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
-               BUG();
-}
-
-static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
-{
-       unsigned pmdidx, pteidx;
-       unsigned ident_pte;
-       unsigned long pfn;
-
-       ident_pte = 0;
-       pfn = 0;
-       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
-               pte_t *pte_page;
-
-               /* Reuse or allocate a page of ptes */
-               if (pmd_present(pmd[pmdidx]))
-                       pte_page = m2v(pmd[pmdidx].pmd);
-               else {
-                       /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
-                               break;
-
-                       pte_page = &level1_ident_pgt[ident_pte];
-                       ident_pte += PTRS_PER_PTE;
-
-                       pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
-               }
-
-               /* Install mappings */
-               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
-                       pte_t pte;
-
-                       if (pfn > max_pfn_mapped)
-                               max_pfn_mapped = pfn;
-
-                       if (!pte_none(pte_page[pteidx]))
-                               continue;
-
-                       pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
-                       pte_page[pteidx] = pte;
-               }
-       }
-
-       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
-               set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
-
-       set_page_prot(pmd, PAGE_KERNEL_RO);
-}
-
-#ifdef CONFIG_X86_64
-static void convert_pfn_mfn(void *v)
-{
-       pte_t *pte = v;
-       int i;
-
-       /* All levels are converted the same way, so just treat them
-          as ptes. */
-       for (i = 0; i < PTRS_PER_PTE; i++)
-               pte[i] = xen_make_pte(pte[i].pte);
-}
-
-/*
- * Set up the inital kernel pagetable.
- *
- * We can construct this by grafting the Xen provided pagetable into
- * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
- * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
- * means that only the kernel has a physical mapping to start with -
- * but that's enough to get __va working.  We need to fill in the rest
- * of the physical mapping once some sort of allocator has been set
- * up.
- */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
-                                               unsigned long max_pfn)
-{
-       pud_t *l3;
-       pmd_t *l2;
-
-       /* Zap identity mapping */
-       init_level4_pgt[0] = __pgd(0);
-
-       /* Pre-constructed entries are in pfn, so convert to mfn */
-       convert_pfn_mfn(init_level4_pgt);
-       convert_pfn_mfn(level3_ident_pgt);
-       convert_pfn_mfn(level3_kernel_pgt);
-
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
-
-       memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-       memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
-       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
-       memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       /* Set up identity map */
-       xen_map_identity_early(level2_ident_pgt, max_pfn);
-
-       /* Make pagetable pieces RO */
-       set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
-       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-
-       /* Pin down new L4 */
-       pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
-                         PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
-       /* Unpin Xen-provided one */
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-       /* Switch over */
-       pgd = init_level4_pgt;
-
-       /*
-        * At this stage there can be no user pgd, and no page
-        * structure to attach it to, so make sure we just set kernel
-        * pgd.
-        */
-       xen_mc_batch();
-       __xen_write_cr3(true, __pa(pgd));
-       xen_mc_issue(PARAVIRT_LAZY_CPU);
-
-       reserve_early(__pa(xen_start_info->pt_base),
-                     __pa(xen_start_info->pt_base +
-                          xen_start_info->nr_pt_frames * PAGE_SIZE),
-                     "XEN PAGETABLES");
-
-       return pgd;
-}
-#else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
-
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
-                                               unsigned long max_pfn)
-{
-       pmd_t *kernel_pmd;
-
-       init_pg_tables_start = __pa(pgd);
-       init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
-       max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
-
-       kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
-       memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
-
-       xen_map_identity_early(level2_kernel_pgt, max_pfn);
-
-       memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
-       set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
-                       __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
-
-       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
-       set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
-       set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
-
-       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
-       xen_write_cr3(__pa(swapper_pg_dir));
-
-       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
-
-       return swapper_pg_dir;
-}
-#endif /* CONFIG_X86_64 */
-
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -1639,7 +901,7 @@ asmlinkage void __init xen_start_kernel(void)
        /*
         * set up the basic apic ops.
         */
-       apic_ops = &xen_basic_apic_ops;
+       set_xen_basic_apic_ops();
 #endif
 
        if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
@@ -1650,10 +912,18 @@ asmlinkage void __init xen_start_kernel(void)
        machine_ops = xen_machine_ops;
 
 #ifdef CONFIG_X86_64
-       /* Disable until direct per-cpu data access. */
-       have_vcpu_info_placement = 0;
-       x86_64_init_pda();
+       /*
+        * Set up percpu state.  We only need to do this for 64-bit
+        * because 32-bit already has %fs set properly.
+        */
+       load_percpu_segment(0);
 #endif
+       /*
+        * The only reliable way to retain the initial address of the
+        * percpu gdt_page is to remember it here, so we can go and
+        * mark it RW later, when the initial percpu area is freed.
+        */
+       xen_initial_gdt = &per_cpu(gdt_page, 0);
 
        xen_smp_init();
 
index bb042608c6023fd8214139d64befd8217797ba43..cfd17799bd6d23e8e2ce93e2862b09d4270fa442 100644 (file)
@@ -19,27 +19,12 @@ void xen_force_evtchn_callback(void)
        (void)HYPERVISOR_xen_version(0, NULL);
 }
 
-static void __init __xen_init_IRQ(void)
-{
-       int i;
-
-       /* Create identity vector->irq map */
-       for(i = 0; i < NR_VECTORS; i++) {
-               int cpu;
-
-               for_each_possible_cpu(cpu)
-                       per_cpu(vector_irq, cpu)[i] = i;
-       }
-
-       xen_init_IRQ();
-}
-
 static unsigned long xen_save_fl(void)
 {
        struct vcpu_info *vcpu;
        unsigned long flags;
 
-       vcpu = x86_read_percpu(xen_vcpu);
+       vcpu = percpu_read(xen_vcpu);
 
        /* flag has opposite sense of mask */
        flags = !vcpu->evtchn_upcall_mask;
@@ -50,6 +35,7 @@ static unsigned long xen_save_fl(void)
        */
        return (-flags) & X86_EFLAGS_IF;
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
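
The body of xen_save_fl() above turns the mask into an eflags value without a branch: the event mask has the opposite sense of the IF flag, so flags = !mask is 0 or 1, and negating it yields either all-ones or zero before the AND. A standalone C sketch of just that conversion (X86_EFLAGS_IF is the real bit value; mask_to_eflags is an invented name):

    #include <assert.h>
    #include <stdio.h>

    #define X86_EFLAGS_IF 0x200UL  /* interrupt-enable flag, bit 9 */

    /* mask has the opposite sense of IF: 0 means events are enabled */
    static unsigned long mask_to_eflags(unsigned char upcall_mask)
    {
        unsigned long flags = !upcall_mask;   /* 0 or 1 */
        /* -1UL is all ones, so the AND keeps IF; -0UL is 0, clearing it */
        return (-flags) & X86_EFLAGS_IF;
    }

    int main(void)
    {
        assert(mask_to_eflags(0) == X86_EFLAGS_IF);  /* unmasked -> IF set */
        assert(mask_to_eflags(1) == 0);              /* masked -> IF clear */
        printf("ok\n");
        return 0;
    }
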
 
 static void xen_restore_fl(unsigned long flags)
 {
@@ -62,7 +48,7 @@ static void xen_restore_fl(unsigned long flags)
           make sure we don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
-       vcpu = x86_read_percpu(xen_vcpu);
+       vcpu = percpu_read(xen_vcpu);
        vcpu->evtchn_upcall_mask = flags;
        preempt_enable_no_resched();
 
@@ -76,6 +62,7 @@ static void xen_restore_fl(unsigned long flags)
                        xen_force_evtchn_callback();
        }
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
 
 static void xen_irq_disable(void)
 {
@@ -83,9 +70,10 @@ static void xen_irq_disable(void)
           make sure we don't switch CPUs between getting the vcpu
           pointer and updating the mask. */
        preempt_disable();
-       x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
+       percpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
        preempt_enable_no_resched();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
 static void xen_irq_enable(void)
 {
@@ -96,7 +84,7 @@ static void xen_irq_enable(void)
           the caller is confused and is trying to re-enable interrupts
           on an indeterminate processor. */
 
-       vcpu = x86_read_percpu(xen_vcpu);
+       vcpu = percpu_read(xen_vcpu);
        vcpu->evtchn_upcall_mask = 0;
 
        /* Doesn't matter if we get preempted here, because any
@@ -106,6 +94,7 @@ static void xen_irq_enable(void)
        if (unlikely(vcpu->evtchn_upcall_pending))
                xen_force_evtchn_callback();
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
 
 static void xen_safe_halt(void)
 {
@@ -123,11 +112,13 @@ static void xen_halt(void)
 }
 
 static const struct pv_irq_ops xen_irq_ops __initdata = {
-       .init_IRQ = __xen_init_IRQ,
-       .save_fl = xen_save_fl,
-       .restore_fl = xen_restore_fl,
-       .irq_disable = xen_irq_disable,
-       .irq_enable = xen_irq_enable,
+       .init_IRQ = xen_init_IRQ,
+
+       .save_fl = PV_CALLEE_SAVE(xen_save_fl),
+       .restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
+       .irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
+       .irq_enable = PV_CALLEE_SAVE(xen_irq_enable),
+
        .safe_halt = xen_safe_halt,
        .halt = xen_halt,
 #ifdef CONFIG_X86_64
index 503c240e26c73539c2d4061f0d186b92a7c20c83..cb6afa4ec95c524ce8e95fa20b47b88a2f65dac9 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 #include <asm/paravirt.h>
 #include <asm/linkage.h>
 
@@ -55,6 +56,8 @@
 
 #include <xen/page.h>
 #include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+#include <xen/hvc-console.h>
 
 #include "multicalls.h"
 #include "mmu.h"
@@ -114,6 +117,37 @@ static inline void check_zero(void)
 
 #endif /* CONFIG_XEN_DEBUG_FS */
 
+
+/*
+ * Identity map, in addition to the plain kernel map.  This needs to
+ * be large enough to hold the page-table pages needed to map the
+ * rest of memory: each page of ptes here maps 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+
+#ifdef CONFIG_X86_64
+/* l3 pud for userspace vsyscall mapping */
+static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Note about cr3 (pagetable base) values:
+ *
+ * xen_cr3 contains the current logical cr3 value, i.e. the last cr3
+ * that was set.  This may not be the current effective cr3, because
+ * its update may be being lazily deferred.  However, a vcpu looking
+ * at its own cr3 can use this value knowing that everything will
+ * be self-consistent.
+ *
+ * xen_current_cr3 contains the actual vcpu cr3; it is set once the
+ * hypercall to set the vcpu cr3 is complete (so it may be a little
+ * out of date, but it will never be set early).  If one vcpu is
+ * looking at another vcpu's cr3 value, it should use this variable.
+ */
+DEFINE_PER_CPU(unsigned long, xen_cr3);         /* cr3 stored as physaddr */
+DEFINE_PER_CPU(unsigned long, xen_current_cr3);         /* actual vcpu cr3 */
+
+
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
  * redzone above it, so round it up to a PGD boundary.
@@ -242,6 +276,13 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
        p2m_top[topidx][idx] = mfn;
 }
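
set_phys_to_machine() above stores into a two-level pfn-to-mfn table: a top-level array of leaf pages, each holding a run of mfn entries, filled in lazily. A toy userspace model of that layout (sizes and names are illustrative, not the kernel's P2M constants):

    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES_PER_LEAF 512        /* illustrative leaf size */
    #define TOP_ENTRIES      1024       /* illustrative top size */
    #define INVALID_MFN      (~0UL)

    static unsigned long *p2m_top[TOP_ENTRIES]; /* lazily allocated leaves */

    static void set_p2m(unsigned long pfn, unsigned long mfn)
    {
        unsigned long topidx = pfn / ENTRIES_PER_LEAF;
        unsigned long idx = pfn % ENTRIES_PER_LEAF;
        unsigned long *leaf = p2m_top[topidx];

        if (!leaf) {                    /* allocate a leaf on first touch */
            leaf = malloc(ENTRIES_PER_LEAF * sizeof(*leaf));
            if (!leaf)
                return;                 /* sketch: no error reporting */
            for (unsigned long i = 0; i < ENTRIES_PER_LEAF; i++)
                leaf[i] = INVALID_MFN;
            p2m_top[topidx] = leaf;
        }
        leaf[idx] = mfn;
    }

    static unsigned long get_mfn(unsigned long pfn)
    {
        unsigned long *leaf = p2m_top[pfn / ENTRIES_PER_LEAF];
        return leaf ? leaf[pfn % ENTRIES_PER_LEAF] : INVALID_MFN;
    }

    int main(void)
    {
        set_p2m(1234, 98765);
        printf("pfn 1234 -> mfn %lu\n", get_mfn(1234));
        return 0;
    }
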
 
+unsigned long arbitrary_virt_to_mfn(void *vaddr)
+{
+       xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
+
+       return PFN_DOWN(maddr.maddr);
+}
+
 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
        unsigned long address = (unsigned long)vaddr;
@@ -458,28 +499,33 @@ pteval_t xen_pte_val(pte_t pte)
 {
        return pte_mfn_to_pfn(pte.pte);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
 pgdval_t xen_pgd_val(pgd_t pgd)
 {
        return pte_mfn_to_pfn(pgd.pgd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
 pte_t xen_make_pte(pteval_t pte)
 {
        pte = pte_pfn_to_mfn(pte);
        return native_make_pte(pte);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
 pmdval_t xen_pmd_val(pmd_t pmd)
 {
        return pte_mfn_to_pfn(pmd.pmd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
 void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
@@ -556,12 +602,14 @@ pmd_t xen_make_pmd(pmdval_t pmd)
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
 pudval_t xen_pud_val(pud_t pud)
 {
        return pte_mfn_to_pfn(pud.pud);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
 pud_t xen_make_pud(pudval_t pud)
 {
@@ -569,6 +617,7 @@ pud_t xen_make_pud(pudval_t pud)
 
        return native_make_pud(pud);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
 pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
@@ -1063,18 +1112,14 @@ static void drop_other_mm_ref(void *info)
        struct mm_struct *mm = info;
        struct mm_struct *active_mm;
 
-#ifdef CONFIG_X86_64
-       active_mm = read_pda(active_mm);
-#else
-       active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+       active_mm = percpu_read(cpu_tlbstate.active_mm);
 
        if (active_mm == mm)
                leave_mm(smp_processor_id());
 
        /* If this cpu still has a stale cr3 reference, then make sure
           it has been flushed. */
-       if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+       if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
                load_cr3(swapper_pg_dir);
                arch_flush_lazy_cpu_mode();
        }
@@ -1156,6 +1201,706 @@ void xen_exit_mmap(struct mm_struct *mm)
        spin_unlock(&mm->page_table_lock);
 }
 
+static __init void xen_pagetable_setup_start(pgd_t *base)
+{
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+       xen_setup_shared_info();
+}
+
+static void xen_write_cr2(unsigned long cr2)
+{
+       percpu_read(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+       return percpu_read(xen_vcpu)->arch.cr2;
+}
+
+unsigned long xen_read_cr2_direct(void)
+{
+       return percpu_read(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_flush_tlb(void)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
+
+       op = mcs.args;
+       op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
+}
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+
+       preempt_disable();
+
+       mcs = xen_mc_entry(sizeof(*op));
+       op = mcs.args;
+       op->cmd = MMUEXT_INVLPG_LOCAL;
+       op->arg1.linear_addr = addr & PAGE_MASK;
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+       preempt_enable();
+}
+
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+                                struct mm_struct *mm, unsigned long va)
+{
+       struct {
+               struct mmuext_op op;
+               DECLARE_BITMAP(mask, NR_CPUS);
+       } *args;
+       struct multicall_space mcs;
+
+       BUG_ON(cpumask_empty(cpus));
+       BUG_ON(!mm);
+
+       mcs = xen_mc_entry(sizeof(*args));
+       args = mcs.args;
+       args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+       /* Remove us, and any offline CPUS. */
+       cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+
+       if (va == TLB_FLUSH_ALL) {
+               args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+       } else {
+               args->op.cmd = MMUEXT_INVLPG_MULTI;
+               args->op.arg1.linear_addr = va;
+       }
+
+       MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
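
All three TLB flush paths above follow the same multicall protocol: reserve a batch slot with xen_mc_entry(), fill in one mmuext op, queue it, then xen_mc_issue() either flushes immediately or, under lazy MMU mode, leaves it queued for a later flush. A minimal sketch of that batching shape, with a printf standing in for the hypercall and invented names throughout:

    #include <stdio.h>

    #define BATCH_MAX 4

    struct op { int cmd; unsigned long arg; };

    static struct op batch[BATCH_MAX];
    static int batch_len;
    static int lazy_mode;           /* nonzero: defer flushing */

    static void mc_flush(void)      /* stands in for one hypercall */
    {
        for (int i = 0; i < batch_len; i++)
            printf("hypercall: cmd=%d arg=%lx\n", batch[i].cmd, batch[i].arg);
        batch_len = 0;
    }

    static struct op *mc_entry(void)
    {
        if (batch_len == BATCH_MAX) /* no room: flush, as __xen_mc_entry() does */
            mc_flush();
        return &batch[batch_len++];
    }

    static void mc_issue(void)
    {
        if (!lazy_mode)             /* under lazy mode, keep batching */
            mc_flush();
    }

    int main(void)
    {
        struct op *op = mc_entry();
        op->cmd = 1;                /* e.g. a local TLB flush */
        op->arg = 0;
        mc_issue();                 /* not lazy: goes out immediately */
        return 0;
    }
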
+static unsigned long xen_read_cr3(void)
+{
+       return percpu_read(xen_cr3);
+}
+
+static void set_current_cr3(void *v)
+{
+       percpu_write(xen_current_cr3, (unsigned long)v);
+}
+
+static void __xen_write_cr3(bool kernel, unsigned long cr3)
+{
+       struct mmuext_op *op;
+       struct multicall_space mcs;
+       unsigned long mfn;
+
+       if (cr3)
+               mfn = pfn_to_mfn(PFN_DOWN(cr3));
+       else
+               mfn = 0;
+
+       WARN_ON(mfn == 0 && kernel);
+
+       mcs = __xen_mc_entry(sizeof(*op));
+
+       op = mcs.args;
+       op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+       op->arg1.mfn = mfn;
+
+       MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+       if (kernel) {
+               percpu_write(xen_cr3, cr3);
+
+               /* Update xen_current_cr3 once the batch has actually
+                  been submitted. */
+               xen_mc_callback(set_current_cr3, (void *)cr3);
+       }
+}
+
+static void xen_write_cr3(unsigned long cr3)
+{
+       BUG_ON(preemptible());
+
+       xen_mc_batch();  /* disables interrupts */
+
+       /* Update while interrupts are disabled, so it's atomic with
+          respect to IPIs */
+       percpu_write(xen_cr3, cr3);
+
+       __xen_write_cr3(true, cr3);
+
+#ifdef CONFIG_X86_64
+       {
+               pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+               if (user_pgd)
+                       __xen_write_cr3(false, __pa(user_pgd));
+               else
+                       __xen_write_cr3(false, 0);
+       }
+#endif
+
+       xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
+}
+
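
xen_write_cr3() above updates the logical per-cpu xen_cr3 immediately but lets xen_current_cr3 trail behind: set_current_cr3() runs as a batch-completion callback, so the "actual" value only changes once the hypercall has really been submitted. A single-threaded toy of that two-variable scheme (all names invented; the real code is per-cpu and the callback fires from xen_mc_flush()):

    #include <stdio.h>

    static unsigned long cr3_logical;   /* last value requested */
    static unsigned long cr3_actual;    /* value the "hypervisor" has seen */

    static void (*pending_cb)(unsigned long);
    static unsigned long pending_arg;

    static void commit_cr3(unsigned long cr3) { cr3_actual = cr3; }

    static void mc_flush(void)          /* batch submission point */
    {
        /* ...the queued hypercalls would be issued here... */
        if (pending_cb) {
            pending_cb(pending_arg);    /* commit only after submission */
            pending_cb = NULL;
        }
    }

    static void write_cr3(unsigned long cr3)
    {
        cr3_logical = cr3;              /* own-vcpu readers can use this now */
        pending_cb = commit_cr3;        /* cross-vcpu readers wait for this */
        pending_arg = cr3;
    }

    int main(void)
    {
        write_cr3(0x1000);
        printf("logical=%lx actual=%lx\n", cr3_logical, cr3_actual);
        mc_flush();
        printf("logical=%lx actual=%lx\n", cr3_logical, cr3_actual);
        return 0;
    }
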
+static int xen_pgd_alloc(struct mm_struct *mm)
+{
+       pgd_t *pgd = mm->pgd;
+       int ret = 0;
+
+       BUG_ON(PagePinned(virt_to_page(pgd)));
+
+#ifdef CONFIG_X86_64
+       {
+               struct page *page = virt_to_page(pgd);
+               pgd_t *user_pgd;
+
+               BUG_ON(page->private != 0);
+
+               ret = -ENOMEM;
+
+               user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+               page->private = (unsigned long)user_pgd;
+
+               if (user_pgd != NULL) {
+                       user_pgd[pgd_index(VSYSCALL_START)] =
+                               __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+                       ret = 0;
+               }
+
+               BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+       }
+#endif
+
+       return ret;
+}
+
+static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_X86_64
+       pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+       if (user_pgd)
+               free_page((unsigned long)user_pgd);
+#endif
+}
+
+#ifdef CONFIG_HIGHPTE
+static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
+{
+       pgprot_t prot = PAGE_KERNEL;
+
+       if (PagePinned(page))
+               prot = PAGE_KERNEL_RO;
+
+       if (0 && PageHighMem(page))
+               printk("mapping highpte %lx type %d prot %s\n",
+                      page_to_pfn(page), type,
+                      (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
+
+       return kmap_atomic_prot(page, type, prot);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                              pte_val_ma(pte));
+
+       return pte;
+}
+
+/* Init-time set_pte while constructing initial pagetables, which
+   doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+       pte = mask_rw_pte(ptep, pte);
+
+       xen_set_pte(ptep, pte);
+}
+#endif
+
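
The masking expression in mask_rw_pte() above is worth unpacking: (old & _PAGE_RW) | ~_PAGE_RW is all-ones when the existing pte already had RW, and everything-except-RW otherwise, so ANDing it into the new value strips RW exactly when the slot was read-only. A small self-checking sketch (flag values match the x86 pte layout; mask_rw is an invented wrapper):

    #include <assert.h>
    #include <stdio.h>

    #define _PAGE_PRESENT 0x001UL
    #define _PAGE_RW      0x002UL   /* bit 1, as in the x86 pte layout */

    /* Keep _PAGE_RW in 'val' only if the existing, present pte had it. */
    static unsigned long mask_rw(unsigned long old, unsigned long val)
    {
        if (old & _PAGE_PRESENT)
            val &= (old & _PAGE_RW) | ~_PAGE_RW;
        return val;
    }

    int main(void)
    {
        /* existing RO pte: RW is stripped from the new value */
        assert(!(mask_rw(_PAGE_PRESENT, _PAGE_PRESENT | _PAGE_RW) & _PAGE_RW));
        /* existing RW pte: RW survives */
        assert(mask_rw(_PAGE_PRESENT | _PAGE_RW, _PAGE_RW) & _PAGE_RW);
        /* no existing pte: the new value passes through untouched */
        assert(mask_rw(0, _PAGE_RW) & _PAGE_RW);
        printf("ok\n");
        return 0;
    }
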
+/* Early in boot, while setting up the initial pagetable, assume
+   everything is pinned. */
+static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+       BUG_ON(mem_map);        /* should only be used early */
+#endif
+       make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* Early release_pte assumes that all pts are pinned, since there's
+   only init_mm and anything attached to that is pinned. */
+static void xen_release_pte_init(unsigned long pfn)
+{
+       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+       struct mmuext_op op;
+       op.cmd = cmd;
+       op.arg1.mfn = pfn_to_mfn(pfn);
+       if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+               BUG();
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+   attached to a pinned pagetable. */
+static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+{
+       struct page *page = pfn_to_page(pfn);
+
+       if (PagePinned(virt_to_page(mm->pgd))) {
+               SetPagePinned(page);
+
+               vm_unmap_aliases();
+               if (!PageHighMem(page)) {
+                       make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+                               pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+               } else {
+                       /* make sure there are no stray mappings of
+                          this page */
+                       kmap_flush_unused();
+               }
+       }
+}
+
+static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PTE);
+}
+
+static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PMD);
+}
+
+/* This should never happen until we're OK to use struct page */
+static void xen_release_ptpage(unsigned long pfn, unsigned level)
+{
+       struct page *page = pfn_to_page(pfn);
+
+       if (PagePinned(page)) {
+               if (!PageHighMem(page)) {
+                       if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+                               pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+                       make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+               }
+               ClearPagePinned(page);
+       }
+}
+
+static void xen_release_pte(unsigned long pfn)
+{
+       xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pmd(unsigned long pfn)
+{
+       xen_release_ptpage(pfn, PT_PMD);
+}
+
+#if PAGETABLE_LEVELS == 4
+static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+       xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(unsigned long pfn)
+{
+       xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
+void __init xen_reserve_top(void)
+{
+#ifdef CONFIG_X86_32
+       unsigned long top = HYPERVISOR_VIRT_START;
+       struct xen_platform_parameters pp;
+
+       if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
+               top = pp.virt_start;
+
+       reserve_top_address(-top);
+#endif /* CONFIG_X86_32 */
+}
+
+/*
+ * Like __va(), but returns the address in the kernel mapping (which
+ * is all we have until the physical memory mapping has been set up).
+ */
+static void *__ka(phys_addr_t paddr)
+{
+#ifdef CONFIG_X86_64
+       return (void *)(paddr + __START_KERNEL_map);
+#else
+       return __va(paddr);
+#endif
+}
+
+/* Convert a machine address to physical address */
+static unsigned long m2p(phys_addr_t maddr)
+{
+       phys_addr_t paddr;
+
+       maddr &= PTE_PFN_MASK;
+       paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
+
+       return paddr;
+}
+
+/* Convert a machine address to kernel virtual */
+static void *m2v(phys_addr_t maddr)
+{
+       return __ka(m2p(maddr));
+}
+
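
m2p() above is pure frame arithmetic: strip the pte flag bits with PTE_PFN_MASK, shift down to a machine frame number, translate, and shift back. A toy version with a fake mfn_to_pfn() (the real one consults the shared machine-to-physical table; the offset of 100 frames is arbitrary):

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PTE_PFN_MASK (0xFFFFFFFFFFULL << PAGE_SHIFT)  /* bits 12..51 */

    /* stand-in for the real machine-to-physical table lookup */
    static unsigned long long mfn_to_pfn(unsigned long long mfn)
    {
        return mfn - 100;
    }

    static unsigned long long m2p(unsigned long long maddr)
    {
        maddr &= PTE_PFN_MASK;  /* drop flag bits below/above the frame */
        return mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
    }

    int main(void)
    {
        /* machine frame 150 maps to pseudo-physical frame 50 (0x32000) */
        printf("paddr=%llx\n", m2p(150ULL << PAGE_SHIFT));
        return 0;
    }
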
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+       unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
+       pte_t pte = pfn_pte(pfn, prot);
+
+       if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+               BUG();
+}
+
+static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+{
+       unsigned pmdidx, pteidx;
+       unsigned ident_pte;
+       unsigned long pfn;
+
+       ident_pte = 0;
+       pfn = 0;
+       for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+               pte_t *pte_page;
+
+               /* Reuse or allocate a page of ptes */
+               if (pmd_present(pmd[pmdidx]))
+                       pte_page = m2v(pmd[pmdidx].pmd);
+               else {
+                       /* Check for free pte pages */
+                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                               break;
+
+                       pte_page = &level1_ident_pgt[ident_pte];
+                       ident_pte += PTRS_PER_PTE;
+
+                       pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+               }
+
+               /* Install mappings */
+               for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+                       pte_t pte;
+
+                       if (pfn > max_pfn_mapped)
+                               max_pfn_mapped = pfn;
+
+                       if (!pte_none(pte_page[pteidx]))
+                               continue;
+
+                       pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+                       pte_page[pteidx] = pte;
+               }
+       }
+
+       for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+               set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+
+       set_page_prot(pmd, PAGE_KERNEL_RO);
+}
+
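
The loop above stops once ident_pte reaches ARRAY_SIZE(level1_ident_pgt), i.e. when the statically reserved pte pages run out. With the four pte pages declared earlier in this patch, the arithmetic works out as below (a back-of-envelope check, assuming the 64-bit values of the constants):

    #include <stdio.h>

    #define PTRS_PER_PTE 512     /* x86-64 */
    #define PAGE_SIZE    4096UL
    #define IDENT_PAGES  4       /* level1_ident_pgt is PTRS_PER_PTE * 4 */

    int main(void)
    {
        unsigned long ptes = (unsigned long)IDENT_PAGES * PTRS_PER_PTE;

        /* each pmd slot consumes one whole pte page: 512 * 4K = 2MB */
        printf("pmd slots coverable: %lu\n", ptes / PTRS_PER_PTE);
        printf("early identity map:  %lu MB\n", ptes * PAGE_SIZE >> 20);
        return 0;   /* prints 4 slots, 8 MB */
    }
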
+#ifdef CONFIG_X86_64
+static void convert_pfn_mfn(void *v)
+{
+       pte_t *pte = v;
+       int i;
+
+       /* All levels are converted the same way, so just treat them
+          as ptes. */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               pte[i] = xen_make_pte(pte[i].pte);
+}
+
+/*
+ * Set up the initial kernel pagetable.
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
+ * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
+ * means that only the kernel has a physical mapping to start with -
+ * but that's enough to get __va working.  We need to fill in the rest
+ * of the physical mapping once some sort of allocator has been set
+ * up.
+ */
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                        unsigned long max_pfn)
+{
+       pud_t *l3;
+       pmd_t *l2;
+
+       /* Zap identity mapping */
+       init_level4_pgt[0] = __pgd(0);
+
+       /* Pre-constructed entries are in pfn, so convert to mfn */
+       convert_pfn_mfn(init_level4_pgt);
+       convert_pfn_mfn(level3_ident_pgt);
+       convert_pfn_mfn(level3_kernel_pgt);
+
+       l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+       l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+
+       memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+       memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+       l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+       memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       /* Set up identity map */
+       xen_map_identity_early(level2_ident_pgt, max_pfn);
+
+       /* Make pagetable pieces RO */
+       set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+       /* Pin down new L4 */
+       pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+                         PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+       /* Unpin Xen-provided one */
+       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+       /* Switch over */
+       pgd = init_level4_pgt;
+
+       /*
+        * At this stage there can be no user pgd, and no page
+        * structure to attach it to, so make sure we just set kernel
+        * pgd.
+        */
+       xen_mc_batch();
+       __xen_write_cr3(true, __pa(pgd));
+       xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+       reserve_early(__pa(xen_start_info->pt_base),
+                     __pa(xen_start_info->pt_base +
+                          xen_start_info->nr_pt_frames * PAGE_SIZE),
+                     "XEN PAGETABLES");
+
+       return pgd;
+}
+#else  /* !CONFIG_X86_64 */
+static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+                                        unsigned long max_pfn)
+{
+       pmd_t *kernel_pmd;
+
+       init_pg_tables_start = __pa(pgd);
+       init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+       max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
+
+       kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+       memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+
+       xen_map_identity_early(level2_kernel_pgt, max_pfn);
+
+       memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+       set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
+                       __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+
+       set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+       set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+       set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
+
+       pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+       xen_write_cr3(__pa(swapper_pg_dir));
+
+       pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+
+       return swapper_pg_dir;
+}
+#endif /* CONFIG_X86_64 */
+
+static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+{
+       pte_t pte;
+
+       phys >>= PAGE_SHIFT;
+
+       switch (idx) {
+       case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+#ifdef CONFIG_X86_F00F_BUG
+       case FIX_F00F_IDT:
+#endif
+#ifdef CONFIG_X86_32
+       case FIX_WP_TEST:
+       case FIX_VDSO:
+# ifdef CONFIG_HIGHMEM
+       case FIX_KMAP_BEGIN ... FIX_KMAP_END:
+# endif
+#else
+       case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+       case FIX_APIC_BASE:     /* maps dummy local APIC */
+#endif
+               pte = pfn_pte(phys, prot);
+               break;
+
+       default:
+               pte = mfn_pte(phys, prot);
+               break;
+       }
+
+       __native_set_fixmap(idx, pte);
+
+#ifdef CONFIG_X86_64
+       /* Replicate changes to map the vsyscall page into the user
+          pagetable vsyscall mapping. */
+       if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+               unsigned long vaddr = __fix_to_virt(idx);
+               set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
+       }
+#endif
+}
+
+__init void xen_post_allocator_init(void)
+{
+       pv_mmu_ops.set_pte = xen_set_pte;
+       pv_mmu_ops.set_pmd = xen_set_pmd;
+       pv_mmu_ops.set_pud = xen_set_pud;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.set_pgd = xen_set_pgd;
+#endif
+
+       /* This will work as long as patching hasn't happened yet
+          (which it hasn't) */
+       pv_mmu_ops.alloc_pte = xen_alloc_pte;
+       pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+       pv_mmu_ops.release_pte = xen_release_pte;
+       pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+       pv_mmu_ops.alloc_pud = xen_alloc_pud;
+       pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
+#ifdef CONFIG_X86_64
+       SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
+       xen_mark_init_mm_pinned();
+}
+
+
+const struct pv_mmu_ops xen_mmu_ops __initdata = {
+       .pagetable_setup_start = xen_pagetable_setup_start,
+       .pagetable_setup_done = xen_pagetable_setup_done,
+
+       .read_cr2 = xen_read_cr2,
+       .write_cr2 = xen_write_cr2,
+
+       .read_cr3 = xen_read_cr3,
+       .write_cr3 = xen_write_cr3,
+
+       .flush_tlb_user = xen_flush_tlb,
+       .flush_tlb_kernel = xen_flush_tlb,
+       .flush_tlb_single = xen_flush_tlb_single,
+       .flush_tlb_others = xen_flush_tlb_others,
+
+       .pte_update = paravirt_nop,
+       .pte_update_defer = paravirt_nop,
+
+       .pgd_alloc = xen_pgd_alloc,
+       .pgd_free = xen_pgd_free,
+
+       .alloc_pte = xen_alloc_pte_init,
+       .release_pte = xen_release_pte_init,
+       .alloc_pmd = xen_alloc_pte_init,
+       .alloc_pmd_clone = paravirt_nop,
+       .release_pmd = xen_release_pte_init,
+
+#ifdef CONFIG_HIGHPTE
+       .kmap_atomic_pte = xen_kmap_atomic_pte,
+#endif
+
+#ifdef CONFIG_X86_64
+       .set_pte = xen_set_pte,
+#else
+       .set_pte = xen_set_pte_init,
+#endif
+       .set_pte_at = xen_set_pte_at,
+       .set_pmd = xen_set_pmd_hyper,
+
+       .ptep_modify_prot_start = __ptep_modify_prot_start,
+       .ptep_modify_prot_commit = __ptep_modify_prot_commit,
+
+       .pte_val = PV_CALLEE_SAVE(xen_pte_val),
+       .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
+
+       .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+       .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
+
+#ifdef CONFIG_X86_PAE
+       .set_pte_atomic = xen_set_pte_atomic,
+       .set_pte_present = xen_set_pte_at,
+       .pte_clear = xen_pte_clear,
+       .pmd_clear = xen_pmd_clear,
+#endif /* CONFIG_X86_PAE */
+       .set_pud = xen_set_pud_hyper,
+
+       .make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
+       .pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
+
+#if PAGETABLE_LEVELS == 4
+       .pud_val = PV_CALLEE_SAVE(xen_pud_val),
+       .make_pud = PV_CALLEE_SAVE(xen_make_pud),
+       .set_pgd = xen_set_pgd_hyper,
+
+       .alloc_pud = xen_alloc_pte_init,
+       .release_pud = xen_release_pte_init,
+#endif /* PAGETABLE_LEVELS == 4 */
+
+       .activate_mm = xen_activate_mm,
+       .dup_mmap = xen_dup_mmap,
+       .exit_mmap = xen_exit_mmap,
+
+       .lazy_mode = {
+               .enter = paravirt_enter_lazy_mmu,
+               .leave = xen_leave_lazy,
+       },
+
+       .set_fixmap = xen_set_fixmap,
+};
+
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;
index 98d71659da5a15ceb21a08f090d1c457d52df775..24d1b44a337d6c3ad9ad1a0b274b07169e9837fd 100644 (file)
@@ -54,4 +54,7 @@ pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t
 void  xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte);
 
+unsigned long xen_read_cr2_direct(void);
+
+extern const struct pv_mmu_ops xen_mmu_ops;
 #endif /* _XEN_MMU_H */
index c738644b543502d6577ee07b42c25f6e52493131..8bff7e7c290b7d30ef805061115690e7267dc4a9 100644 (file)
@@ -39,6 +39,7 @@ struct mc_buffer {
        struct multicall_entry entries[MC_BATCH];
 #if MC_DEBUG
        struct multicall_entry debug[MC_BATCH];
+       void *caller[MC_BATCH];
 #endif
        unsigned char args[MC_ARGS];
        struct callback {
@@ -154,11 +155,12 @@ void xen_mc_flush(void)
                               ret, smp_processor_id());
                        dump_stack();
                        for (i = 0; i < b->mcidx; i++) {
-                               printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\n",
+                               printk(KERN_DEBUG "  call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
                                       i+1, b->mcidx,
                                       b->debug[i].op,
                                       b->debug[i].args[0],
-                                      b->entries[i].result);
+                                      b->entries[i].result,
+                                      b->caller[i]);
                        }
                }
 #endif
@@ -168,8 +170,6 @@ void xen_mc_flush(void)
        } else
                BUG_ON(b->argidx != 0);
 
-       local_irq_restore(flags);
-
        for (i = 0; i < b->cbidx; i++) {
                struct callback *cb = &b->callbacks[i];
 
@@ -177,7 +177,9 @@ void xen_mc_flush(void)
        }
        b->cbidx = 0;
 
-       BUG_ON(ret);
+       local_irq_restore(flags);
+
+       WARN_ON(ret);
 }
 
 struct multicall_space __xen_mc_entry(size_t args)
@@ -197,6 +199,9 @@ struct multicall_space __xen_mc_entry(size_t args)
        }
 
        ret.mc = &b->entries[b->mcidx];
+#ifdef MC_DEBUG
+       b->caller[b->mcidx] = __builtin_return_address(0);
+#endif
        b->mcidx++;
        ret.args = &b->args[argidx];
        b->argidx = argidx + args;
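
The MC_DEBUG hunk above tags every queued multicall with its call site via __builtin_return_address(0), which xen_mc_flush() then prints through the kernel's %pF symbolic-pointer format. The same trick works in plain C for any queue you might need to debug (toy names; plain %p here, since userspace printf has no %pF):

    #include <stdio.h>

    #define QUEUE_MAX 8

    static void *caller[QUEUE_MAX];   /* who queued each entry */
    static int   value[QUEUE_MAX];
    static int   count;

    /* noinline, so the recorded address is the real external call site */
    static __attribute__((noinline)) void enqueue(int v)
    {
        value[count] = v;
        caller[count] = __builtin_return_address(0);
        count++;
    }

    int main(void)
    {
        enqueue(1);
        enqueue(2);
        for (int i = 0; i < count; i++)
            printf("entry %d: value=%d queued from %p\n",
                   i, value[i], caller[i]);
        return 0;
    }
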
index fa3e10725d985021d55f9f44beaff3cd05c0ad7a..9e565da5d1f730f50d41fca98f4fbb86c69b5f29 100644 (file)
@@ -41,7 +41,7 @@ static inline void xen_mc_issue(unsigned mode)
                xen_mc_flush();
 
        /* restore flags saved in xen_mc_batch */
-       local_irq_restore(x86_read_percpu(xen_mc_irq_flags));
+       local_irq_restore(percpu_read(xen_mc_irq_flags));
 }
 
 /* Set up a callback to be called when the current batch is flushed */
index c44e2069c7c78078ab7ee895acf817689de48443..8d470562ffc9f632fda68855f80a9c90c116de5e 100644 (file)
@@ -50,11 +50,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
  */
 static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
 {
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_resched_count++;
-#else
-       add_pda(irq_resched_count, 1);
-#endif
+       inc_irq_stat(irq_resched_count);
 
        return IRQ_HANDLED;
 }
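
inc_irq_stat() collapses the old 32-bit/64-bit #ifdef pair above into one call because the macro takes a struct member name rather than a value. A sketch of why that shape removes the per-arch branching (toy per-cpu array; the real kernel resolves the per-cpu address differently):

    #include <stdio.h>

    #define NR_CPUS 4

    struct irq_stat { unsigned long irq_resched_count, irq_call_count; };

    static struct irq_stat stats[NR_CPUS];
    static int current_cpu;   /* stand-in for smp_processor_id() */

    /* one definition serves every counter: the member name is the argument */
    #define inc_irq_stat(member) (stats[current_cpu].member++)

    int main(void)
    {
        current_cpu = 2;
        inc_irq_stat(irq_resched_count);
        inc_irq_stat(irq_call_count);
        printf("cpu2: resched=%lu call=%lu\n",
               stats[2].irq_resched_count, stats[2].irq_call_count);
        return 0;
    }
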
@@ -78,7 +74,7 @@ static __cpuinit void cpu_bringup(void)
        xen_setup_cpu_clockevents();
 
        cpu_set(cpu, cpu_online_map);
-       x86_write_percpu(cpu_state, CPU_ONLINE);
+       percpu_write(cpu_state, CPU_ONLINE);
        wmb();
 
        /* We can take interrupts now: we're officially "up". */
@@ -174,7 +170,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
 
        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
-       make_lowmem_page_readwrite(&per_cpu_var(gdt_page));
+       make_lowmem_page_readwrite(xen_initial_gdt);
 
        xen_setup_vcpu_info_placement();
 }
@@ -223,6 +219,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
 {
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
+       unsigned long gdt_mfn;
 
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;
@@ -239,6 +236,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->user_regs.ss = __KERNEL_DS;
 #ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
+#else
+       ctxt->gs_base_kernel = per_cpu_offset(cpu);
 #endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
@@ -250,9 +249,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        ctxt->ldt_ents = 0;
 
        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
+
+       gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
+       make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));
 
-       ctxt->gdt_frames[0] = virt_to_mfn(gdt);
+       ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents      = GDT_ENTRIES;
 
        ctxt->user_regs.cs = __KERNEL_CS;
@@ -283,23 +285,14 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
        struct task_struct *idle = idle_task(cpu);
        int rc;
 
-#ifdef CONFIG_X86_64
-       /* Allocate node local memory for AP pdas */
-       WARN_ON(cpu == 0);
-       if (cpu > 0) {
-               rc = get_local_pda(cpu);
-               if (rc)
-                       return rc;
-       }
-#endif
-
-#ifdef CONFIG_X86_32
-       init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
+#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
 #else
-       cpu_pda(cpu)->pcurrent = idle;
        clear_tsk_thread_flag(idle, TIF_FORK);
+       per_cpu(kernel_stack, cpu) =
+               (unsigned long)task_stack_page(idle) -
+               KERNEL_STACK_OFFSET + THREAD_SIZE;
 #endif
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);
@@ -445,11 +438,7 @@ static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
 {
        irq_enter();
        generic_smp_call_function_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
@@ -459,11 +448,7 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
 {
        irq_enter();
        generic_smp_call_function_single_interrupt();
-#ifdef CONFIG_X86_32
-       __get_cpu_var(irq_stat).irq_call_count++;
-#else
-       add_pda(irq_call_count, 1);
-#endif
+       inc_irq_stat(irq_call_count);
        irq_exit();
 
        return IRQ_HANDLED;
index 212ffe012b76656231834f2a9026db28d289e080..95be7b434724c5067214dc416367633c522a69bb 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/page.h>
+#include <asm/fixmap.h>
 
 #include "xen-ops.h"
 #include "mmu.h"
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
new file mode 100644 (file)
index 0000000..79d7362
--- /dev/null
+++ b/arch/x86/xen/xen-asm.S
@@ -0,0 +1,142 @@
+/*
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in percpu data) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+
+#include "xen-asm.h"
+
+/*
+ * Enable events.  This clears the event mask and tests the pending
+ * event status with a single 'and' operation.  If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+ENTRY(xen_irq_enable_direct)
+       /* Unmask events */
+       movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+       /*
+        * Preempt here doesn't matter because that will deal with any
+        * pending interrupts.  The pending check may end up being run
+        * on the wrong CPU, but that doesn't hurt.
+        */
+
+       /* Test for pending */
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+       jz 1f
+
+2:     call check_events
+1:
+ENDPATCH(xen_irq_enable_direct)
+       ret
+       ENDPROC(xen_irq_enable_direct)
+       RELOC(xen_irq_enable_direct, 2b+1)
+
+
+/*
+ * Disabling events is simply a matter of making the event mask
+ * non-zero.
+ */
+ENTRY(xen_irq_disable_direct)
+       movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+ENDPATCH(xen_irq_disable_direct)
+       ret
+       ENDPROC(xen_irq_disable_direct)
+       RELOC(xen_irq_disable_direct, 0)
+
+/*
+ * (xen_)save_fl is used to get the current interrupt enable status.
+ * Callers expect the status to be in X86_EFLAGS_IF, and other bits
+ * may be set in the return value.  We take advantage of this by
+ * making sure that X86_EFLAGS_IF has the right value (and other bits
+ * in that byte are 0), but other bits in the return value are
+ * undefined.  We need to toggle the state of the bit, because Xen and
+ * x86 use opposite senses (mask vs enable).
+ */
+ENTRY(xen_save_fl_direct)
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+       setz %ah
+       addb %ah, %ah
+ENDPATCH(xen_save_fl_direct)
+       ret
+       ENDPROC(xen_save_fl_direct)
+       RELOC(xen_save_fl_direct, 0)
+
+
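
The setz/addb pair in xen_save_fl_direct above synthesizes X86_EFLAGS_IF in %ah: setz leaves 0 or 1 in bit 0, and adding %ah to itself shifts that into bit 1, which is exactly X86_EFLAGS_IF >> 8 since %ah holds bits 8-15 of %eax. A C restatement of that arithmetic (function name invented):

    #include <assert.h>
    #include <stdio.h>

    #define X86_EFLAGS_IF 0x200u   /* bit 9 of eflags */

    static unsigned int save_fl(unsigned char upcall_mask)
    {
        unsigned char ah = (upcall_mask == 0); /* setz: 1 iff unmasked */
        ah = ah + ah;                          /* addb %ah,%ah: bit0 -> bit1 */
        return (unsigned int)ah << 8;          /* %ah is bits 8..15 of %eax */
    }

    int main(void)
    {
        assert(save_fl(0) == X86_EFLAGS_IF);
        assert(save_fl(1) == 0);
        printf("ok\n");
        return 0;
    }
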
+/*
+ * In principle the caller should be passing us a value returned from
+ * xen_save_fl_direct, but for robustness' sake we test only the
+ * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
+ * interrupt mask state, it checks for unmasked pending events and
+ * enters the hypervisor to get them delivered if so.
+ */
+ENTRY(xen_restore_fl_direct)
+#ifdef CONFIG_X86_64
+       testw $X86_EFLAGS_IF, %di
+#else
+       testb $X86_EFLAGS_IF>>8, %ah
+#endif
+       setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+       /*
+        * Preempt here doesn't matter because that will deal with any
+        * pending interrupts.  The pending check may end up being run
+        * on the wrong CPU, but that doesn't hurt.
+        */
+
+       /* check for unmasked and pending */
+       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+       jz 1f
+2:     call check_events
+1:
+ENDPATCH(xen_restore_fl_direct)
+       ret
+       ENDPROC(xen_restore_fl_direct)
+       RELOC(xen_restore_fl_direct, 2b+1)
+
+
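
The cmpw $0x0001 in xen_restore_fl_direct above checks "pending and unmasked" in one 16-bit compare: evtchn_upcall_pending and evtchn_upcall_mask are adjacent bytes in vcpu_info, so on little-endian x86 the word equals 0x0001 exactly when pending is 1 and mask is 0. A C model of that load (the struct is a stand-in mirroring the two ABI bytes):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    struct vcpu_flags {        /* mirrors the two adjacent vcpu_info bytes */
        uint8_t pending;       /* offset 0: evtchn_upcall_pending */
        uint8_t mask;          /* offset 1: evtchn_upcall_mask */
    };

    static int unmasked_and_pending(const struct vcpu_flags *v)
    {
        uint16_t word;
        memcpy(&word, v, sizeof(word));  /* the single 16-bit load */
        return word == 0x0001;  /* little-endian: pending==1, mask==0 */
    }

    int main(void)
    {
        struct vcpu_flags v = { .pending = 1, .mask = 0 };
        assert(unmasked_and_pending(&v));
        v.mask = 1;             /* masked: the one compare now rejects it */
        assert(!unmasked_and_pending(&v));
        printf("ok\n");
        return 0;
    }
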
+/*
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
+ */
+check_events:
+#ifdef CONFIG_X86_32
+       push %eax
+       push %ecx
+       push %edx
+       call xen_force_evtchn_callback
+       pop %edx
+       pop %ecx
+       pop %eax
+#else
+       push %rax
+       push %rcx
+       push %rdx
+       push %rsi
+       push %rdi
+       push %r8
+       push %r9
+       push %r10
+       push %r11
+       call xen_force_evtchn_callback
+       pop %r11
+       pop %r10
+       pop %r9
+       pop %r8
+       pop %rdi
+       pop %rsi
+       pop %rdx
+       pop %rcx
+       pop %rax
+#endif
+       ret
diff --git a/arch/x86/xen/xen-asm.h b/arch/x86/xen/xen-asm.h
new file mode 100644 (file)
index 0000000..4652764
--- /dev/null
+++ b/arch/x86/xen/xen-asm.h
@@ -0,0 +1,12 @@
+#ifndef _XEN_XEN_ASM_H
+#define _XEN_XEN_ASM_H
+
+#include <linux/linkage.h>
+
+#define RELOC(x, v)    .globl x##_reloc; x##_reloc=v
+#define ENDPATCH(x)    .globl x##_end; x##_end=.
+
+/* Pseudo-flag used for virtual NMI, which we don't implement yet */
+#define XEN_EFLAGS_NMI 0x80000000
+
+#endif
index 42786f59d9c0016f84413e61d8312ffae63c0a6c..88e15deb8b8282744a62702790546f9795bfe4c1 100644 (file)
 /*
-       Asm versions of Xen pv-ops, suitable for either direct use or inlining.
-       The inline versions are the same as the direct-use versions, with the
-       pre- and post-amble chopped off.
-
-       This code is encoded for size rather than absolute efficiency,
-       with a view to being able to inline as much as possible.
-
-       We only bother with direct forms (ie, vcpu in pda) of the operations
-       here; the indirect forms are better handled in C, since they're
-       generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/percpu.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)    .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)    .globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-
-/*
-       Enable events.  This clears the event mask and tests the pending
-       event status with one and operation.  If there are pending
-       events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-       /* Unmask events */
-       movb $0, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* Test for pending */
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-       jz 1f
-
-2:     call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-       ret
-       ENDPROC(xen_irq_enable_direct)
-       RELOC(xen_irq_enable_direct, 2b+1)
-
-
-/*
-       Disabling events is simply a matter of making the event mask
-       non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-       movb $1, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-ENDPATCH(xen_irq_disable_direct)
-       ret
-       ENDPROC(xen_irq_disable_direct)
-       RELOC(xen_irq_disable_direct, 0)
+#include "xen-asm.h"
 
 /*
-       (xen_)save_fl is used to get the current interrupt enable status.
-       Callers expect the status to be in X86_EFLAGS_IF, and other bits
-       may be set in the return value.  We take advantage of this by
-       making sure that X86_EFLAGS_IF has the right value (and other bits
-       in that byte are 0), but other bits in the return value are
-       undefined.  We need to toggle the state of the bit, because
-       Xen and x86 use opposite senses (mask vs enable).
+ * Force an event check by making a hypercall, but preserve regs
+ * before making the call.
  */
-ENTRY(xen_save_fl_direct)
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-       setz %ah
-       addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-       ret
-       ENDPROC(xen_save_fl_direct)
-       RELOC(xen_save_fl_direct, 0)
-
-
-/*
-       In principle the caller should be passing us a value return
-       from xen_save_fl_direct, but for robustness sake we test only
-       the X86_EFLAGS_IF flag rather than the whole byte. After
-       setting the interrupt mask state, it checks for unmasked
-       pending events and enters the hypervisor to get them delivered
-       if so.
- */
-ENTRY(xen_restore_fl_direct)
-       testb $X86_EFLAGS_IF>>8, %ah
-       setz PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_mask
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* check for unmasked and pending */
-       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info)+XEN_vcpu_info_pending
-       jz 1f
-2:     call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
+check_events:
+       push %eax
+       push %ecx
+       push %edx
+       call xen_force_evtchn_callback
+       pop %edx
+       pop %ecx
+       pop %eax
        ret
-       ENDPROC(xen_restore_fl_direct)
-       RELOC(xen_restore_fl_direct, 2b+1)
 
 /*
-       We can't use sysexit directly, because we're not running in ring0.
-       But we can easily fake it up using iret.  Assuming xen_sysexit
-       is jumped to with a standard stack frame, we can just strip it
-       back to a standard iret frame and use iret.
+ * We can't use sysexit directly, because we're not running in ring0.
+ * But we can easily fake it up using iret.  Assuming xen_sysexit is
+ * jumped to with a standard stack frame, we can just strip it back to
+ * a standard iret frame and use iret.
  */
 ENTRY(xen_sysexit)
        movl PT_EAX(%esp), %eax                 /* Shouldn't be necessary? */
@@ -122,33 +48,31 @@ ENTRY(xen_sysexit)
 ENDPROC(xen_sysexit)
 
 /*
-       This is run where a normal iret would be run, with the same stack setup:
-             8: eflags
-             4: cs
-       esp-> 0: eip
-
-       This attempts to make sure that any pending events are dealt
-       with on return to usermode, but there is a small window in
-       which an event can happen just before entering usermode.  If
-       the nested interrupt ends up setting one of the TIF_WORK_MASK
-       pending work flags, they will not be tested again before
-       returning to usermode. This means that a process can end up
-       with pending work, which will be unprocessed until the process
-       enters and leaves the kernel again, which could be an
-       unbounded amount of time.  This means that a pending signal or
-       reschedule event could be indefinitely delayed.
-
-       The fix is to notice a nested interrupt in the critical
-       window, and if one occurs, then fold the nested interrupt into
-       the current interrupt stack frame, and re-process it
-       iteratively rather than recursively.  This means that it will
-       exit via the normal path, and all pending work will be dealt
-       with appropriately.
-
-       Because the nested interrupt handler needs to deal with the
-       current stack state in whatever form its in, we keep things
-       simple by only using a single register which is pushed/popped
-       on the stack.
+ * This is run where a normal iret would be run, with the same stack setup:
+ *     8: eflags
+ *     4: cs
+ *     esp-> 0: eip
+ *
+ * This attempts to make sure that any pending events are dealt with
+ * on return to usermode, but there is a small window in which an
+ * event can happen just before entering usermode.  If the nested
+ * interrupt ends up setting one of the TIF_WORK_MASK pending work
+ * flags, they will not be tested again before returning to
+ * usermode. This means that a process can end up with pending work,
+ * which will be unprocessed until the process enters and leaves the
+ * kernel again, which could be an unbounded amount of time.  This
+ * means that a pending signal or reschedule event could be
+ * indefinitely delayed.
+ *
+ * The fix is to notice a nested interrupt in the critical window, and
+ * if one occurs, then fold the nested interrupt into the current
+ * interrupt stack frame, and re-process it iteratively rather than
+ * recursively.  This means that it will exit via the normal path, and
+ * all pending work will be dealt with appropriately.
+ *
+ * Because the nested interrupt handler needs to deal with the current
+ * stack state in whatever form it's in, we keep things simple by only
+ * using a single register which is pushed/popped on the stack.
  */
 ENTRY(xen_iret)
        /* test eflags for special cases */
@@ -158,13 +82,15 @@ ENTRY(xen_iret)
        push %eax
        ESP_OFFSET=4    # bytes pushed onto stack
 
-       /* Store vcpu_info pointer for easy access.  Do it this
-          way to avoid having to reload %fs */
+       /*
+        * Store vcpu_info pointer for easy access.  Do it this way to
+        * avoid having to reload %fs
+        */
 #ifdef CONFIG_SMP
        GET_THREAD_INFO(%eax)
-       movl TI_cpu(%eax),%eax
-       movl __per_cpu_offset(,%eax,4),%eax
-       mov per_cpu__xen_vcpu(%eax),%eax
+       movl TI_cpu(%eax), %eax
+       movl __per_cpu_offset(,%eax,4), %eax
+       mov per_cpu__xen_vcpu(%eax), %eax
 #else
        movl per_cpu__xen_vcpu, %eax
 #endif
@@ -172,37 +98,46 @@ ENTRY(xen_iret)
        /* check IF state we're restoring */
        testb $X86_EFLAGS_IF>>8, 8+1+ESP_OFFSET(%esp)
 
-       /* Maybe enable events.  Once this happens we could get a
-          recursive event, so the critical region starts immediately
-          afterwards.  However, if that happens we don't end up
-          resuming the code, so we don't have to be worried about
-          being preempted to another CPU. */
+       /*
+        * Maybe enable events.  Once this happens we could get a
+        * recursive event, so the critical region starts immediately
+        * afterwards.  However, if that happens we don't end up
+        * resuming the code, so we don't have to be worried about
+        * being preempted to another CPU.
+        */
        setz XEN_vcpu_info_mask(%eax)
 xen_iret_start_crit:
 
        /* check for unmasked and pending */
        cmpw $0x0001, XEN_vcpu_info_pending(%eax)
 
-       /* If there's something pending, mask events again so we
-          can jump back into xen_hypervisor_callback */
+       /*
+        * If there's something pending, mask events again so we can
+        * jump back into xen_hypervisor_callback
+        */
        sete XEN_vcpu_info_mask(%eax)
 
        popl %eax
 
-       /* From this point on the registers are restored and the stack
-          updated, so we don't need to worry about it if we're preempted */
+       /*
+        * From this point on the registers are restored and the stack
+        * updated, so we don't need to worry about it if we're
+        * preempted
+        */
 iret_restore_end:
 
-       /* Jump to hypervisor_callback after fixing up the stack.
-          Events are masked, so jumping out of the critical
-          region is OK. */
+       /*
+        * Jump to hypervisor_callback after fixing up the stack.
+        * Events are masked, so jumping out of the critical region is
+        * OK.
+        */
        je xen_hypervisor_callback
 
 1:     iret
 xen_iret_end_crit:
-.section __ex_table,"a"
+.section __ex_table, "a"
        .align 4
-       .long 1b,iret_exc
+       .long 1b, iret_exc
 .previous
 
 hyper_iret:
@@ -212,55 +147,55 @@ hyper_iret:
        .globl xen_iret_start_crit, xen_iret_end_crit
 
 /*
-   This is called by xen_hypervisor_callback in entry.S when it sees
-   that the EIP at the time of interrupt was between xen_iret_start_crit
-   and xen_iret_end_crit.  We're passed the EIP in %eax so we can do
-   a more refined determination of what to do.
-
-   The stack format at this point is:
-      ----------------
-       ss             : (ss/esp may be present if we came from usermode)
-       esp            :
-       eflags         }  outer exception info
-       cs             }
-       eip            }
-      ---------------- <- edi (copy dest)
-       eax            :  outer eax if it hasn't been restored
-      ----------------
-       eflags         }  nested exception info
-       cs             }   (no ss/esp because we're nested
-       eip            }    from the same ring)
-       orig_eax       }<- esi (copy src)
-       - - - - - - - -
-       fs             }
-       es             }
-       ds             }  SAVE_ALL state
-       eax            }
-        :             :
-       ebx            }<- esp
-      ----------------
-
-   In order to deliver the nested exception properly, we need to shift
-   everything from the return addr up to the error code so it
-   sits just under the outer exception info.  This means that when we
-   handle the exception, we do it in the context of the outer exception
-   rather than starting a new one.
-
-   The only caveat is that if the outer eax hasn't been
-   restored yet (ie, it's still on stack), we need to insert
-   its value into the SAVE_ALL state before going on, since
-   it's usermode state which we eventually need to restore.
+ * This is called by xen_hypervisor_callback in entry.S when it sees
+ * that the EIP at the time of interrupt was between
+ * xen_iret_start_crit and xen_iret_end_crit.  We're passed the EIP in
+ * %eax so we can do a more refined determination of what to do.
+ *
+ * The stack format at this point is:
+ *     ----------------
+ *      ss             : (ss/esp may be present if we came from usermode)
+ *      esp            :
+ *      eflags         }  outer exception info
+ *      cs             }
+ *      eip            }
+ *     ---------------- <- edi (copy dest)
+ *      eax            :  outer eax if it hasn't been restored
+ *     ----------------
+ *      eflags         }  nested exception info
+ *      cs             }   (no ss/esp because we're nested
+ *      eip            }    from the same ring)
+ *      orig_eax       }<- esi (copy src)
+ *      - - - - - - - -
+ *      fs             }
+ *      es             }
+ *      ds             }  SAVE_ALL state
+ *      eax            }
+ *       :             :
+ *      ebx            }<- esp
+ *     ----------------
+ *
+ * In order to deliver the nested exception properly, we need to shift
+ * everything from the return addr up to the error code so it sits
+ * just under the outer exception info.  This means that when we
+ * handle the exception, we do it in the context of the outer
+ * exception rather than starting a new one.
+ *
+ * The only caveat is that if the outer eax hasn't been restored yet
+ * (ie, it's still on stack), we need to insert its value into the
+ * SAVE_ALL state before going on, since it's usermode state which we
+ * eventually need to restore.
  */
 ENTRY(xen_iret_crit_fixup)
        /*
-          Paranoia: Make sure we're really coming from kernel space.
-          One could imagine a case where userspace jumps into the
-          critical range address, but just before the CPU delivers a GP,
-          it decides to deliver an interrupt instead.  Unlikely?
-          Definitely.  Easy to avoid?  Yes.  The Intel documents
-          explicitly say that the reported EIP for a bad jump is the
-          jump instruction itself, not the destination, but some virtual
-          environments get this wrong.
+        * Paranoia: Make sure we're really coming from kernel space.
+        * One could imagine a case where userspace jumps into the
+        * critical range address, but just before the CPU delivers a
+        * GP, it decides to deliver an interrupt instead.  Unlikely?
+        * Definitely.  Easy to avoid?  Yes.  The Intel documents
+        * explicitly say that the reported EIP for a bad jump is the
+        * jump instruction itself, not the destination, but some
+        * virtual environments get this wrong.
         */
        movl PT_CS(%esp), %ecx
        andl $SEGMENT_RPL_MASK, %ecx
@@ -270,15 +205,17 @@ ENTRY(xen_iret_crit_fixup)
        lea PT_ORIG_EAX(%esp), %esi
        lea PT_EFLAGS(%esp), %edi
 
-       /* If eip is before iret_restore_end then stack
-          hasn't been restored yet. */
+       /*
+        * If eip is before iret_restore_end then stack
+        * hasn't been restored yet.
+        */
        cmp $iret_restore_end, %eax
        jae 1f
 
-       movl 0+4(%edi),%eax             /* copy EAX (just above top of frame) */
+       movl 0+4(%edi), %eax            /* copy EAX (just above top of frame) */
        movl %eax, PT_EAX(%esp)
 
-       lea ESP_OFFSET(%edi),%edi       /* move dest up over saved regs */
+       lea ESP_OFFSET(%edi), %edi      /* move dest up over saved regs */
 
        /* set up the copy */
 1:     std
@@ -286,20 +223,6 @@ ENTRY(xen_iret_crit_fixup)
        rep movsl
        cld
 
-       lea 4(%edi),%esp                /* point esp to new frame */
+       lea 4(%edi), %esp               /* point esp to new frame */
 2:     jmp xen_do_upcall
 
-
-/*
-       Force an event check by making a hypercall,
-       but preserve regs before making the call.
- */
-check_events:
-       push %eax
-       push %ecx
-       push %edx
-       call xen_force_evtchn_callback
-       pop %edx
-       pop %ecx
-       pop %eax
-       ret
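
check_events (deleted here and rebuilt as common code; see the xen-asm.h include further down) backs the event-unmask fast path. A rough C sketch of the sequence, assuming the vcpu_info field names from xen/interface/xen.h — the real code must stay in asm so it can be inline-patched:

        static void xen_enable_events_sketch(struct vcpu_info *vcpu)
        {
                vcpu->evtchn_upcall_mask = 0;           /* unmask events */
                barrier();                              /* mask write must land first */
                if (vcpu->evtchn_upcall_pending)
                        xen_force_evtchn_callback();    /* hypercall: deliver now */
        }
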
index 05794c566e8799382f9a314970661d5ab232e4a8..02f496a8dbaa250d40f843456b8ac10fc183fe14 100644 (file)
 /*
-       Asm versions of Xen pv-ops, suitable for either direct use or inlining.
-       The inline versions are the same as the direct-use versions, with the
-       pre- and post-amble chopped off.
-
-       This code is encoded for size rather than absolute efficiency,
-       with a view to being able to inline as much as possible.
-
-       We only bother with direct forms (ie, vcpu in pda) of the operations
-       here; the indirect forms are better handled in C, since they're
-       generally too large to inline anyway.
+ * Asm versions of Xen pv-ops, suitable for either direct use or
+ * inlining.  The inline versions are the same as the direct-use
+ * versions, with the pre- and post-amble chopped off.
+ *
+ * This code is encoded for size rather than absolute efficiency, with
+ * a view to being able to inline as much as possible.
+ *
+ * We only bother with direct forms (ie, vcpu in pda) of the
+ * operations here; the indirect forms are better handled in C, since
+ * they're generally too large to inline anyway.
  */
 
-#include <linux/linkage.h>
-
-#include <asm/asm-offsets.h>
-#include <asm/processor-flags.h>
 #include <asm/errno.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
 #include <asm/segment.h>
 
 #include <xen/interface/xen.h>
 
-#define RELOC(x, v)    .globl x##_reloc; x##_reloc=v
-#define ENDPATCH(x)    .globl x##_end; x##_end=.
-
-/* Pseudo-flag used for virtual NMI, which we don't implement yet */
-#define XEN_EFLAGS_NMI 0x80000000
-
-#if 1
-/*
-       x86-64 does not yet support direct access to percpu variables
-       via a segment override, so we just need to make sure this code
-       never gets used
- */
-#define BUG                    ud2a
-#define PER_CPU_VAR(var, off)  0xdeadbeef
-#endif
-
-/*
-       Enable events.  This clears the event mask and tests the pending
-       event status with one and operation.  If there are pending
-       events, then enter the hypervisor to get them handled.
- */
-ENTRY(xen_irq_enable_direct)
-       BUG
-
-       /* Unmask events */
-       movb $0, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* Test for pending */
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-       jz 1f
-
-2:     call check_events
-1:
-ENDPATCH(xen_irq_enable_direct)
-       ret
-       ENDPROC(xen_irq_enable_direct)
-       RELOC(xen_irq_enable_direct, 2b+1)
-
-/*
-       Disabling events is simply a matter of making the event mask
-       non-zero.
- */
-ENTRY(xen_irq_disable_direct)
-       BUG
-
-       movb $1, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-ENDPATCH(xen_irq_disable_direct)
-       ret
-       ENDPROC(xen_irq_disable_direct)
-       RELOC(xen_irq_disable_direct, 0)
-
-/*
-       (xen_)save_fl is used to get the current interrupt enable status.
-       Callers expect the status to be in X86_EFLAGS_IF, and other bits
-       may be set in the return value.  We take advantage of this by
-       making sure that X86_EFLAGS_IF has the right value (and other bits
-       in that byte are 0), but other bits in the return value are
-       undefined.  We need to toggle the state of the bit, because
-       Xen and x86 use opposite senses (mask vs enable).
- */
-ENTRY(xen_save_fl_direct)
-       BUG
-
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-       setz %ah
-       addb %ah,%ah
-ENDPATCH(xen_save_fl_direct)
-       ret
-       ENDPROC(xen_save_fl_direct)
-       RELOC(xen_save_fl_direct, 0)
-
-/*
-       In principle the caller should be passing us a value return
-       from xen_save_fl_direct, but for robustness sake we test only
-       the X86_EFLAGS_IF flag rather than the whole byte. After
-       setting the interrupt mask state, it checks for unmasked
-       pending events and enters the hypervisor to get them delivered
-       if so.
- */
-ENTRY(xen_restore_fl_direct)
-       BUG
-
-       testb $X86_EFLAGS_IF>>8, %ah
-       setz PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_mask)
-       /* Preempt here doesn't matter because that will deal with
-          any pending interrupts.  The pending check may end up being
-          run on the wrong CPU, but that doesn't hurt. */
-
-       /* check for unmasked and pending */
-       cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info, XEN_vcpu_info_pending)
-       jz 1f
-2:     call check_events
-1:
-ENDPATCH(xen_restore_fl_direct)
-       ret
-       ENDPROC(xen_restore_fl_direct)
-       RELOC(xen_restore_fl_direct, 2b+1)
-
-
-/*
-       Force an event check by making a hypercall,
-       but preserve regs before making the call.
- */
-check_events:
-       push %rax
-       push %rcx
-       push %rdx
-       push %rsi
-       push %rdi
-       push %r8
-       push %r9
-       push %r10
-       push %r11
-       call xen_force_evtchn_callback
-       pop %r11
-       pop %r10
-       pop %r9
-       pop %r8
-       pop %rdi
-       pop %rsi
-       pop %rdx
-       pop %rcx
-       pop %rax
-       ret
+#include "xen-asm.h"
 
 ENTRY(xen_adjust_exception_frame)
-       mov 8+0(%rsp),%rcx
-       mov 8+8(%rsp),%r11
+       mov 8+0(%rsp), %rcx
+       mov 8+8(%rsp), %r11
        ret $16
 
 hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
 /*
-      Xen64 iret frame:
-
-      ss
-      rsp
-      rflags
-      cs
-      rip             <-- standard iret frame
-
-      flags
-
-      rcx             }
-      r11             }<-- pushed by hypercall page
-rsp -> rax             }
+ * Xen64 iret frame:
+ *
+ *     ss
+ *     rsp
+ *     rflags
+ *     cs
+ *     rip             <-- standard iret frame
+ *
+ *     flags
+ *
+ *     rcx             }
+ *     r11             }<-- pushed by hypercall page
+ * rsp->rax            }
  */
 ENTRY(xen_iret)
        pushq $0
@@ -177,8 +48,8 @@ ENDPATCH(xen_iret)
 RELOC(xen_iret, 1b+1)
 
 /*
-       sysexit is not used for 64-bit processes, so it's
-      only ever used to return to 32-bit compat userspace.
+ * sysexit is not used for 64-bit processes, so it's only ever used to
+ * return to 32-bit compat userspace.
  */
 ENTRY(xen_sysexit)
        pushq $__USER32_DS
@@ -193,13 +64,15 @@ ENDPATCH(xen_sysexit)
 RELOC(xen_sysexit, 1b+1)
 
 ENTRY(xen_sysret64)
-       /* We're already on the usermode stack at this point, but still
-          with the kernel gs, so we can easily switch back */
-       movq %rsp, %gs:pda_oldrsp
-       movq %gs:pda_kernelstack,%rsp
+       /*
+        * We're already on the usermode stack at this point, but
+        * still with the kernel gs, so we can easily switch back
+        */
+       movq %rsp, PER_CPU_VAR(old_rsp)
+       movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER_DS
-       pushq %gs:pda_oldrsp
+       pushq PER_CPU_VAR(old_rsp)
        pushq %r11
        pushq $__USER_CS
        pushq %rcx
@@ -210,13 +83,15 @@ ENDPATCH(xen_sysret64)
 RELOC(xen_sysret64, 1b+1)
 
 ENTRY(xen_sysret32)
-       /* We're already on the usermode stack at this point, but still
-          with the kernel gs, so we can easily switch back */
-       movq %rsp, %gs:pda_oldrsp
-       movq %gs:pda_kernelstack, %rsp
+       /*
+        * We're already on the usermode stack at this point, but
+        * still with the kernel gs, so we can easily switch back
+        */
+       movq %rsp, PER_CPU_VAR(old_rsp)
+       movq PER_CPU_VAR(kernel_stack), %rsp
 
        pushq $__USER32_DS
-       pushq %gs:pda_oldrsp
+       pushq PER_CPU_VAR(old_rsp)
        pushq %r11
        pushq $__USER32_CS
        pushq %rcx
@@ -227,28 +102,27 @@ ENDPATCH(xen_sysret32)
 RELOC(xen_sysret32, 1b+1)
 
 /*
-       Xen handles syscall callbacks much like ordinary exceptions,
-       which means we have:
-        - kernel gs
-        - kernel rsp
-        - an iret-like stack frame on the stack (including rcx and r11):
-               ss
-               rsp
-               rflags
-               cs
-               rip
-               r11
-       rsp->   rcx
-
-       In all the entrypoints, we undo all that to make it look
-       like a CPU-generated syscall/sysenter and jump to the normal
-       entrypoint.
+ * Xen handles syscall callbacks much like ordinary exceptions, which
+ * means we have:
+ * - kernel gs
+ * - kernel rsp
+ * - an iret-like stack frame on the stack (including rcx and r11):
+ *     ss
+ *     rsp
+ *     rflags
+ *     cs
+ *     rip
+ *     r11
+ * rsp->rcx
+ *
+ * In all the entrypoints, we undo all that to make it look like a
+ * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  */
 
 .macro undo_xen_syscall
-       mov 0*8(%rsp),%rcx
-       mov 1*8(%rsp),%r11
-       mov 5*8(%rsp),%rsp
+       mov 0*8(%rsp), %rcx
+       mov 1*8(%rsp), %r11
+       mov 5*8(%rsp), %rsp
 .endm
 
 /* Normal 64-bit system call target */
@@ -275,7 +149,7 @@ ENDPROC(xen_sysenter_target)
 
 ENTRY(xen_syscall32_target)
 ENTRY(xen_sysenter_target)
-       lea 16(%rsp), %rsp      /* strip %rcx,%r11 */
+       lea 16(%rsp), %rsp      /* strip %rcx, %r11 */
        mov $-ENOSYS, %rax
        pushq $VGCF_in_syscall
        jmp hypercall_iret
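
The 0*8/1*8/5*8 offsets in undo_xen_syscall index the frame described above; rendered as a struct purely for illustration (the struct and its field names are ours, not kernel definitions):

        struct xen_syscall_frame {      /* lowest address (rsp) first */
                unsigned long rcx;      /* 0*8: pushed by hypercall page */
                unsigned long r11;      /* 1*8: pushed by hypercall page */
                unsigned long rip;      /* standard iret frame from here up */
                unsigned long cs;
                unsigned long rflags;
                unsigned long rsp;      /* 5*8: user stack pointer */
                unsigned long ss;
        };
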
index 63d49a523ed307f9407e805ca507bf293ad0dfca..1a5ff24e29c0a645156abad8111cb99e8de2e68f 100644 (file)
@@ -8,7 +8,7 @@
 
 #include <asm/boot.h>
 #include <asm/asm.h>
-#include <asm/page.h>
+#include <asm/page_types.h>
 
 #include <xen/interface/elfnote.h>
 #include <asm/xen/interface.h>
index c1f8faf0a2c5a49d0676e24aaad161323bec6135..2f5ef2632ea23bc29d698d043738c1d90545d467 100644 (file)
 extern const char xen_hypervisor_callback[];
 extern const char xen_failsafe_callback[];
 
+extern void *xen_initial_gdt;
+
 struct trap_info;
 void xen_copy_trap_info(struct trap_info *traps);
 
+DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
 DECLARE_PER_CPU(unsigned long, xen_cr3);
 DECLARE_PER_CPU(unsigned long, xen_current_cr3);
 
@@ -22,6 +25,13 @@ extern struct shared_info *HYPERVISOR_shared_info;
 
 void xen_setup_mfn_list_list(void);
 void xen_setup_shared_info(void);
+void xen_setup_machphys_mapping(void);
+pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
+void xen_ident_map_ISA(void);
+void xen_reserve_top(void);
+
+void xen_leave_lazy(void);
+void xen_post_allocator_init(void);
 
 char * __init xen_memory_setup(void);
 void __init xen_arch_setup(void);
index f50b697eb601a20f8147a554d3d9f8d2d7c6a83e..226a3916231085f12eace8432d57b1770ca58853 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef _XTENSA_SWAB_H
 #define _XTENSA_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 #include <linux/compiler.h>
 
 #define __SWAB_64_THRU_32__
index 7cf9d1ff45a015e0d8fe7be2546c6fb7f95f590e..028120a0965aa9a723196d1f245012151459ae3a 100644 (file)
@@ -363,7 +363,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
        if (!bt->sequence)
                goto err;
 
-       bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG);
+       bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
        if (!bt->msg_data)
                goto err;
 
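With the extra alignment argument, the typed wrapper most callers use looks roughly like this (a sketch of its shape, not a quote of percpu.h):

        #define alloc_percpu(type) \
                ((type *)__alloc_percpu(sizeof(type), __alignof__(type)))
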
index c3e841f3cde924239ce34f94a75818cfc194a211..ab0aff3c7d6a3672bd7dd778853a77108e111528 100644 (file)
@@ -365,7 +365,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
 
 /*******************************************************************************
  *
- * FUNCTION:    acpi_get_table
+ * FUNCTION:    acpi_get_table_with_size
  *
  * PARAMETERS:  Signature           - ACPI signature of needed table
  *              Instance            - Which instance (for SSDTs)
@@ -377,8 +377,9 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
  *
  *****************************************************************************/
 acpi_status
-acpi_get_table(char *signature,
-              u32 instance, struct acpi_table_header **out_table)
+acpi_get_table_with_size(char *signature,
+              u32 instance, struct acpi_table_header **out_table,
+              acpi_size *tbl_size)
 {
        u32 i;
        u32 j;
@@ -408,6 +409,7 @@ acpi_get_table(char *signature,
                    acpi_tb_verify_table(&acpi_gbl_root_table_list.tables[i]);
                if (ACPI_SUCCESS(status)) {
                        *out_table = acpi_gbl_root_table_list.tables[i].pointer;
+                       *tbl_size = acpi_gbl_root_table_list.tables[i].length;
                }
 
                if (!acpi_gbl_permanent_mmap) {
@@ -420,6 +422,15 @@ acpi_get_table(char *signature,
        return (AE_NOT_FOUND);
 }
 
+acpi_status
+acpi_get_table(char *signature,
+              u32 instance, struct acpi_table_header **out_table)
+{
+       acpi_size tbl_size;
+
+       return acpi_get_table_with_size(signature,
+                      instance, out_table, &tbl_size);
+}
 ACPI_EXPORT_SYMBOL(acpi_get_table)
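
The sized getter exists so early-boot callers can drop the mapping once parsing is done; the intended pairing, sketched with error handling trimmed (it mirrors acpi_table_parse below):

        struct acpi_table_header *table = NULL;
        acpi_size tbl_size;

        acpi_get_table_with_size(ACPI_SIG_MADT, 0, &table, &tbl_size);
        if (table) {
                /* ... walk the table ... */
                early_acpi_os_unmap_memory(table, tbl_size);
        }
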
 
 /*******************************************************************************
index b3193ec0a2ef36eae859111a525a1d5931590b78..2b6c5902825437470d50e7e841a23d3c37b10d61 100644 (file)
@@ -272,14 +272,21 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
-void acpi_os_unmap_memory(void __iomem * virt, acpi_size size)
+void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
-       if (acpi_gbl_permanent_mmap) {
+       if (acpi_gbl_permanent_mmap)
                iounmap(virt);
-       }
+       else
+               __acpi_unmap_table(virt, size);
 }
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 
+void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
+{
+       if (!acpi_gbl_permanent_mmap)
+               __acpi_unmap_table(virt, size);
+}
+
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
 acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
index 9cc769b587ff7bbe061ae0c5ab5cc66d67e62ae5..68fd3d2927997efe09ced4d0df8120afa8adc7ce 100644 (file)
@@ -516,12 +516,12 @@ int acpi_processor_preregister_performance(
                        continue;
                }
 
-               if (!performance || !percpu_ptr(performance, i)) {
+               if (!performance || !per_cpu_ptr(performance, i)) {
                        retval = -EINVAL;
                        continue;
                }
 
-               pr->performance = percpu_ptr(performance, i);
+               pr->performance = per_cpu_ptr(performance, i);
                cpumask_set_cpu(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
index a8852952fac409de6550b467d5d49b4f7e5ace07..fec1ae36d4310cadca89ff36c2e3174480c765dc 100644 (file)
@@ -181,14 +181,15 @@ acpi_table_parse_entries(char *id,
        struct acpi_subtable_header *entry;
        unsigned int count = 0;
        unsigned long table_end;
+       acpi_size tbl_size;
 
        if (!handler)
                return -EINVAL;
 
        if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-               acpi_get_table(id, acpi_apic_instance, &table_header);
+               acpi_get_table_with_size(id, acpi_apic_instance, &table_header, &tbl_size);
        else
-               acpi_get_table(id, 0, &table_header);
+               acpi_get_table_with_size(id, 0, &table_header, &tbl_size);
 
        if (!table_header) {
                printk(KERN_WARNING PREFIX "%4.4s not present\n", id);
@@ -206,8 +207,10 @@ acpi_table_parse_entries(char *id,
               table_end) {
                if (entry->type == entry_id
                    && (!max_entries || count++ < max_entries))
-                       if (handler(entry, table_end))
+                       if (handler(entry, table_end)) {
+                               early_acpi_os_unmap_memory((char *)table_header, tbl_size);
                                return -EINVAL;
+                       }
 
                entry = (struct acpi_subtable_header *)
                    ((unsigned long)entry + entry->length);
@@ -217,6 +220,7 @@ acpi_table_parse_entries(char *id,
                       "%i found\n", id, entry_id, count - max_entries, count);
        }
 
+       early_acpi_os_unmap_memory((char *)table_header, tbl_size);
        return count;
 }
 
@@ -241,17 +245,19 @@ acpi_table_parse_madt(enum acpi_madt_type id,
 int __init acpi_table_parse(char *id, acpi_table_handler handler)
 {
        struct acpi_table_header *table = NULL;
+       acpi_size tbl_size;
 
        if (!handler)
                return -EINVAL;
 
        if (strncmp(id, ACPI_SIG_MADT, 4) == 0)
-               acpi_get_table(id, acpi_apic_instance, &table);
+               acpi_get_table_with_size(id, acpi_apic_instance, &table, &tbl_size);
        else
-               acpi_get_table(id, 0, &table);
+               acpi_get_table_with_size(id, 0, &table, &tbl_size);
 
        if (table) {
                handler(table);
+               early_acpi_os_unmap_memory(table, tbl_size);
                return 0;
        } else
                return 1;
@@ -265,8 +271,9 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
 static void __init check_multiple_madt(void)
 {
        struct acpi_table_header *table = NULL;
+       acpi_size tbl_size;
 
-       acpi_get_table(ACPI_SIG_MADT, 2, &table);
+       acpi_get_table_with_size(ACPI_SIG_MADT, 2, &table, &tbl_size);
        if (table) {
                printk(KERN_WARNING PREFIX
                       "BIOS bug: multiple APIC/MADT found,"
@@ -275,6 +282,7 @@ static void __init check_multiple_madt(void)
                       "If \"acpi_apic_instance=%d\" works better, "
                       "notify linux-acpi@vger.kernel.org\n",
                       acpi_apic_instance ? 0 : 2);
+               early_acpi_os_unmap_memory(table, tbl_size);
 
        } else
                acpi_apic_instance = 0;
index 719ee5c1c8d94d97bfb4841ee9e82c1f9760ffda..5b257a57bc57ae60e369950b60126f3d43f43e04 100644 (file)
@@ -107,7 +107,7 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
 /*
  * Print cpu online, possible, present, and system maps
  */
-static ssize_t print_cpus_map(char *buf, cpumask_t *map)
+static ssize_t print_cpus_map(char *buf, const struct cpumask *map)
 {
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map);
 
index a778fb52b11f30f507edccca0301220d851344b0..bf6b13206d00cbb4dda735829b6df2f740b80fa9 100644 (file)
 #include <linux/hardirq.h>
 #include <linux/topology.h>
 
-#define define_one_ro(_name)           \
+#define define_one_ro_named(_name, _func)                              \
+static SYSDEV_ATTR(_name, 0444, _func, NULL)
+
+#define define_one_ro(_name)                           \
 static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
 
 #define define_id_show_func(name)                              \
@@ -42,8 +45,8 @@ static ssize_t show_##name(struct sys_device *dev,            \
        return sprintf(buf, "%d\n", topology_##name(cpu));      \
 }
 
-#if defined(topology_thread_siblings) || defined(topology_core_siblings)
-static ssize_t show_cpumap(int type, cpumask_t *mask, char *buf)
+#if defined(topology_thread_cpumask) || defined(topology_core_cpumask)
+static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
 {
        ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
        int n = 0;
@@ -65,7 +68,7 @@ static ssize_t show_##name(struct sys_device *dev,                    \
                           struct sysdev_attribute *attr, char *buf)    \
 {                                                                      \
        unsigned int cpu = dev->id;                                     \
-       return show_cpumap(0, &(topology_##name(cpu)), buf);            \
+       return show_cpumap(0, topology_##name(cpu), buf);               \
 }
 
 #define define_siblings_show_list(name)                                        \
@@ -74,7 +77,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
                                  char *buf)                            \
 {                                                                      \
        unsigned int cpu = dev->id;                                     \
-       return show_cpumap(1, &(topology_##name(cpu)), buf);            \
+       return show_cpumap(1, topology_##name(cpu), buf);               \
 }
 
 #else
@@ -82,9 +85,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
 static ssize_t show_##name(struct sys_device *dev,                     \
                           struct sysdev_attribute *attr, char *buf)    \
 {                                                                      \
-       unsigned int cpu = dev->id;                                     \
-       cpumask_t mask = topology_##name(cpu);                          \
-       return show_cpumap(0, &mask, buf);                              \
+       return show_cpumap(0, topology_##name(dev->id), buf);           \
 }
 
 #define define_siblings_show_list(name)                                        \
@@ -92,9 +93,7 @@ static ssize_t show_##name##_list(struct sys_device *dev,             \
                                  struct sysdev_attribute *attr,        \
                                  char *buf)                            \
 {                                                                      \
-       unsigned int cpu = dev->id;                                     \
-       cpumask_t mask = topology_##name(cpu);                          \
-       return show_cpumap(1, &mask, buf);                              \
+       return show_cpumap(1, topology_##name(dev->id), buf);           \
 }
 #endif
 
@@ -107,13 +106,13 @@ define_one_ro(physical_package_id);
 define_id_show_func(core_id);
 define_one_ro(core_id);
 
-define_siblings_show_func(thread_siblings);
-define_one_ro(thread_siblings);
-define_one_ro(thread_siblings_list);
+define_siblings_show_func(thread_cpumask);
+define_one_ro_named(thread_siblings, show_thread_cpumask);
+define_one_ro_named(thread_siblings_list, show_thread_cpumask_list);
 
-define_siblings_show_func(core_siblings);
-define_one_ro(core_siblings);
-define_one_ro(core_siblings_list);
+define_siblings_show_func(core_cpumask);
+define_one_ro_named(core_siblings, show_core_cpumask);
+define_one_ro_named(core_siblings_list, show_core_cpumask_list);
 
 static struct attribute *default_attrs[] = {
        &attr_physical_package_id.attr,
index e1129fad96dd2c75a20cc283dc6e53045b3a553d..ee19b6e8fcb4545a4bf14f7d433141db217a0434 100644 (file)
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
 #endif
 
 #ifndef CONFIG_X86_64
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 #define PMTMR_EXPECTED_RATE \
   ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
 /*
index 1bde303b970ba2d008813d4dfd67d48ffeb5ceff..8615059a872915cb0f7bfadc3d348a1dbdd5aee5 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
-#include "mach_timer.h"
+#include <asm/mach_timer.h>
 
 #define CYCLONE_CBAR_ADDR      0xFEB00CD0      /* base address ptr */
 #define CYCLONE_PMCC_OFFSET    0x51A0          /* offset to control register */
index c0646576cf47cf937b8e47b6fc0ec5daecd93577..2705284f622334a735f7c083566872261df95fbd 100644 (file)
@@ -3,7 +3,7 @@
 #
 config EISA_VLB_PRIMING
        bool "Vesa Local Bus priming"
-       depends on X86_PC && EISA
+       depends on X86 && EISA
        default n
        ---help---
          Activate this option if your system contains a Vesa Local
@@ -24,11 +24,11 @@ config EISA_PCI_EISA
          When in doubt, say Y.
 
 # Using EISA_VIRTUAL_ROOT on something other than an Alpha or
-# an X86_PC may lead to crashes...
+# an X86 may lead to crashes...
 
 config EISA_VIRTUAL_ROOT
        bool "EISA virtual root device"
-       depends on EISA && (ALPHA || X86_PC)
+       depends on EISA && (ALPHA || X86)
        default y
        ---help---
          Activate this option if your system only have EISA bus
index 777fba48d2d3ddbf69343b9abd0c3aa8351ab9d9..3009e0171e54958f2c99ea3b568d4619ff237b7c 100644 (file)
@@ -244,7 +244,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
  */
 int dcdbas_smi_request(struct smi_cmd *smi_cmd)
 {
-       cpumask_t old_mask;
+       cpumask_var_t old_mask;
        int ret = 0;
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -254,8 +254,11 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
        }
 
        /* SMI requires CPU 0 */
-       old_mask = current->cpus_allowed;
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+       if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_copy(old_mask, &current->cpus_allowed);
+       set_cpus_allowed_ptr(current, cpumask_of(0));
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                        __func__);
@@ -275,7 +278,8 @@ int dcdbas_smi_request(struct smi_cmd *smi_cmd)
        );
 
 out:
-       set_cpus_allowed_ptr(current, &old_mask);
+       set_cpus_allowed_ptr(current, old_mask);
+       free_cpumask_var(old_mask);
        return ret;
 }
 
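This follows the standard save/pin/restore pattern for cpumask_var_t; in outline (a sketch):

        cpumask_var_t saved;

        if (!alloc_cpumask_var(&saved, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(saved, &current->cpus_allowed);
        set_cpus_allowed_ptr(current, cpumask_of(0));   /* pin to CPU 0 */
        /* ... CPU-0-only work ... */
        set_cpus_allowed_ptr(current, saved);           /* restore affinity */
        free_cpumask_var(saved);
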
index 3ab3e4a41d670f48af0583c841dae8a6d040748f..7b7ddc2d51c965fe6d9c6b7519f94e92074ff536 100644 (file)
@@ -938,8 +938,8 @@ static int __init ibft_init(void)
                return -ENOMEM;
 
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%lx.\n",
-                      virt_to_phys((void *)ibft_addr));
+               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
+                      (u64)virt_to_phys((void *)ibft_addr));
 
                rc = ibft_check_device();
                if (rc)
index 8df849f66830fbd50bc371fb13abf04360afa914..b756f043a5f4d19fbf58b625b95b4d8a6a2b4cae 100644 (file)
@@ -678,9 +678,9 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
        *start = &buf[offset];
        *eof = 0;
 
-       DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+       DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
                       atomic_read(&dev->vma_count),
-                      high_memory, virt_to_phys(high_memory));
+                      high_memory, (u64)virt_to_phys(high_memory));
        list_for_each_entry(pt, &dev->vmalist, head) {
                if (!(vma = pt->vma))
                        continue;
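
This and the similar printk changes further down share one idea: virt_to_phys() returns an architecture-dependent width, so the portable spelling casts to u64 and prints with %llx. Sketch:

        /* phys_addr_t may be 32 or 64 bits depending on arch/config */
        printk(KERN_INFO "buffer at 0x%llx\n", (u64)virt_to_phys(buf));
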
index b84bf066879b6c177bde4efd79aa2bde480faa60..b4eea0292c1a240cc8d7c71cfd6772baaa51537b 100644 (file)
@@ -543,8 +543,8 @@ config SENSORS_LM90
        help
          If you say yes here you get support for National Semiconductor LM90,
          LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, and Maxim
-         MAX6646, MAX6647, MAX6649, MAX6657, MAX6658, MAX6659, MAX6680 and
-         MAX6681 sensor chips.
+         MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
+         MAX6680, MAX6681 and MAX6692 sensor chips.
 
          This driver can also be built as a module.  If so, the module
          will be called lm90.
index e52b38806d034f4ac835bf626907d933f368a3de..ad2b3431b7253091505e726a72d4857b6f708dde 100644 (file)
@@ -760,8 +760,11 @@ static int abituguru3_read_increment_offset(struct abituguru3_data *data,
 
        for (i = 0; i < offset_count; i++)
                if ((x = abituguru3_read(data, bank, offset + i, count,
-                               buf + i * count)) != count)
-                       return i * count + (i && (x < 0)) ? 0 : x;
+                               buf + i * count)) != count) {
+                       if (x < 0)
+                               return x;
+                       return i * count + x;
+               }
 
        return i * count;
 }
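
The old return fell afoul of precedence: ?: binds more loosely than +. A minimal demonstration (values made up):

        int i = 1, count = 32, x = 10;          /* short read in chunk 2 */
        int old = i * count + (i && (x < 0)) ? 0 : x;
                /* parsed as (i*count + ...) ? 0 : x  ==>  old == 0 */
        int fixed = (x < 0) ? x : i * count + x;        /* == 42 bytes */
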
index 1692de36996966d18e36236f3d5e2a3179a75211..18a1ba888165adf489e38189e78d459b1cec6688 100644 (file)
@@ -617,7 +617,7 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
 static int f75375_probe(struct i2c_client *client,
                const struct i2c_device_id *id)
 {
-       struct f75375_data *data = i2c_get_clientdata(client);
+       struct f75375_data *data;
        struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
        int err;
 
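The bug: probe() read clientdata before anything had been stored there. The usual i2c probe shape allocates first and publishes the pointer afterwards; a sketch of that standard pattern, not a quote of the final fix:

        data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        i2c_set_clientdata(client, data);       /* clientdata valid from here */
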
index 95a99c590da22817e2e1abc1e02afe0dde3ec2f2..9157247fed8e6062877c4428571bb75385e2227b 100644 (file)
@@ -213,7 +213,7 @@ static inline u16 FAN16_TO_REG(long rpm)
 
 #define TEMP_TO_REG(val) (SENSORS_LIMIT(((val)<0?(((val)-500)/1000):\
                                        ((val)+500)/1000),-128,127))
-#define TEMP_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000)
+#define TEMP_FROM_REG(val) ((val) * 1000)
 
 #define PWM_TO_REG(val)   ((val) >> 1)
 #define PWM_FROM_REG(val) (((val)&0x7f) << 1)
@@ -267,9 +267,9 @@ struct it87_data {
        u8 has_fan;             /* Bitfield, fans enabled */
        u16 fan[5];             /* Register values, possibly combined */
        u16 fan_min[5];         /* Register values, possibly combined */
-       u8 temp[3];             /* Register value */
-       u8 temp_high[3];        /* Register value */
-       u8 temp_low[3];         /* Register value */
+       s8 temp[3];             /* Register value */
+       s8 temp_high[3];        /* Register value */
+       s8 temp_low[3];         /* Register value */
        u8 sensor;              /* Register value */
        u8 fan_div[3];          /* Register encoding, shifted right */
        u8 vid;                 /* Register encoding, combined */
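
Declaring the register fields s8 lets the compiler perform the sign extension that the old TEMP_FROM_REG open-coded; for example, on the two's-complement targets the kernel supports:

        s8 temp = 0xF6;                 /* register reading for -10 degC */
        long mdeg = temp * 1000;        /* sign-extends: -10 * 1000 = -10000 */
        /* old macro: ((0xF6 > 0x80 ? 0xF6 - 0x100 : 0xF6) * 1000) == -10000 */
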
index 96a7018667261a947c913ab667433facc35d7782..1aff7575799d57d2ad5837c54c46bd8d5a65fd1a 100644 (file)
  * supported by this driver. These chips lack the remote temperature
  * offset feature.
  *
- * This driver also supports the MAX6646, MAX6647 and MAX6649 chips
- * made by Maxim.  These are again similar to the LM86, but they use
- * unsigned temperature values and can report temperatures from 0 to
- * 145 degrees.
+ * This driver also supports the MAX6646, MAX6647, MAX6648, MAX6649 and
+ * MAX6692 chips made by Maxim.  These are again similar to the LM86,
+ * but they use unsigned temperature values and can report temperatures
+ * from 0 to 145 degrees.
  *
  * This driver also supports the MAX6680 and MAX6681, two other sensor
  * chips made by Maxim. These are quite similar to the other Maxim
index a01b4488208b20c97a8b0317ecc0b3777a3d5577..4a65b96db2c8f07ea8de3a286155bc3e0cfce6c7 100644 (file)
@@ -2490,12 +2490,14 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
        int ret = 0;
        struct nes_vnic *nesvnic;
        struct nes_device *nesdev;
+       struct nes_ib_device *nesibdev;
 
        nesvnic = to_nesvnic(nesqp->ibqp.device);
        if (!nesvnic)
                return -EINVAL;
 
        nesdev = nesvnic->nesdev;
+       nesibdev = nesvnic->nesibdev;
 
        nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
                        atomic_read(&nesvnic->netdev->refcnt));
@@ -2507,6 +2509,8 @@ static int nes_disconnect(struct nes_qp *nesqp, int abrupt)
        } else {
                /* Need to free the Last Streaming Mode Message */
                if (nesqp->ietf_frame) {
+                       if (nesqp->lsmm_mr)
+                               nesibdev->ibdev.dereg_mr(nesqp->lsmm_mr);
                        pci_free_consistent(nesdev->pcidev,
                                        nesqp->private_data_len+sizeof(struct ietf_mpa_frame),
                                        nesqp->ietf_frame, nesqp->ietf_frame_pbase);
@@ -2543,6 +2547,12 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        u32 crc_value;
        int ret;
        int passive_state;
+       struct nes_ib_device *nesibdev;
+       struct ib_mr *ibmr = NULL;
+       struct ib_phys_buf ibphysbuf;
+       struct nes_pd *nespd;
+
+
 
        ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
        if (!ibqp)
@@ -2601,6 +2611,26 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if (cm_id->remote_addr.sin_addr.s_addr !=
                        cm_id->local_addr.sin_addr.s_addr) {
                u64temp = (unsigned long)nesqp;
+               nesibdev = nesvnic->nesibdev;
+               nespd = nesqp->nespd;
+               ibphysbuf.addr = nesqp->ietf_frame_pbase;
+               ibphysbuf.size = conn_param->private_data_len +
+                                       sizeof(struct ietf_mpa_frame);
+               ibmr = nesibdev->ibdev.reg_phys_mr((struct ib_pd *)nespd,
+                                               &ibphysbuf, 1,
+                                               IB_ACCESS_LOCAL_WRITE,
+                                               (u64 *)&nesqp->ietf_frame);
+               if (!ibmr) {
+                       nes_debug(NES_DBG_CM, "Unable to register memory region"
+                                       " for LSMM for cm_node = %p \n",
+                                       cm_node);
+                       return -ENOMEM;
+               }
+
+               ibmr->pd = &nespd->ibpd;
+               ibmr->device = nespd->ibpd.device;
+               nesqp->lsmm_mr = ibmr;
+
                u64temp |= NES_SW_CONTEXT_ALIGN>>1;
                set_wqe_64bit_value(wqe->wqe_words,
                        NES_IWARP_SQ_WQE_COMP_CTX_LOW_IDX,
@@ -2611,14 +2641,13 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
                wqe->wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX] =
                        cpu_to_le32(conn_param->private_data_len +
                        sizeof(struct ietf_mpa_frame));
-               wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_LOW_IDX] =
-                       cpu_to_le32((u32)nesqp->ietf_frame_pbase);
-               wqe->wqe_words[NES_IWARP_SQ_WQE_FRAG0_HIGH_IDX] =
-                       cpu_to_le32((u32)((u64)nesqp->ietf_frame_pbase >> 32));
+               set_wqe_64bit_value(wqe->wqe_words,
+                                       NES_IWARP_SQ_WQE_FRAG0_LOW_IDX,
+                                       (u64)nesqp->ietf_frame);
                wqe->wqe_words[NES_IWARP_SQ_WQE_LENGTH0_IDX] =
                        cpu_to_le32(conn_param->private_data_len +
                        sizeof(struct ietf_mpa_frame));
-               wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = 0;
+               wqe->wqe_words[NES_IWARP_SQ_WQE_STAG0_IDX] = ibmr->lkey;
 
                nesqp->nesqp_context->ird_ord_sizes |=
                        cpu_to_le32(NES_QPCONTEXT_ORDIRD_LSMM_PRESENT |
index 4fdb72454f94f493045e56ce4a289fe01ad45266..d93a6562817ce2e0bfb27caeef26024544731971 100644 (file)
@@ -1360,8 +1360,10 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                                        NES_QPCONTEXT_MISC_RQ_SIZE_SHIFT);
                        nesqp->nesqp_context->misc |= cpu_to_le32((u32)nesqp->hwqp.sq_encoded_size <<
                                        NES_QPCONTEXT_MISC_SQ_SIZE_SHIFT);
+                       if (!udata) {
                                nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_PRIV_EN);
                                nesqp->nesqp_context->misc |= cpu_to_le32(NES_QPCONTEXT_MISC_FAST_REGISTER_EN);
+                       }
                        nesqp->nesqp_context->cqs = cpu_to_le32(nesqp->nesscq->hw_cq.cq_number +
                                        ((u32)nesqp->nesrcq->hw_cq.cq_number << 16));
                        u64temp = (u64)nesqp->hwqp.sq_pbase;
index 6c6b4da5184f9aee7bdb4600a69d887162cbf873..ae0ca9bc83bd9aae7a8fd83dd4c9165939af051e 100644 (file)
@@ -134,6 +134,7 @@ struct nes_qp {
        struct ietf_mpa_frame *ietf_frame;
        dma_addr_t            ietf_frame_pbase;
        wait_queue_head_t     state_waitq;
+       struct ib_mr          *lsmm_mr;
        unsigned long         socket;
        struct nes_hw_qp      hwqp;
        struct work_struct    work;
index 35561689ff38717f60ddc588493d7d02206f2008..ea2638b4198226dfc7d2d69c87a387eea1df1df6 100644 (file)
@@ -13,11 +13,11 @@ menuconfig INPUT_KEYBOARD
 if INPUT_KEYBOARD
 
 config KEYBOARD_ATKBD
-       tristate "AT keyboard" if EMBEDDED || !X86_PC
+       tristate "AT keyboard" if EMBEDDED || !X86
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86_PC
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you want to use a standard AT or PS/2 keyboard. Usually
index 9705f3a00a3d944a5e3883e3cdf2fce4de20a2e8..4f38e6f7dfdd6db154b5d6ecc531a0f55e802019 100644 (file)
@@ -17,7 +17,7 @@ config MOUSE_PS2
        default y
        select SERIO
        select SERIO_LIBPS2
-       select SERIO_I8042 if X86_PC
+       select SERIO_I8042 if X86
        select SERIO_GSCPS2 if GSC
        help
          Say Y here if you have a PS/2 mouse connected to your system. This
index 76f2b36881c3b43871533399cc242b4dfd24cd43..a3d3cbab359a8b1ae9bea16ecf59b2caef0f3212 100644 (file)
@@ -1,6 +1,6 @@
 config LGUEST
        tristate "Linux hypervisor example code"
-       depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX && !X86_VOYAGER
+       depends on X86_32 && EXPERIMENTAL && !X86_PAE && FUTEX
        select HVC_DRIVER
        ---help---
          This is a very simple module which allows you to run
index 84d5ea1ec17120e9c4d8d94b6a2c9376ac241449..b457a05b28d9621b61352f68969c043c603175f1 100644 (file)
@@ -1383,6 +1383,11 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
                        wm8350->power.rev_g_coeff = 1;
                        break;
 
+               case 1:
+                       dev_info(wm8350->dev, "WM8351 Rev B\n");
+                       wm8350->power.rev_g_coeff = 1;
+                       break;
+
                default:
                        dev_err(wm8350->dev, "Unknown WM8351 CHIP_REV\n");
                        ret = -ENODEV;
index c64e6798878a529035154426c3db9af0d587bdb3..1c484084ed4f874c78cf7ccbf555350148a2a00c 100644 (file)
@@ -162,7 +162,7 @@ config ENCLOSURE_SERVICES
 config SGI_XP
        tristate "Support communication between SGI SSIs"
        depends on NET
-       depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_64) && SMP
+       depends on (IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || X86_UV) && SMP
        select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
        select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2
        select SGI_GRU if (IA64_GENERIC || IA64_SGI_UV || X86_64) && SMP
@@ -189,7 +189,7 @@ config HP_ILO
 
 config SGI_GRU
        tristate "SGI GRU driver"
-       depends on (X86_64 || IA64_SGI_UV || IA64_GENERIC) && SMP
+       depends on (X86_UV || IA64_SGI_UV || IA64_GENERIC) && SMP
        default n
        select MMU_NOTIFIER
        ---help---
index 6509838063924071cfdcfce11cf1d425b2ad170a..c67e4e8bd62c5fd83236ff97fb0961a82db189d2 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/proc_fs.h>
 #include <linux/uaccess.h>
+#include <asm/uv/uv.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
 
-#if defined CONFIG_X86_64
-#include <asm/genapic.h>
-#include <asm/irq.h>
-#define IS_UV()                is_uv_system()
-#elif defined CONFIG_IA64
-#include <asm/system.h>
-#include <asm/sn/simulator.h>
-/* temp support for running on hardware simulator */
-#define IS_UV()                IS_MEDUSA() || ia64_platform_is("uv")
-#else
-#define IS_UV()                0
-#endif
-
 #include <asm/uv/uv_hub.h>
 #include <asm/uv/uv_mmrs.h>
 
@@ -381,7 +369,7 @@ static int __init gru_init(void)
        char id[10];
        void *gru_start_vaddr;
 
-       if (!IS_UV())
+       if (!is_uv_system())
                return 0;
 
 #if defined CONFIG_IA64
@@ -451,7 +439,7 @@ static void __exit gru_exit(void)
        int order = get_order(sizeof(struct gru_state) *
                              GRU_CHIPLETS_PER_BLADE);
 
-       if (!IS_UV())
+       if (!is_uv_system())
                return;
 
        for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
index 7b4cbd5e03e99a3e4297dc6d4b6de219d30f7389..2275126cb3348f203903978265943edbbbaf846c 100644 (file)
 
 #include <linux/mutex.h>
 
-#ifdef CONFIG_IA64
+#if defined CONFIG_X86_UV || defined CONFIG_IA64_SGI_UV
+#include <asm/uv/uv.h>
+#define is_uv()                is_uv_system()
+#endif
+
+#ifndef is_uv
+#define is_uv()                0
+#endif
+
+#if defined CONFIG_IA64
 #include <asm/system.h>
 #include <asm/sn/arch.h>       /* defines is_shub1() and is_shub2() */
 #define is_shub()      ia64_platform_is("sn2")
-#ifdef CONFIG_IA64_SGI_UV
-#define is_uv()                ia64_platform_is("uv")
-#else
-#define is_uv()                0
-#endif
-#endif
-#ifdef CONFIG_X86_64
-#include <asm/genapic.h>
-#define is_uv()                is_uv_system()
 #endif
 
 #ifndef is_shub1
 #define is_shub()      0
 #endif
 
-#ifndef is_uv
-#define is_uv()                0
-#endif
-
 #ifdef USE_DBUG_ON
 #define DBUG_ON(condition)     BUG_ON(condition)
 #else
index 89218f7cfaa72e272aba75ebb99ab43c87359e71..6576170de9628ae9ab72fde2aace263248ad52de 100644 (file)
@@ -318,7 +318,7 @@ xpc_hb_checker(void *ignore)
 
        /* this thread was marked active by xpc_hb_init() */
 
-       set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));
+       set_cpus_allowed_ptr(current, cpumask_of(XPC_HB_CHECK_CPU));
 
        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
index f4a67c65d301b4d5aacc52ac85c8e452d6c63fd7..2db166b7096f42ddf1ed5eac276de476adb9fc3e 100644 (file)
@@ -793,8 +793,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
                              host->mem->start + host->sdidata);
 
        if (!setup_ok) {
-               s3c2410_dma_config(host->dma, 4,
-                       (S3C2410_DCON_HWTRIG | S3C2410_DCON_CH0_SDI));
+               s3c2410_dma_config(host->dma, 4, 0);
                s3c2410_dma_set_buffdone_fn(host->dma,
                                            s3cmci_dma_done_callback);
                s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
index 8b12e6e109d3721d12d8aff35d8a225ddd6873be..2ff88791cebc359aae7b830a18cf5b9679ec15b4 100644 (file)
@@ -273,7 +273,7 @@ config MTD_NAND_CAFE
 
 config MTD_NAND_CS553X
        tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
-       depends on X86_32 && (X86_PC || X86_GENERICARCH)
+       depends on X86_32
        help
          The CS553x companion chips for the AMD Geode processor
          include NAND flash controllers with built-in hardware ECC
index fac43fd6fc87782a42455b852cec62a56333849f..6a843f7350ab10982506df13116b9b5c48d02b40 100644 (file)
@@ -150,7 +150,8 @@ static int __init ne3210_eisa_probe (struct device *device)
                if (phys_mem < virt_to_phys(high_memory)) {
                        printk(KERN_CRIT "ne3210.c: Card RAM overlaps with normal memory!!!\n");
                        printk(KERN_CRIT "ne3210.c: Use EISA SCU to set card memory below 1MB,\n");
-                       printk(KERN_CRIT "ne3210.c: or to an address above 0x%lx.\n", virt_to_phys(high_memory));
+                       printk(KERN_CRIT "ne3210.c: or to an address above 0x%llx.\n",
+                               (u64)virt_to_phys(high_memory));
                        printk(KERN_CRIT "ne3210.c: Driver NOT installed.\n");
                        retval = -EINVAL;
                        goto out3;
index ab0e09bf154d1f1fe2fdab401465e31cccdb2918..847e9bb0098f2bf0b2543a1cb53e1135a3f5939a 100644 (file)
@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx)
  * interrupts across them. */
 static int efx_wanted_rx_queues(void)
 {
-       cpumask_t core_mask;
+       cpumask_var_t core_mask;
        int count;
        int cpu;
 
-       cpus_clear(core_mask);
+       if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) {
+               printk(KERN_WARNING
+                      "efx.c: allocation failure, irq balancing hobbled\n");
+               return 1;
+       }
+
+       cpumask_clear(core_mask);
        count = 0;
        for_each_online_cpu(cpu) {
-               if (!cpu_isset(cpu, core_mask)) {
+               if (!cpumask_test_cpu(cpu, core_mask)) {
                        ++count;
-                       cpus_or(core_mask, core_mask,
-                               topology_core_siblings(cpu));
+                       cpumask_or(core_mask, core_mask,
+                                  topology_core_cpumask(cpu));
                }
        }
 
+       free_cpumask_var(core_mask);
        return count;
 }
 
index d5378e60fcddfcb44cb5223abb11b6a1f75a34e6..064307c2277eff8bb400bb1f161426ff6c61ee29 100644 (file)
@@ -338,10 +338,10 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
        nic_data->next_buffer_table += buffer->entries;
 
        EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
-               "(virt %p phys %lx)\n", buffer->index,
+               "(virt %p phys %llx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
-               (unsigned long long)buffer->dma_addr, len,
-               buffer->addr, virt_to_phys(buffer->addr));
+               (u64)buffer->dma_addr, len,
+               buffer->addr, (u64)virt_to_phys(buffer->addr));
 
        return 0;
 }
@@ -353,10 +353,10 @@ static void falcon_free_special_buffer(struct efx_nic *efx,
                return;
 
        EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
-               "(virt %p phys %lx)\n", buffer->index,
+               "(virt %p phys %llx)\n", buffer->index,
                buffer->index + buffer->entries - 1,
-               (unsigned long long)buffer->dma_addr, buffer->len,
-               buffer->addr, virt_to_phys(buffer->addr));
+               (u64)buffer->dma_addr, buffer->len,
+               buffer->addr, (u64)virt_to_phys(buffer->addr));
 
        pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
                            buffer->dma_addr);
@@ -2343,10 +2343,10 @@ int falcon_probe_port(struct efx_nic *efx)
                                 FALCON_MAC_STATS_SIZE);
        if (rc)
                return rc;
-       EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
-               (unsigned long long)efx->stats_buffer.dma_addr,
+       EFX_LOG(efx, "stats buffer at %llx (virt %p phys %llx)\n",
+               (u64)efx->stats_buffer.dma_addr,
                efx->stats_buffer.addr,
-               virt_to_phys(efx->stats_buffer.addr));
+               (u64)virt_to_phys(efx->stats_buffer.addr));
 
        return 0;
 }
@@ -2921,9 +2921,9 @@ int falcon_probe_nic(struct efx_nic *efx)
                goto fail4;
        BUG_ON(efx->irq_status.dma_addr & 0x0f);
 
-       EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
-               (unsigned long long)efx->irq_status.dma_addr,
-               efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
+       EFX_LOG(efx, "INT_KER at %llx (virt %p phys %llx)\n",
+               (u64)efx->irq_status.dma_addr,
+               efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr));
 
        falcon_probe_spi_devices(efx);
 
index d4fb4acdbebdfc9ca70d28a3abb9e0b88753dfd0..4e9bd380a5c2ea2b50661627e439da2a9bc2712e 100644 (file)
@@ -2649,8 +2649,6 @@ static int __devinit happy_meal_sbus_probe_one(struct of_device *op, int is_qfe)
        int err = -ENODEV;
 
        sbus_dp = to_of_device(op->dev.parent)->node;
-       if (is_qfe)
-               sbus_dp = to_of_device(op->dev.parent->parent)->node;
 
        /* We can match PCI devices too, do not accept those here. */
        if (strcmp(sbus_dp->name, "sbus"))
index bfca15da6f0f844fa379bcb7c7eade8914139e3c..14c11656e82cba06867cf8a789c620c0a9028fdf 100644 (file)
@@ -1082,8 +1082,8 @@ static int __init arlan_probe_here(struct net_device *dev,
        if (arlan_check_fingerprint(memaddr))
                return -ENODEV;
 
-       printk(KERN_NOTICE "%s: Arlan found at %x, \n ", dev->name, 
-              (int) virt_to_phys((void*)memaddr));
+       printk(KERN_NOTICE "%s: Arlan found at %llx, \n ", dev->name, 
+              (u64) virt_to_phys((void*)memaddr));
 
        ap->card = (void *) memaddr;
        dev->mem_start = memaddr;
index 9da5a4b811337562dbcf708dcad927756264381d..c3ea5fa7d05a6db4079bebb99ecc865b89eefca5 100644 (file)
@@ -38,7 +38,7 @@
 
 static LIST_HEAD(dying_tasks);
 static LIST_HEAD(dead_tasks);
-static cpumask_t marked_cpus = CPU_MASK_NONE;
+static cpumask_var_t marked_cpus;
 static DEFINE_SPINLOCK(task_mortuary);
 static void process_task_mortuary(void);
 
@@ -456,10 +456,10 @@ static void mark_done(int cpu)
 {
        int i;
 
-       cpu_set(cpu, marked_cpus);
+       cpumask_set_cpu(cpu, marked_cpus);
 
        for_each_online_cpu(i) {
-               if (!cpu_isset(i, marked_cpus))
+               if (!cpumask_test_cpu(i, marked_cpus))
                        return;
        }
 
@@ -468,7 +468,7 @@ static void mark_done(int cpu)
         */
        process_task_mortuary();
 
-       cpus_clear(marked_cpus);
+       cpumask_clear(marked_cpus);
 }
 
 
@@ -565,6 +565,20 @@ void sync_buffer(int cpu)
        mutex_unlock(&buffer_mutex);
 }
 
+int __init buffer_sync_init(void)
+{
+       if (!alloc_cpumask_var(&marked_cpus, GFP_KERNEL))
+               return -ENOMEM;
+
+       cpumask_clear(marked_cpus);
+       return 0;
+}
+
+void __exit buffer_sync_cleanup(void)
+{
+       free_cpumask_var(marked_cpus);
+}
+
 /* The function can be used to add a buffer worth of data directly to
  * the kernel buffer. The buffer is assumed to be a circular buffer.
  * Take the entries from index start and end at index end, wrapping
index 3110732c1835acc640dbbc248d7a5c02403f0d83..0ebf5db626796965fca5d44b6cda4d09a9a82708 100644 (file)
@@ -19,4 +19,8 @@ void sync_stop(void);
 /* sync the given CPU's buffer */
 void sync_buffer(int cpu);
 
+/* initialize/destroy the buffer system. */
+int buffer_sync_init(void);
+void buffer_sync_cleanup(void);
+
 #endif /* OPROFILE_BUFFER_SYNC_H */
index 3cffce90f82a9693999bb16c5741b6c2e4e842c2..ced39f602292b2b8328fbc64cb705d8504768ac8 100644 (file)
@@ -183,6 +183,10 @@ static int __init oprofile_init(void)
 {
        int err;
 
+       err = buffer_sync_init();
+       if (err)
+               return err;
+
        err = oprofile_arch_init(&oprofile_ops);
 
        if (err < 0 || timer) {
@@ -191,8 +195,10 @@ static int __init oprofile_init(void)
        }
 
        err = oprofilefs_register();
-       if (err)
+       if (err) {
                oprofile_arch_exit();
+               buffer_sync_cleanup();
+       }
 
        return err;
 }
@@ -202,6 +208,7 @@ static void __exit oprofile_exit(void)
 {
        oprofilefs_unregister();
        oprofile_arch_exit();
+       buffer_sync_cleanup();
 }
 
 
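Teardown now mirrors setup in reverse; the pairing, sketched:

        err = buffer_sync_init();               /* brought up first ... */
        if (err)
                return err;
        err = oprofilefs_register();
        if (err) {
                oprofile_arch_exit();
                buffer_sync_cleanup();          /* ... torn down last */
        }
        return err;
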
index 26c536b51c5acd80d5f4915bb4980a666bed06b5..5f333403c2ea0fdb4434942e555f3373871869ec 100644 (file)
@@ -42,6 +42,7 @@
 LIST_HEAD(dmar_drhd_units);
 
 static struct acpi_table_header * __initdata dmar_tbl;
+static acpi_size dmar_tbl_size;
 
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
@@ -288,8 +289,9 @@ static int __init dmar_table_detect(void)
        acpi_status status = AE_OK;
 
        /* if we could find DMAR table, then there are DMAR devices */
-       status = acpi_get_table(ACPI_SIG_DMAR, 0,
-                               (struct acpi_table_header **)&dmar_tbl);
+       status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
+                               (struct acpi_table_header **)&dmar_tbl,
+                               &dmar_tbl_size);
 
        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
@@ -489,6 +491,7 @@ void __init detect_intel_iommu(void)
                        iommu_detected = 1;
 #endif
        }
+       early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
        dmar_tbl = NULL;
 }
 
index eacfb13998bbd29449052e264a99080e2284bb8f..9aa4fe100a0d03c0b04be379fcea99c266e7d730 100644 (file)
@@ -143,7 +143,7 @@ config HOTPLUG_PCI_SHPC
 
 config HOTPLUG_PCI_RPA
        tristate "RPA PCI Hotplug driver"
-       depends on PPC_PSERIES && PPC64 && !HOTPLUG_PCI_FAKE
+       depends on PPC_PSERIES && EEH && !HOTPLUG_PCI_FAKE
        help
          Say Y here if you have a RPA system that supports PCI Hotplug.
 
index 45effc5726c0f63a55862c85eaa6d9cb4a38c706..8e44db040db7f4dba8cf8c12dc2087012965e16b 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/irq.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
 #include <linux/intel-iommu.h>
 #include "intr_remapping.h"
 
index d0c9736858689f62f19dc6fd85572dacc3c2d3ec..38257500738223486758e337cb49d7184ddcef5a 100644 (file)
@@ -133,6 +133,9 @@ static void set_downstream_devices_error_reporting(struct pci_dev *dev,
                                                   bool enable)
 {
        set_device_error_reporting(dev, &enable);
+
+       if (!dev->subordinate)
+               return;
        pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
 }
 
index 248b4db915526e5819c7050737f4467456aeda9f..5ea566e20b375410ed2cc5b19d30d8eec8992fd8 100644 (file)
@@ -103,6 +103,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
 static void pcie_portdrv_remove (struct pci_dev *dev)
 {
        pcie_port_device_remove(dev);
+       pci_disable_device(dev);
        kfree(pci_get_drvdata(dev));
 }
 
index f20d55368edb7edfb5d0a6bc9ef1a8746da6b8cb..92b9efe9bcaf628faa260c0e919eece0d2610e9f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/dmi.h>
+#include <linux/pci-aspm.h>
 #include "pci.h"
 
 int isa_dma_bridge_buggy;
@@ -1749,6 +1750,30 @@ static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_e100_interrupt);
 
+/*
+ * The 82575 and 82598 may experience data corruption issues when transitioning
+ * out of L0S.  To prevent this we need to disable L0S on the pci-e link
+ */
+static void __devinit quirk_disable_aspm_l0s(struct pci_dev *dev)
+{
+       dev_info(&dev->dev, "Disabling L0s\n");
+       pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
+
 static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
 {
        /* rev 1 ncr53c810 chips don't set the class at all which means
@@ -2097,7 +2122,7 @@ static void __devinit ht_disable_msi_mapping(struct pci_dev *dev)
 
                if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
                                         &flags) == 0) {
-                       dev_info(&dev->dev, "Enabling HT MSI Mapping\n");
+                       dev_info(&dev->dev, "Disabling HT MSI Mapping\n");
 
                        pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
                                              flags & ~HT_MSI_FLAGS_ENABLE);
@@ -2141,6 +2166,10 @@ static void __devinit nv_msi_ht_cap_quirk(struct pci_dev *dev)
        int pos;
        int found;
 
+       /* Enabling HT MSI mapping on this device breaks MCP51 */
+       if (dev->device == 0x270)
+               return;
+
        /* check if there is HT MSI cap or enabled on this device */
        found = ht_check_msi_mapping(dev);
 
index 94c9f911824ef5b3279205a41a87dd169935f1db..6bcca616a70405bf057520b064a2a29dcf302078 100644 (file)
@@ -1297,7 +1297,7 @@ static int __init acer_wmi_init(void)
 
        set_quirks();
 
-       if (!acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
+       if (acpi_video_backlight_support() && has_cap(ACER_CAP_BRIGHTNESS)) {
                interface->capability &= ~ACER_CAP_BRIGHTNESS;
                printk(ACER_INFO "Brightness must be controlled by "
                       "generic video driver\n");
index 1d768928e0bbaf0c9fe61923bcaa0f10b139062c..a52d4a11652d57aa7b045fa225777fdaecb252f2 100644 (file)
@@ -180,10 +180,13 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
        di->empty_uAh = battery_interpolate(scale, di->temp_C / 10);
        di->empty_uAh *= 1000; /* convert to µAh */
 
-       /* From Maxim Application Note 131: remaining capacity =
-        * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */
-       di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) /
-                           (di->full_active_uAh - di->empty_uAh);
+       if (di->full_active_uAh == di->empty_uAh)
+               di->rem_capacity = 0;
+       else
+               /* From Maxim Application Note 131: remaining capacity =
+                * ((ICA - Empty Value) / (Full Value - Empty Value)) x 100% */
+               di->rem_capacity = ((di->accum_current_uAh - di->empty_uAh) * 100L) /
+                                   (di->full_active_uAh - di->empty_uAh);
 
        if (di->rem_capacity < 0)
                di->rem_capacity = 0;
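The guard above avoids a divide-by-zero when the calibration data is
degenerate (full == empty).  A worked instance of the App Note 131
formula as a standalone sketch, with purely illustrative values:

	#include <stdio.h>

	/* remaining capacity = (ICA - empty) / (full - empty) * 100%,
	 * with the new degenerate-data guard; values in uAh */
	static long rem_capacity(long ica, long empty, long full)
	{
		long cap;

		if (full == empty)
			return 0;
		cap = (ica - empty) * 100L / (full - empty);
		return cap < 0 ? 0 : cap;
	}

	int main(void)
	{
		/* (600000 - 100000) * 100 / (1100000 - 100000) = 50 */
		printf("%ld%%\n", rem_capacity(600000, 100000, 1100000));
		return 0;
	}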
index f08e169ba1b51a150372753672741df102818591..7e30e5f6e032a30bbdae7d89355b49122b4f6b42 100644 (file)
@@ -129,7 +129,7 @@ static int wait_for_pin(struct bbc_i2c_bus *bp, u8 *status)
        bp->waiting = 1;
        add_wait_queue(&bp->wq, &wait);
        while (limit-- > 0) {
-               unsigned long val;
+               long val;
 
                val = wait_event_interruptible_timeout(
                                bp->wq,
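The type change above matters because wait_event_interruptible_timeout()
returns a long that can be negative (e.g. -ERESTARTSYS after a signal);
stored into an unsigned long, the sign is silently lost and any "< 0"
check can never fire.  A standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		long ret = -512;		/* e.g. -ERESTARTSYS */
		unsigned long uval = ret;	/* sign lost */
		long sval = ret;

		printf("as unsigned: %lu\n", uval);	/* huge positive */
		printf("as signed:   %ld\n", sval);	/* -512 */
		return 0;
	}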
index a9a9893a5f95b4867328b9396e2e097071e428ba..e6d1fc8c54f151514aa6057df9e183526b53938c 100644 (file)
@@ -38,9 +38,6 @@
 #include <linux/string.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
-
-#define MAJOR_NR       JSFD_MAJOR
-
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
index 9e1138a75e8bb52618f9d561acc8c0b3c664683e..a411702413d687af44676cf37e3e4c7720c64f82 100644 (file)
@@ -39,7 +39,7 @@ static u8 w1_gpio_read_bit(void *data)
 {
        struct w1_gpio_platform_data *pdata = data;
 
-       return gpio_get_value(pdata->pin);
+       return gpio_get_value(pdata->pin) ? 1 : 0;
 }
 
 static int __init w1_gpio_probe(struct platform_device *pdev)
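The "? 1 : 0" above is needed because gpio_get_value() may return any
non-zero value (some drivers return the raw bank bit, e.g. 0x100), and
the callback's u8 return type would truncate such a value to 0.  A
standalone illustration of the truncation, with a hypothetical raw
read helper:

	#include <stdio.h>

	/* hypothetical raw read returning the bank bit, not 0/1 */
	static int gpio_get_value_raw(void)
	{
		return 0x100;
	}

	int main(void)
	{
		unsigned char bad  = gpio_get_value_raw();	   /* 0 */
		unsigned char good = gpio_get_value_raw() ? 1 : 0; /* 1 */

		printf("bad=%u good=%u\n", bad, good);
		return 0;
	}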
index bf92802f2bbe93ab19b4531610fea7d3978a7abb..36e221beedcda87d386be2ff838836cb506800d8 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 
-#include <asm/mach-rdc321x/rdc321x_defs.h>
+#include <asm/rdc321x_defs.h>
 
 #define RDC_WDT_MASK   0x80000000 /* Mask */
 #define RDC_WDT_EN     0x00800000 /* Enable bit */
index eb0dfdeaa9494ac3043f13db5cbd1bf0c0dd5751..30963af5dba02960143f9ebf37b6f85d44de0dec 100644 (file)
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/string.h>
+#include <linux/bootmem.h>
 
 #include <asm/ptrace.h>
 #include <asm/irq.h>
+#include <asm/idle.h>
 #include <asm/sync_bitops.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -50,36 +52,55 @@ static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
 
-/* Packed IRQ information: binding type, sub-type index, and event channel. */
-struct packed_irq
-{
-       unsigned short evtchn;
-       unsigned char index;
-       unsigned char type;
-};
-
-static struct packed_irq irq_info[NR_IRQS];
-
-/* Binding types. */
-enum {
-       IRQT_UNBOUND,
+/* Interrupt types. */
+enum xen_irq_type {
+       IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
 };
 
-/* Convenient shorthand for packed representation of an unbound IRQ. */
-#define IRQ_UNBOUND    mk_irq_info(IRQT_UNBOUND, 0, 0)
+/*
+ * Packed IRQ information:
+ * type - enum xen_irq_type
+ * event channel - irq->event channel mapping
+ * cpu - cpu this event channel is bound to
+ * index - type-specific information:
+ *    PIRQ - vector, with MSB being "needs EOI"
+ *    VIRQ - virq number
+ *    IPI - IPI vector
+ *    EVTCHN -
+ */
+struct irq_info
+{
+       enum xen_irq_type type; /* type */
+       unsigned short evtchn;  /* event channel */
+       unsigned short cpu;     /* cpu bound */
+
+       union {
+               unsigned short virq;
+               enum ipi_vector ipi;
+               struct {
+                       unsigned short gsi;
+                       unsigned short vector;
+               } pirq;
+       } u;
+};
+
+static struct irq_info irq_info[NR_IRQS];
 
 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
        [0 ... NR_EVENT_CHANNELS-1] = -1
 };
-static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
-static u8 cpu_evtchn[NR_EVENT_CHANNELS];
-
-/* Reference counts for bindings to IRQs. */
-static int irq_bindcount[NR_IRQS];
+struct cpu_evtchn_s {
+       unsigned long bits[NR_EVENT_CHANNELS/BITS_PER_LONG];
+};
+static struct cpu_evtchn_s *cpu_evtchn_mask_p;
+static inline unsigned long *cpu_evtchn_mask(int cpu)
+{
+       return cpu_evtchn_mask_p[cpu].bits;
+}
 
 /* Xen will never allocate port zero for any purpose. */
 #define VALID_EVTCHN(chn)      ((chn) != 0)
@@ -87,27 +108,108 @@ static int irq_bindcount[NR_IRQS];
 static struct irq_chip xen_dynamic_chip;
 
 /* Constructor for packed IRQ information. */
-static inline struct packed_irq mk_irq_info(u32 type, u32 index, u32 evtchn)
+static struct irq_info mk_unbound_info(void)
+{
+       return (struct irq_info) { .type = IRQT_UNBOUND };
+}
+
+static struct irq_info mk_evtchn_info(unsigned short evtchn)
+{
+       return (struct irq_info) { .type = IRQT_EVTCHN, .evtchn = evtchn,
+                       .cpu = 0 };
+}
+
+static struct irq_info mk_ipi_info(unsigned short evtchn, enum ipi_vector ipi)
 {
-       return (struct packed_irq) { evtchn, index, type };
+       return (struct irq_info) { .type = IRQT_IPI, .evtchn = evtchn,
+                       .cpu = 0, .u.ipi = ipi };
+}
+
+static struct irq_info mk_virq_info(unsigned short evtchn, unsigned short virq)
+{
+       return (struct irq_info) { .type = IRQT_VIRQ, .evtchn = evtchn,
+                       .cpu = 0, .u.virq = virq };
+}
+
+static struct irq_info mk_pirq_info(unsigned short evtchn,
+                                   unsigned short gsi, unsigned short vector)
+{
+       return (struct irq_info) { .type = IRQT_PIRQ, .evtchn = evtchn,
+                       .cpu = 0, .u.pirq = { .gsi = gsi, .vector = vector } };
 }
 
 /*
  * Accessors for packed IRQ information.
  */
-static inline unsigned int evtchn_from_irq(int irq)
+static struct irq_info *info_for_irq(unsigned irq)
+{
+       return &irq_info[irq];
+}
+
+static unsigned int evtchn_from_irq(unsigned irq)
 {
-       return irq_info[irq].evtchn;
+       return info_for_irq(irq)->evtchn;
 }
 
-static inline unsigned int index_from_irq(int irq)
+static enum ipi_vector ipi_from_irq(unsigned irq)
 {
-       return irq_info[irq].index;
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_IPI);
+
+       return info->u.ipi;
 }
 
-static inline unsigned int type_from_irq(int irq)
+static unsigned virq_from_irq(unsigned irq)
 {
-       return irq_info[irq].type;
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_VIRQ);
+
+       return info->u.virq;
+}
+
+static unsigned gsi_from_irq(unsigned irq)
+{
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_PIRQ);
+
+       return info->u.pirq.gsi;
+}
+
+static unsigned vector_from_irq(unsigned irq)
+{
+       struct irq_info *info = info_for_irq(irq);
+
+       BUG_ON(info == NULL);
+       BUG_ON(info->type != IRQT_PIRQ);
+
+       return info->u.pirq.vector;
+}
+
+static enum xen_irq_type type_from_irq(unsigned irq)
+{
+       return info_for_irq(irq)->type;
+}
+
+static unsigned cpu_from_irq(unsigned irq)
+{
+       return info_for_irq(irq)->cpu;
+}
+
+static unsigned int cpu_from_evtchn(unsigned int evtchn)
+{
+       int irq = evtchn_to_irq[evtchn];
+       unsigned ret = 0;
+
+       if (irq != -1)
+               ret = cpu_from_irq(irq);
+
+       return ret;
 }
 
 static inline unsigned long active_evtchns(unsigned int cpu,
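The mk_*_info() helpers above replace the old hand-packed struct with a
tagged union built from C99 designated initializers and returned by
value.  A standalone sketch of the same pattern, with hypothetical
names:

	#include <stdio.h>

	enum tag { T_UNBOUND = 0, T_VIRQ };

	struct info {
		enum tag type;
		unsigned short evtchn;
		union {
			unsigned short virq;
		} u;
	};

	/* unnamed fields default to zero, so T_UNBOUND is the
	 * natural empty state */
	static struct info mk_virq(unsigned short evtchn,
				   unsigned short virq)
	{
		return (struct info){ .type = T_VIRQ, .evtchn = evtchn,
				      .u.virq = virq };
	}

	int main(void)
	{
		struct info i = mk_virq(5, 2);

		printf("type=%d evtchn=%u virq=%u\n",
		       i.type, i.evtchn, i.u.virq);
		return 0;
	}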
@@ -115,7 +217,7 @@ static inline unsigned long active_evtchns(unsigned int cpu,
                                           unsigned int idx)
 {
        return (sh->evtchn_pending[idx] &
-               cpu_evtchn_mask[cpu][idx] &
+               cpu_evtchn_mask(cpu)[idx] &
                ~sh->evtchn_mask[idx]);
 }
 
@@ -125,13 +227,13 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
        BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-       irq_to_desc(irq)->affinity = cpumask_of_cpu(cpu);
+       cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
 #endif
 
-       __clear_bit(chn, cpu_evtchn_mask[cpu_evtchn[chn]]);
-       __set_bit(chn, cpu_evtchn_mask[cpu]);
+       __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
+       __set_bit(chn, cpu_evtchn_mask(cpu));
 
-       cpu_evtchn[chn] = cpu;
+       irq_info[irq].cpu = cpu;
 }
 
 static void init_evtchn_cpu_bindings(void)
@@ -142,17 +244,11 @@ static void init_evtchn_cpu_bindings(void)
 
        /* By default all event channels notify CPU#0. */
        for_each_irq_desc(i, desc) {
-               desc->affinity = cpumask_of_cpu(0);
+               cpumask_copy(desc->affinity, cpumask_of(0));
        }
 #endif
 
-       memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
-       memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
-}
-
-static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
-{
-       return cpu_evtchn[evtchn];
+       memset(cpu_evtchn_mask(0), ~0, sizeof(cpu_evtchn_mask(0)));
 }
 
 static inline void clear_evtchn(int port)
@@ -232,9 +328,8 @@ static int find_unbound_irq(void)
        int irq;
        struct irq_desc *desc;
 
-       /* Only allocate from dynirq range */
        for (irq = 0; irq < nr_irqs; irq++)
-               if (irq_bindcount[irq] == 0)
+               if (irq_info[irq].type == IRQT_UNBOUND)
                        break;
 
        if (irq == nr_irqs)
@@ -244,6 +339,8 @@ static int find_unbound_irq(void)
        if (WARN_ON(desc == NULL))
                return -1;
 
+       dynamic_irq_init(irq);
+
        return irq;
 }
 
@@ -258,16 +355,13 @@ int bind_evtchn_to_irq(unsigned int evtchn)
        if (irq == -1) {
                irq = find_unbound_irq();
 
-               dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "event");
 
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+               irq_info[irq] = mk_evtchn_info(evtchn);
        }
 
-       irq_bindcount[irq]++;
-
        spin_unlock(&irq_mapping_update_lock);
 
        return irq;
@@ -282,12 +376,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
        spin_lock(&irq_mapping_update_lock);
 
        irq = per_cpu(ipi_to_irq, cpu)[ipi];
+
        if (irq == -1) {
                irq = find_unbound_irq();
                if (irq < 0)
                        goto out;
 
-               dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "ipi");
 
@@ -298,15 +392,12 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                evtchn = bind_ipi.port;
 
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
-
+               irq_info[irq] = mk_ipi_info(evtchn, ipi);
                per_cpu(ipi_to_irq, cpu)[ipi] = irq;
 
                bind_evtchn_to_cpu(evtchn, cpu);
        }
 
-       irq_bindcount[irq]++;
-
  out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
@@ -332,20 +423,17 @@ static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 
                irq = find_unbound_irq();
 
-               dynamic_irq_init(irq);
                set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_level_irq, "virq");
 
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+               irq_info[irq] = mk_virq_info(evtchn, virq);
 
                per_cpu(virq_to_irq, cpu)[virq] = irq;
 
                bind_evtchn_to_cpu(evtchn, cpu);
        }
 
-       irq_bindcount[irq]++;
-
        spin_unlock(&irq_mapping_update_lock);
 
        return irq;
@@ -358,7 +446,7 @@ static void unbind_from_irq(unsigned int irq)
 
        spin_lock(&irq_mapping_update_lock);
 
-       if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
+       if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();
@@ -366,11 +454,11 @@ static void unbind_from_irq(unsigned int irq)
                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
-                               [index_from_irq(irq)] = -1;
+                               [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
-                               [index_from_irq(irq)] = -1;
+                               [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
@@ -380,7 +468,7 @@ static void unbind_from_irq(unsigned int irq)
                bind_evtchn_to_cpu(evtchn, 0);
 
                evtchn_to_irq[evtchn] = -1;
-               irq_info[irq] = IRQ_UNBOUND;
+               irq_info[irq] = mk_unbound_info();
 
                dynamic_irq_cleanup(irq);
        }
@@ -498,8 +586,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
        for(i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        printk("  %d: event %d -> irq %d\n",
-                               cpu_evtchn[i], i,
-                               evtchn_to_irq[i]);
+                              cpu_from_evtchn(i), i,
+                              evtchn_to_irq[i]);
                }
        }
 
@@ -508,7 +596,6 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -521,11 +608,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 void xen_evtchn_do_upcall(struct pt_regs *regs)
 {
        int cpu = get_cpu();
+       struct pt_regs *old_regs = set_irq_regs(regs);
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
        static DEFINE_PER_CPU(unsigned, nesting_count);
        unsigned count;
 
+       exit_idle();
+       irq_enter();
+
        do {
                unsigned long pending_words;
 
@@ -550,7 +641,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
                                int irq = evtchn_to_irq[port];
 
                                if (irq != -1)
-                                       xen_do_IRQ(irq, regs);
+                                       handle_irq(irq, regs);
                        }
                }
 
@@ -561,12 +652,17 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
        } while(count != 1);
 
 out:
+       irq_exit();
+       set_irq_regs(old_regs);
+
        put_cpu();
 }
 
 /* Rebind a new event channel to an existing irq. */
 void rebind_evtchn_irq(int evtchn, int irq)
 {
+       struct irq_info *info = info_for_irq(irq);
+
        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);
@@ -576,11 +672,11 @@ void rebind_evtchn_irq(int evtchn, int irq)
        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
-          so the bindcount should be non-0 */
-       BUG_ON(irq_bindcount[irq] == 0);
+          so there should be a proper type */
+       BUG_ON(info->type == IRQT_UNBOUND);
 
        evtchn_to_irq[evtchn] = irq;
-       irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+       irq_info[irq] = mk_evtchn_info(evtchn);
 
        spin_unlock(&irq_mapping_update_lock);
 
@@ -690,8 +786,7 @@ static void restore_cpu_virqs(unsigned int cpu)
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;
 
-               BUG_ON(irq_info[irq].type != IRQT_VIRQ);
-               BUG_ON(irq_info[irq].index != virq);
+               BUG_ON(virq_from_irq(irq) != virq);
 
                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
@@ -703,7 +798,7 @@ static void restore_cpu_virqs(unsigned int cpu)
 
                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+               irq_info[irq] = mk_virq_info(evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
 
                /* Ready for use. */
@@ -720,8 +815,7 @@ static void restore_cpu_ipis(unsigned int cpu)
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;
 
-               BUG_ON(irq_info[irq].type != IRQT_IPI);
-               BUG_ON(irq_info[irq].index != ipi);
+               BUG_ON(ipi_from_irq(irq) != ipi);
 
                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
@@ -732,7 +826,7 @@ static void restore_cpu_ipis(unsigned int cpu)
 
                /* Record the new mapping. */
                evtchn_to_irq[evtchn] = irq;
-               irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+               irq_info[irq] = mk_ipi_info(evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
 
                /* Ready for use. */
@@ -812,8 +906,11 @@ void xen_irq_resume(void)
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name           = "xen-dyn",
+
+       .disable        = disable_dynirq,
        .mask           = disable_dynirq,
        .unmask         = enable_dynirq,
+
        .ack            = ack_dynirq,
        .set_affinity   = set_affinity_irq,
        .retrigger      = retrigger_dynirq,
@@ -822,6 +919,10 @@ static struct irq_chip xen_dynamic_chip __read_mostly = {
 void __init xen_init_IRQ(void)
 {
        int i;
+       size_t size = nr_cpu_ids * sizeof(struct cpu_evtchn_s);
+
+       cpu_evtchn_mask_p = alloc_bootmem(size);
+       BUG_ON(cpu_evtchn_mask_p == NULL);
 
        init_evtchn_cpu_bindings();
 
@@ -829,9 +930,5 @@ void __init xen_init_IRQ(void)
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);
 
-       /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
-       for (i = 0; i < nr_irqs; i++)
-               irq_bindcount[i] = 0;
-
        irq_ctx_init(smp_processor_id());
 }
index 56892a142ee273eef1112ea212792556ecfc806b..3ccd348d112d62036f4e63a038d23deca7ed67f7 100644 (file)
@@ -108,7 +108,7 @@ static void do_suspend(void)
        /* XXX use normal device tree? */
        xenbus_suspend();
 
-       err = stop_machine(xen_suspend, &cancelled, &cpumask_of_cpu(0));
+       err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
        if (err) {
                printk(KERN_ERR "failed to start xen_suspend: %d\n", err);
                goto out;
index e5eaa62fd17f14e1d1b47c31966f4c1646eafde7..e3fe9918faafb710446f5bd73445eeca5ce5b0d7 100644 (file)
@@ -274,6 +274,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
        int ret;
 
        BUG_ON(inode->i_state & I_SYNC);
+       WARN_ON(inode->i_state & I_NEW);
 
        /* Set I_SYNC, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
@@ -298,6 +299,7 @@ __sync_single_inode(struct inode *inode, struct writeback_control *wbc)
        }
 
        spin_lock(&inode_lock);
+       WARN_ON(inode->i_state & I_NEW);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & I_FREEING)) {
                if (!(inode->i_state & I_DIRTY) &&
@@ -470,6 +472,11 @@ void generic_sync_sb_inodes(struct super_block *sb,
                        break;
                }
 
+               if (inode->i_state & I_NEW) {
+                       requeue_io(inode);
+                       continue;
+               }
+
                if (wbc->nonblocking && bdi_write_congested(bdi)) {
                        wbc->encountered_congestion = 1;
                        if (!sb_is_blkdev_sb(sb))
@@ -531,7 +538,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
                list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                        struct address_space *mapping;
 
-                       if (inode->i_state & (I_FREEING|I_WILL_FREE))
+                       if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
                                continue;
                        mapping = inode->i_mapping;
                        if (mapping->nrpages == 0)
index 913ab2d9a5d10ecd183630c1a25770435613aca5..826fb0b9d1c38f4dbe4ad16c6ec938a7e31cdc88 100644 (file)
@@ -359,6 +359,7 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
                invalidate_inode_buffers(inode);
                if (!atomic_read(&inode->i_count)) {
                        list_move(&inode->i_list, dispose);
+                       WARN_ON(inode->i_state & I_NEW);
                        inode->i_state |= I_FREEING;
                        count++;
                        continue;
@@ -460,6 +461,7 @@ static void prune_icache(int nr_to_scan)
                                continue;
                }
                list_move(&inode->i_list, &freeable);
+               WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_FREEING;
                nr_pruned++;
        }
@@ -656,6 +658,7 @@ void unlock_new_inode(struct inode *inode)
         * just created it (so there can be no old holders
         * that haven't tested I_LOCK).
         */
+       WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
        inode->i_state &= ~(I_LOCK|I_NEW);
        wake_up_inode(inode);
 }
@@ -1145,6 +1148,7 @@ void generic_delete_inode(struct inode *inode)
 
        list_del_init(&inode->i_list);
        list_del_init(&inode->i_sb_list);
+       WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        inodes_stat.nr_inodes--;
        spin_unlock(&inode_lock);
@@ -1186,16 +1190,19 @@ static void generic_forget_inode(struct inode *inode)
                        spin_unlock(&inode_lock);
                        return;
                }
+               WARN_ON(inode->i_state & I_NEW);
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode_lock);
+               WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
                inodes_stat.nr_unused--;
                hlist_del_init(&inode->i_hash);
        }
        list_del_init(&inode->i_list);
        list_del_init(&inode->i_sb_list);
+       WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        inodes_stat.nr_inodes--;
        spin_unlock(&inode_lock);
index 3a48ba5179d55000e4d6b26f5a1eb7bfb5a5891d..14f502b89cf5c2eb46cf8f2fb476d055a7fb118a 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -699,12 +699,12 @@ pipe_rdwr_fasync(int fd, struct file *filp, int on)
        int retval;
 
        mutex_lock(&inode->i_mutex);
-
        retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
-
-       if (retval >= 0)
+       if (retval >= 0) {
                retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
-
+               if (retval < 0) /* this can happen only if on != 0 */
+                       fasync_helper(-1, filp, 0, &pipe->fasync_readers);
+       }
        mutex_unlock(&inode->i_mutex);
 
        if (retval < 0)
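The new error path above avoids leaving the readers' fasync entry
registered when the writers' registration fails.  A standalone sketch
of that rollback pattern, with hypothetical stand-ins for the two
fasync_helper() calls:

	#include <stdio.h>

	static int register_readers(void) { return 0; }
	static int register_writers(void) { return -12; } /* -ENOMEM */
	static void unregister_readers(void) { puts("rolled back"); }

	static int setup_fasync_pair(void)
	{
		int err = register_readers();

		if (err < 0)
			return err;
		err = register_writers();
		if (err < 0)
			unregister_readers();	/* undo step one */
		return err;
	}

	int main(void)
	{
		printf("setup_fasync_pair() = %d\n", setup_fasync_pair());
		return 0;
	}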
index 321728f48f2d0c38204a312fbc67685428ee318e..2a79603103492220c85c8828efba9a6b9e15c66d 100644 (file)
@@ -184,15 +184,7 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
                                offset = 0;
                        }
 
-                       if (msblk->stream.avail_out == 0) {
-                               if (page == pages) {
-                                       ERROR("zlib_inflate tried to "
-                                               "decompress too much data, "
-                                               "expected %d bytes.  Zlib "
-                                               "data probably corrupt\n",
-                                               srclength);
-                                       goto release_mutex;
-                               }
+                       if (msblk->stream.avail_out == 0 && page < pages) {
                                msblk->stream.next_out = buffer[page++];
                                msblk->stream.avail_out = PAGE_CACHE_SIZE;
                        }
@@ -209,25 +201,20 @@ int squashfs_read_data(struct super_block *sb, void **buffer, u64 index,
                                zlib_init = 1;
                        }
 
-                       zlib_err = zlib_inflate(&msblk->stream, Z_NO_FLUSH);
+                       zlib_err = zlib_inflate(&msblk->stream, Z_SYNC_FLUSH);
 
                        if (msblk->stream.avail_in == 0 && k < b)
                                put_bh(bh[k++]);
                } while (zlib_err == Z_OK);
 
                if (zlib_err != Z_STREAM_END) {
-                       ERROR("zlib_inflate returned unexpected result"
-                               " 0x%x, srclength %d, avail_in %d,"
-                               " avail_out %d\n", zlib_err, srclength,
-                               msblk->stream.avail_in,
-                               msblk->stream.avail_out);
+                       ERROR("zlib_inflate error, data probably corrupt\n");
                        goto release_mutex;
                }
 
                zlib_err = zlib_inflateEnd(&msblk->stream);
                if (zlib_err != Z_OK) {
-                       ERROR("zlib_inflateEnd returned unexpected result 0x%x,"
-                               " srclength %d\n", zlib_err, srclength);
+                       ERROR("zlib_inflate error, data probably corrupt\n");
                        goto release_mutex;
                }
                length = msblk->stream.total_out;
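Switching to Z_SYNC_FLUSH above lets the inflate loop hand out each
output page as it fills instead of special-casing avail_out == 0.  A
userspace analogue of such a drain-as-you-go loop, using stock zlib
rather than the kernel's zlib_* wrappers:

	#include <stdio.h>
	#include <string.h>
	#include <zlib.h>

	int main(void)
	{
		unsigned char src[4096], comp[8192], out[512];
		uLongf clen = sizeof(comp);
		int err;

		memset(src, 'x', sizeof(src));
		compress(comp, &clen, src, sizeof(src));

		z_stream s = { .next_in = comp, .avail_in = clen };
		inflateInit(&s);
		do {
			s.next_out = out;	/* fresh "page" */
			s.avail_out = sizeof(out);
			err = inflate(&s, Z_SYNC_FLUSH);
			/* consume sizeof(out) - s.avail_out bytes here */
		} while (err == Z_OK);
		inflateEnd(&s);
		return err == Z_STREAM_END ? 0 : 1;
	}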
index 8349ed6b1412aa13e9f23d17fad91e66c9ec1915..6ce501447ada090cac2f5d8ec575030070356831 100644 (file)
@@ -371,8 +371,10 @@ retry:
                                continue;
                        if (!grab_super(old))
                                goto retry;
-                       if (s)
+                       if (s) {
+                               up_write(&s->s_umount);
                                destroy_super(s);
+                       }
                        return old;
                }
        }
@@ -387,6 +389,7 @@ retry:
        err = set(s, data);
        if (err) {
                spin_unlock(&sb_lock);
+               up_write(&s->s_umount);
                destroy_super(s);
                return ERR_PTR(err);
        }
index cb329edc925b915ae5e590edeb4840810f811326..aa1016bb913405e4e4f77c5bee28e7f38db2b44c 100644 (file)
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
+#include "xfs_sb.h"
+#include "xfs_inum.h"
+#include "xfs_ag.h"
+#include "xfs_dmapi.h"
+#include "xfs_mount.h"
+
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
@@ -1435,10 +1441,12 @@ xfs_unregister_buftarg(
 
 void
 xfs_free_buftarg(
-       xfs_buftarg_t           *btp)
+       struct xfs_mount        *mp,
+       struct xfs_buftarg      *btp)
 {
        xfs_flush_buftarg(btp, 1);
-       xfs_blkdev_issue_flush(btp);
+       if (mp->m_flags & XFS_MOUNT_BARRIER)
+               xfs_blkdev_issue_flush(btp);
        xfs_free_bufhash(btp);
        iput(btp->bt_mapping->host);
 
index 288ae7c4c800a5040c7c89ce8dd2a54622e2b7f2..9b4d666ad31f953c640ad4dfdec70a9052a76adc 100644 (file)
@@ -413,7 +413,7 @@ static inline int XFS_bwrite(xfs_buf_t *bp)
  *     Handling of buftargs.
  */
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
-extern void xfs_free_buftarg(xfs_buftarg_t *);
+extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
index c71e226da7f5c4bd36f337b06e5315228cea9a67..32ae5028e96b5cb8c8bafaf5ae93089ee798855a 100644 (file)
@@ -734,15 +734,15 @@ xfs_close_devices(
 {
        if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
                struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
-               xfs_free_buftarg(mp->m_logdev_targp);
+               xfs_free_buftarg(mp, mp->m_logdev_targp);
                xfs_blkdev_put(logdev);
        }
        if (mp->m_rtdev_targp) {
                struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
-               xfs_free_buftarg(mp->m_rtdev_targp);
+               xfs_free_buftarg(mp, mp->m_rtdev_targp);
                xfs_blkdev_put(rtdev);
        }
-       xfs_free_buftarg(mp->m_ddev_targp);
+       xfs_free_buftarg(mp, mp->m_ddev_targp);
 }
 
 /*
@@ -811,9 +811,9 @@ xfs_open_devices(
 
  out_free_rtdev_targ:
        if (mp->m_rtdev_targp)
-               xfs_free_buftarg(mp->m_rtdev_targp);
+               xfs_free_buftarg(mp, mp->m_rtdev_targp);
  out_free_ddev_targ:
-       xfs_free_buftarg(mp->m_ddev_targp);
+       xfs_free_buftarg(mp, mp->m_ddev_targp);
  out_close_rtdev:
        if (rtdev)
                xfs_blkdev_put(rtdev);
index e2fb6210d4c58e7e9609d9e5c584210d5a4058c3..478e587087feddcd975e6ab62fa5e1655136b59e 100644 (file)
@@ -246,9 +246,6 @@ xfs_iget_cache_miss(
                goto out_destroy;
        }
 
-       if (lock_flags)
-               xfs_ilock(ip, lock_flags);
-
        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
@@ -256,7 +253,16 @@ xfs_iget_cache_miss(
         */
        if (radix_tree_preload(GFP_KERNEL)) {
                error = EAGAIN;
-               goto out_unlock;
+               goto out_destroy;
+       }
+
+       /*
+        * Because the inode hasn't been added to the radix-tree yet it can't
+        * be found by another thread, so we can do the non-sleeping lock here.
+        */
+       if (lock_flags) {
+               if (!xfs_ilock_nowait(ip, lock_flags))
+                       BUG();
        }
 
        mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
@@ -284,7 +290,6 @@ xfs_iget_cache_miss(
 out_preload_end:
        write_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
-out_unlock:
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
 out_destroy:
index b1047de2fffd5a66d465c9bdff399b82205cf596..61af610d79b395248aeb10b0dac0225d15c388af 100644 (file)
@@ -1455,10 +1455,19 @@ xlog_recover_add_to_trans(
        item = item->ri_prev;
 
        if (item->ri_total == 0) {              /* first region to be added */
-               item->ri_total  = in_f->ilf_size;
-               ASSERT(item->ri_total <= XLOG_MAX_REGIONS_IN_ITEM);
-               item->ri_buf = kmem_zalloc((item->ri_total *
-                                           sizeof(xfs_log_iovec_t)), KM_SLEEP);
+               if (in_f->ilf_size == 0 ||
+                   in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
+                       xlog_warn(
+       "XFS: bad number of regions (%d) in inode log format",
+                                 in_f->ilf_size);
+                       ASSERT(0);
+                       return XFS_ERROR(EIO);
+               }
+
+               item->ri_total = in_f->ilf_size;
+               item->ri_buf =
+                       kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
+                                   KM_SLEEP);
        }
        ASSERT(item->ri_total > item->ri_cnt);
        /* Description region is ri_buf[0] */
index a62720a7edc0165b7b698a5d7a2d14841a78f27f..ab0b85cf21f38df08b0eb3a3b752b42e2453d8fa 100644 (file)
@@ -144,6 +144,7 @@ void __iomem *acpi_os_map_memory(acpi_physical_address where,
                                acpi_size length);
 
 void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
index c8e8cf45830f6c5203f5d7e13f1531ff949d34ce..cc40102fe2f3568f262a23862da91010eddde864 100644 (file)
@@ -130,6 +130,10 @@ acpi_get_table_header(acpi_string signature,
                      struct acpi_table_header *out_table_header);
 
 acpi_status
+acpi_get_table_with_size(acpi_string signature,
+              u32 instance, struct acpi_table_header **out_table,
+              acpi_size *tbl_size);
+acpi_status
 acpi_get_table(acpi_string signature,
               u32 instance, struct acpi_table_header **out_table);
 
index afb3396ba5ede7c8ef97abaeea48ad0421efdab7..f305834b4799123df5d3e1efeb499ef13336ef8a 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_SWAB_H
 #define _ASM_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 #  define __SWAB_64_THRU_32__
index b0e63c672ebdf5a065ddea3374558a9638bc43c1..00f45ff081a63fc9f4401d2656346e1e5444f469 100644 (file)
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
                                        __typeof__(type) per_cpu_var(name)
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long.  percpu_read() evaluates to an lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var().  Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var)                                              \
+  ({                                                                   \
+       typeof(per_cpu_var(var)) __tmp_var__;                           \
+       __tmp_var__ = get_cpu_var(var);                                 \
+       put_cpu_var(var);                                               \
+       __tmp_var__;                                                    \
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op)                           \
+do {                                                                   \
+       get_cpu_var(var) op val;                                        \
+       put_cpu_var(var);                                               \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val)                __percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val)          __percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val)          __percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val)          __percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val)           __percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val)          __percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
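A usage sketch for the accessors defined above, with a hypothetical
per-cpu counter (arches that override these collapse each access to a
single instruction; the generic fallback shown in the header just
brackets the access with get/put_cpu_var()):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(int, hits);

	static void record_hit(void)
	{
		percpu_add(hits, 1);	/* preemption-safe increment */
	}

	static int hits_on_this_cpu(void)
	{
		return percpu_read(hits);
	}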
index 79a7ff925bf8ea24f0d2fa83a02115113d772a34..4ce48e878530d9141bb9429cdfbe11a67f139f4c 100644 (file)
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
 extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
index c61fab1dd2f82cbde0df850274a595d4c79aa450..5406e70aba864d1d03f154e36b2f212baacb93c4 100644 (file)
        *(.initcall7.init)                                              \
        *(.initcall7s.init)
 
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to the output section for the percpu area.  If
+ * @vaddr is not blank, it specifies an explicit base address and all
+ * percpu symbols will be offset from that address.  If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank.  Be warned that
+ * output PHDR is sticky.  If @phdr is specified, the next output
+ * section in the linker script will go there too.  @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr)                                      \
+       VMLINUX_SYMBOL(__per_cpu_load) = .;                             \
+       .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)          \
+                               - LOAD_OFFSET) {                        \
+               VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
+               *(.data.percpu.first)                                   \
+               *(.data.percpu.page_aligned)                            \
+               *(.data.percpu)                                         \
+               *(.data.percpu.shared_aligned)                          \
+               VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
+       } phdr                                                          \
+       . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Aligns to @align and outputs the output section for the percpu area.
+ * This macro doesn't manipulate @vaddr or @phdr; __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu, which is required for relocatable x86_32
+ * configurations.
+ */
 #define PERCPU(align)                                                  \
        . = ALIGN(align);                                               \
-       VMLINUX_SYMBOL(__per_cpu_start) = .;                            \
-       .data.percpu  : AT(ADDR(.data.percpu) - LOAD_OFFSET) {          \
+       .data.percpu    : AT(ADDR(.data.percpu) - LOAD_OFFSET) {        \
+               VMLINUX_SYMBOL(__per_cpu_load) = .;                     \
+               VMLINUX_SYMBOL(__per_cpu_start) = .;                    \
+               *(.data.percpu.first)                                   \
                *(.data.percpu.page_aligned)                            \
                *(.data.percpu)                                         \
                *(.data.percpu.shared_aligned)                          \
-       }                                                               \
-       VMLINUX_SYMBOL(__per_cpu_end) = .;
+               VMLINUX_SYMBOL(__per_cpu_end) = .;                      \
+       }
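A hedged usage sketch for the two forms (placement and PHDR name are
illustrative): a vmlinux.lds.S that needs no fixed base uses the simple
form, while one that wants the percpu area at an explicit address in
its own program header passes both arguments:

	/* common case: percpu area placed by the linker */
	PERCPU(PAGE_SIZE)

	/* explicit base address 0 in a dedicated :percpu PHDR */
	PERCPU_VADDR(0, :percpu)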
index 97973e10182528fd2f6bcf0b4976814b55d56cbe..54dab001d6d12d86766fe75e4eed06158a823a50 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_M32R_SWAB_H
 #define _ASM_M32R_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 #  define __SWAB_64_THRU_32__
index 4504d1b4b4776e9a104ea349362806c5fbaef436..bd818a820ca8f2a3d8d4800f2828c9f2e76ff17b 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef _ASM_SWAB_H
 #define _ASM_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifdef __GNUC__
 
index 6fce2fc2d124bc184da9f10f40e31ed97254a7f5..78199151c00b2b1a996f2723f5496159da66a6fb 100644 (file)
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
 typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
 
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
 int acpi_boot_table_init (void);
index 95837bfb52561dded74d03982bb252bdae4776f5..455d83219fae5f3e40bdf83575da34ea9c864da7 100644 (file)
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT                0
 #define BOOTMEM_EXCLUSIVE      (1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+                          unsigned long size,
+                          int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
-                                unsigned long physaddr,
-                                unsigned long size,
-                                int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
+                               unsigned long physaddr,
+                               unsigned long size,
+                               int flags);
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
                             unsigned long align,
                             unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
                                     unsigned long align,
                                     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-                                unsigned long align,
-                                unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
                                  unsigned long size,
                                  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
                                  unsigned long size,
                                  unsigned long align,
                                  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+                                unsigned long align,
+                                unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
                                      unsigned long size,
                                      unsigned long align,
                                      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
        __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
        __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-       __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
        __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
        __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-       __alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
        __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
        __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+       __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+       __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+       __alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
        __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
                                   int flags);
index 07ae8f846055a5b9004d30fec723ffd18b4d3bc0..5b5d4731f95654f45da356d5c631bb7758bd3395 100644 (file)
@@ -6,6 +6,7 @@
 #define CODA_PSDEV_MAJOR 67
 #define MAX_CODADEVS  5           /* how many do we allow */
 
+#ifdef __KERNEL__
 struct kstatfs;
 
 /* communication pending/processing queues */
@@ -24,7 +25,6 @@ static inline struct venus_comm *coda_vcp(struct super_block *sb)
        return (struct venus_comm *)((sb)->s_fs_info);
 }
 
-
 /* upcalls */
 int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
 int venus_getattr(struct super_block *sb, struct CodaFid *fid,
@@ -64,6 +64,12 @@ int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
 int venus_fsync(struct super_block *sb, struct CodaFid *fid);
 int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
 
+/*
+ * Statistics
+ */
+
+extern struct venus_comm coda_comms[];
+#endif /* __KERNEL__ */
 
 /* messages between coda filesystem in kernel and Venus */
 struct upc_req {
@@ -82,11 +88,4 @@ struct upc_req {
 #define REQ_WRITE  0x4
 #define REQ_ABORT  0x8
 
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-
 #endif
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644 (file)
index 0000000..1152721
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_BUNZIP2_H
+#define DECOMPRESS_BUNZIP2_H
+
+int bunzip2(unsigned char *inbuf, int len,
+           int(*fill)(void*, unsigned int),
+           int(*flush)(void*, unsigned int),
+           unsigned char *output,
+           int *pos,
+           void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644 (file)
index 0000000..6dfb856
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef DECOMPRESS_GENERIC_H
+#define DECOMPRESS_GENERIC_H
+
+/* Minimal chunk size to be read.
+ * Bzip2 prefers at least 4096.
+ * Lzma prefers 0x10000. */
+#define COMPR_IOBUF_SIZE       4096
+
+typedef int (*decompress_fn) (unsigned char *inbuf, int len,
+                             int(*fill)(void*, unsigned int),
+                             int(*writebb)(void*, unsigned int),
+                             unsigned char *output,
+                             int *posp,
+                             void(*error)(char *x));
+
+/* inbuf   - input buffer
+ * len     - length of pre-read data in inbuf
+ * fill    - function to fill inbuf if empty
+ * writebb - function to write out outbuf
+ * posp    - if non-null, input position (number of bytes read) will be
+ *           returned here
+ *
+ * If len != 0, inbuf is initialized with the pre-read data and fill
+ * should not be called.
+ * If len = 0, inbuf is allocated but empty; its size is IOBUF_SIZE.
+ * fill should then be called (repeatedly...) to read data, at most
+ * IOBUF_SIZE bytes at a time.
+ */
+
+/* Utility routine to detect the decompression method */
+decompress_fn decompress_method(const unsigned char *inbuf, int len,
+                               const char **name);
+
+#endif
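A hedged sketch of a caller honouring the decompress_fn contract above,
assuming the whole compressed image is already in memory (so len != 0
and no fill callback is needed); the helper names are illustrative:

	#include <linux/kernel.h>
	#include <linux/decompress/generic.h>

	static void decomp_error(char *msg)
	{
		pr_err("decompress: %s\n", msg);
	}

	static int decompress_image(unsigned char *buf, int len,
				    unsigned char *out)
	{
		const char *name;
		decompress_fn fn = decompress_method(buf, len, &name);

		if (!fn)
			return -1;	/* unrecognised magic */
		pr_info("decompressing %s image\n", name);
		return fn(buf, len, NULL, NULL, out, NULL, decomp_error);
	}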
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644 (file)
index 0000000..f9b06cc
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
+/* Other housekeeping constants */
+#define INBUFSIZ 4096
+
+int gunzip(unsigned char *inbuf, int len,
+          int(*fill)(void*, unsigned int),
+          int(*flush)(void*, unsigned int),
+          unsigned char *output,
+          int *pos,
+          void(*error_fn)(char *x));
+#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644 (file)
index 0000000..12ff8c3
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * linux/compr_mm.h
+ *
+ * Memory management for pre-boot and ramdisk uncompressors
+ *
+ * Authors: Alain Knaff <alain@knaff.lu>
+ *
+ */
+
+#ifndef DECOMPR_MM_H
+#define DECOMPR_MM_H
+
+#ifdef STATIC
+
+/* Code active when included from pre-boot environment: */
+
+/* A trivial malloc implementation, adapted from
+ *  malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+       void *p;
+
+       if (size < 0)
+               error("Malloc error");
+       if (!malloc_ptr)
+               malloc_ptr = free_mem_ptr;
+
+       malloc_ptr = (malloc_ptr + 3) & ~3;     /* Align */
+
+       p = (void *)malloc_ptr;
+       malloc_ptr += size;
+
+       if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+               error("Out of memory");
+
+       malloc_count++;
+       return p;
+}
+
+static void free(void *where)
+{
+       malloc_count--;
+       if (!malloc_count)
+               malloc_ptr = free_mem_ptr;
+}
+
+#define large_malloc(a) malloc(a)
+#define large_free(a) free(a)
+
+#define set_error_fn(x)
+
+#define INIT
+
+#else /* STATIC */
+
+/* Code active when compiled standalone for use when loading ramdisk: */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Use defines rather than static inline in order to avoid spurious
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate). */
+
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+
+#define large_malloc(a) vmalloc(a)
+#define large_free(a) vfree(a)
+
+static void(*error)(char *m);
+#define set_error_fn(x) error = x;
+
+#define INIT __init
+#define STATIC
+
+#include <linux/init.h>
+
+#endif /* STATIC */
+
+#endif /* DECOMPR_MM_H */
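The pre-boot malloc above is a bump allocator: each malloc() advances a
pointer, and free() only rewinds the pool once every outstanding
allocation has been released (malloc_count reaching zero).  A
standalone model of that behaviour, with an arbitrary base address:

	#include <stdio.h>

	static unsigned long base = 0x1000, ptr;
	static int count;

	static unsigned long bump_alloc(int size)
	{
		unsigned long p;

		if (!ptr)
			ptr = base;
		ptr = (ptr + 3) & ~3UL;	/* 4-byte align, as above */
		p = ptr;
		ptr += size;
		count++;
		return p;
	}

	static void bump_free(void)
	{
		if (!--count)
			ptr = base;	/* wholesale reset */
	}

	int main(void)
	{
		unsigned long a = bump_alloc(10);
		unsigned long b = bump_alloc(10);

		bump_free();
		bump_free();
		printf("a=%#lx b=%#lx reset=%d\n", a, b, ptr == base);
		return 0;
	}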
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644 (file)
index 0000000..7796538
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef DECOMPRESS_UNLZMA_H
+#define DECOMPRESS_UNLZMA_H
+
+int unlzma(unsigned char *, int,
+          int(*fill)(void*, unsigned int),
+          int(*flush)(void*, unsigned int),
+          unsigned char *output,
+          int *posp,
+          void(*error)(char *x)
+       );
+
+#endif
index 5ca54d77079f1dd14b713d54e5f4566f01da59dd..7605c5e9589f4cc9d08c4aca7ccebd3f707fb710 100644 (file)
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
 #endif
 }
 
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+       ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+       elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
 static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
 {
 #ifdef ELF_CORE_COPY_TASK_REGS
index bc492048c349fd5ee21e3cc7432f4e68674eb07a..718bf21c57544e177cb7111a6c75e55429bad661 100644 (file)
@@ -44,11 +44,11 @@ struct in6_addr
  * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
  * in network byte order, not in host byte order as are the IPv4 equivalents
  */
+#ifdef __KERNEL__
 extern const struct in6_addr in6addr_any;
 #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
 extern const struct in6_addr in6addr_loopback;
 #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
-#ifdef __KERNEL__
 extern const struct in6_addr in6addr_linklocal_allnodes;
 #define IN6ADDR_LINKLOCAL_ALLNODES_INIT        \
                { { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
index 9127f6b51a39a0fac962030a1cedde2cdadd13b7..472f11765f608318093ed82919f5c7fdf4f00a7d 100644 (file)
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
index f899b502f18622dce2c509aac5c656ef8150216f..27a67536511ef45907a4108c46983fa1977acb44 100644 (file)
@@ -182,11 +182,11 @@ struct irq_desc {
        unsigned int            irqs_unhandled;
        spinlock_t              lock;
 #ifdef CONFIG_SMP
-       cpumask_t               affinity;
+       cpumask_var_t           affinity;
        unsigned int            cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-       cpumask_t               pending_mask;
+       cpumask_var_t           pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
@@ -422,4 +422,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:      pointer to irq_desc struct
+ * @cpu:       cpu which will be handling the cpumasks
+ * @boot:      true if the allocation must come from bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+{
+       int node;
+
+       if (boot) {
+               alloc_bootmem_cpumask_var(&desc->affinity);
+               cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+               alloc_bootmem_cpumask_var(&desc->pending_mask);
+               cpumask_clear(desc->pending_mask);
+#endif
+               return true;
+       }
+
+       node = cpu_to_node(cpu);
+
+       if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+               return false;
+       cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+               free_cpumask_var(desc->affinity);
+               return false;
+       }
+       cpumask_clear(desc->pending_mask);
+#endif
+       return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:  pointer to old irq_desc struct
+ * @new_desc:  pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_mask are copied to the new irq_desc.
+ * If !CONFIG_CPUMASK_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct, so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+                                                               bool boot)
+{
+       return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+                                       struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
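
The expected caller pattern for these helpers, mirroring the kernel/irq changes later in this merge (sketch; error handling stays with the caller):

	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, cpu_to_node(cpu));
	if (desc && !init_alloc_desc_masks(desc, cpu, false)) {
		/* cpumask allocation failed: discard the descriptor */
		kfree(desc);
		desc = NULL;
	}
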
index 86af92e9e84c0e3115845bcd3abbc125d28b5088..887477bc2ab0841287460aef51517beb2377ea58 100644 (file)
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc)                          \
        for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
index 32851eef48f058908f2e30312007692540cedd2e..2ec6cc14a1146e584ee7da0f99e9c1d67349ce61 100644 (file)
@@ -182,6 +182,14 @@ struct kprobe_blackpoint {
 DECLARE_PER_CPU(struct kprobe *, current_kprobe);
 DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+/*
+ * For #ifdef avoidance:
+ */
+static inline int kprobes_built_in(void)
+{
+       return 1;
+}
+
 #ifdef CONFIG_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                   struct pt_regs *regs);
@@ -271,8 +279,16 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
 
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES: */
 
+static inline int kprobes_built_in(void)
+{
+       return 0;
+}
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+       return 0;
+}
 static inline struct kprobe *get_kprobe(void *addr)
 {
        return NULL;
@@ -329,5 +345,5 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
 static inline void kprobe_flush_task(struct task_struct *tk)
 {
 }
-#endif                         /* CONFIG_KPROBES */
-#endif                         /* _LINUX_KPROBES_H */
+#endif /* CONFIG_KPROBES */
+#endif /* _LINUX_KPROBES_H */
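
The point of kprobes_built_in() is that callers can drop their #ifdef CONFIG_KPROBES blocks; with !CONFIG_KPROBES the stub returns 0 and the compiler discards the dead branch. A hedged sketch of a fault-handler hook (real callers also check for kernel mode and disable preemption first):

	if (kprobes_built_in() && kprobe_fault_handler(regs, 14))
		return;		/* fault consumed by a kprobe */
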
index 0b4df7eba85242d0897d1b3312c6967955be6a0a..5b4e28bcb788de92ffc3993c39e912f675a11c7f 100644 (file)
@@ -49,4 +49,5 @@
 #define FUTEXFS_SUPER_MAGIC    0xBAD1DEA
 #define INOTIFYFS_SUPER_MAGIC  0x2BAD1DEA
 
+#define STACK_END_MAGIC                0x57AC6E9D
 #endif /* __LINUX_MAGIC_H__ */
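
STACK_END_MAGIC is a sentinel written at the far end of a task's stack so an overflow can be detected later; the arming side appears in the kernel/fork.c hunk below. A sketch of the checking side (hypothetical placement, e.g. an oops path):

	unsigned long *stackend = end_of_stack(current);

	if (*stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
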
index 92915e81443ff028afc2ac8a188a7bdbf94721ee..d84feb7bdbf00b8fa054364c7fa2d80d2a20b2a6 100644 (file)
@@ -276,4 +276,7 @@ struct mm_struct {
 #endif
 };
 
+/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
+#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)
+
 #endif /* _LINUX_MM_TYPES_H */
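
Typical use of the new accessor (sketch; local_flush_tlb() stands in for an arch-specific flush):

	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		local_flush_tlb();	/* this CPU has used the mm */
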
index 139d7c88d9c91ddebe065b1898916f17d2dc27f1..3d1b7bde128361d0905bfbb6b28375c91d6465c3 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef MMIOTRACE_H
-#define MMIOTRACE_H
+#ifndef _LINUX_MMIOTRACE_H
+#define _LINUX_MMIOTRACE_H
 
 #include <linux/types.h>
 #include <linux/list.h>
@@ -13,28 +13,34 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
                                unsigned long condition, struct pt_regs *);
 
 struct kmmio_probe {
-       struct list_head list; /* kmmio internal list */
-       unsigned long addr; /* start location of the probe point */
-       unsigned long len; /* length of the probe region */
-       kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
-       kmmio_post_handler_t post_handler; /* Called after addr is executed */
-       void *private;
+       /* kmmio internal list: */
+       struct list_head        list;
+       /* start location of the probe point: */
+       unsigned long           addr;
+       /* length of the probe region: */
+       unsigned long           len;
+       /* Called before addr is executed: */
+       kmmio_pre_handler_t     pre_handler;
+       /* Called after addr is executed: */
+       kmmio_post_handler_t    post_handler;
+       void                    *private;
 };
 
+extern unsigned int kmmio_count;
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+
+#ifdef CONFIG_MMIOTRACE
 /* Is kmmio active, i.e. are any kmmio_probes registered? */
 static inline int is_kmmio_active(void)
 {
-       extern unsigned int kmmio_count;
        return kmmio_count;
 }
 
-extern int register_kmmio_probe(struct kmmio_probe *p);
-extern void unregister_kmmio_probe(struct kmmio_probe *p);
-
 /* Called from page fault handler. */
 extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
 
-#ifdef CONFIG_MMIOTRACE
 /* Called from ioremap.c */
 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
                                                        void __iomem *addr);
@@ -43,7 +49,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr);
 /* For anyone to insert markers. Remember trailing newline. */
 extern int mmiotrace_printk(const char *fmt, ...)
                                __attribute__ ((format (printf, 1, 2)));
-#else
+#else /* !CONFIG_MMIOTRACE: */
+static inline int is_kmmio_active(void)
+{
+       return 0;
+}
+
+static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+{
+       return 0;
+}
+
 static inline void mmiotrace_ioremap(resource_size_t offset,
                                        unsigned long size, void __iomem *addr)
 {
@@ -63,28 +79,28 @@ static inline int mmiotrace_printk(const char *fmt, ...)
 #endif /* CONFIG_MMIOTRACE */
 
 enum mm_io_opcode {
-       MMIO_READ = 0x1,     /* struct mmiotrace_rw */
-       MMIO_WRITE = 0x2,    /* struct mmiotrace_rw */
-       MMIO_PROBE = 0x3,    /* struct mmiotrace_map */
-       MMIO_UNPROBE = 0x4,  /* struct mmiotrace_map */
-       MMIO_UNKNOWN_OP = 0x5, /* struct mmiotrace_rw */
+       MMIO_READ       = 0x1,  /* struct mmiotrace_rw */
+       MMIO_WRITE      = 0x2,  /* struct mmiotrace_rw */
+       MMIO_PROBE      = 0x3,  /* struct mmiotrace_map */
+       MMIO_UNPROBE    = 0x4,  /* struct mmiotrace_map */
+       MMIO_UNKNOWN_OP = 0x5,  /* struct mmiotrace_rw */
 };
 
 struct mmiotrace_rw {
-       resource_size_t phys;   /* PCI address of register */
-       unsigned long value;
-       unsigned long pc;       /* optional program counter */
-       int map_id;
-       unsigned char opcode;   /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
-       unsigned char width;    /* size of register access in bytes */
+       resource_size_t phys;   /* PCI address of register */
+       unsigned long   value;
+       unsigned long   pc;     /* optional program counter */
+       int             map_id;
+       unsigned char   opcode; /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
+       unsigned char   width;  /* size of register access in bytes */
 };
 
 struct mmiotrace_map {
-       resource_size_t phys;   /* base address in PCI space */
-       unsigned long virt;     /* base virtual address */
-       unsigned long len;      /* mapping size */
-       int map_id;
-       unsigned char opcode;   /* MMIO_PROBE or MMIO_UNPROBE */
+       resource_size_t phys;   /* base address in PCI space */
+       unsigned long   virt;   /* base virtual address */
+       unsigned long   len;    /* mapping size */
+       int             map_id;
+       unsigned char   opcode; /* MMIO_PROBE or MMIO_UNPROBE */
 };
 
 /* in kernel/trace/trace_mmiotrace.c */
@@ -94,4 +110,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
 extern void mmio_trace_mapping(struct mmiotrace_map *map);
 extern int mmio_trace_printk(const char *fmt, va_list args);
 
-#endif /* MMIOTRACE_H */
+#endif /* _LINUX_MMIOTRACE_H */
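
With the stubs above, the page-fault hot path can call straight into mmiotrace without #ifdefs. A sketch of the intended hook (regs/address come from the fault handler; cf. the x86 fault code):

	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, address))
			return;		/* fault handled by mmiotrace */
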
index 7382af374731eca162a79dff5e9156126c2b7f94..e137b3c486a7799f9f39b470f827db2ab00dc819 100644 (file)
@@ -237,6 +237,7 @@ struct nubus_dirent
        int mask;
 };
 
+#ifdef __KERNEL__
 struct nubus_board {
        struct nubus_board* next;
        struct nubus_dev* first_dev;
@@ -351,6 +352,7 @@ void nubus_get_rsrc_mem(void* dest,
 void nubus_get_rsrc_str(void* dest,
                        const struct nubus_dirent *dirent,
                        int maxlen);
+#endif /* __KERNEL__ */
 
 /* We'd like to get rid of this eventually.  Only daynaport.c uses it now. */
 static inline void *nubus_slot_addr(int slot)
index 9f2a3751873a8d41e8d438b028c432c0a12b2781..ee5615d6521105b1ec527a0e7f12b8793cbd4de8 100644 (file)
@@ -5,53 +5,66 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name)                                     \
-       __attribute__((__section__(".data.percpu")))                    \
-       PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                      \
-       __attribute__((__section__(SHARED_ALIGNED_SECTION)))            \
-       PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name             \
-       ____cacheline_aligned_in_smp
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                        \
-       __attribute__((__section__(".data.percpu.page_aligned")))       \
+#endif
+
+#define DEFINE_PER_CPU_SECTION(type, name, section)                    \
+       __attribute__((__section__(PER_CPU_BASE_SECTION section)))      \
        PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
+
 #define DEFINE_PER_CPU(type, name)                                     \
-       PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+       DEFINE_PER_CPU_SECTION(type, name, "")
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                    \
-       DEFINE_PER_CPU(type, name)
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)                      \
+       DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+       ____cacheline_aligned_in_smp
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                      \
-       DEFINE_PER_CPU(type, name)
-#endif
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)                                \
+       DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name)                               \
+       DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE  8192
+#define PERCPU_MODULE_RESERVE          (8 << 10)
 #else
-#define PERCPU_MODULE_RESERVE  0
+#define PERCPU_MODULE_RESERVE          0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM                                             \
-       (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif /* PERCPU_ENOUGH_ROOM */
+       (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +      \
+        PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE             PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and x86_32.  A more
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE         (20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE         (12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+                               size_t static_size, size_t reserved_size,
+                               ssize_t dyn_size, ssize_t unit_size,
+                               void *base_addr,
+                               pcpu_populate_pte_fn_t populate_pte_fn);
+
+extern ssize_t __init pcpu_embed_first_chunk(
+                               size_t static_size, size_t reserved_size,
+                               ssize_t dyn_size, ssize_t unit_size);
+
+/*
+ * Use this to get to a cpu's version of a dynamically allocated
+ * per-cpu object. Non-atomic access to the current CPU's version
+ * should be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)  SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
        void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/* 
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */ 
-#define percpu_ptr(ptr, cpu)                              \
-({                                                        \
-        struct percpu_data *__p = __percpu_disguise(ptr); \
-        (__typeof__(ptr))__p->ptrs[(cpu)];               \
+
+#define per_cpu_ptr(ptr, cpu)                                          \
+({                                                                     \
+        struct percpu_data *__p = __percpu_disguise(ptr);              \
+        (__typeof__(ptr))__p->ptrs[(cpu)];                             \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-       return kzalloc(size, gfp);
+       /*
+        * Can't easily make larger alignment work with kmalloc.  WARN
+        * on it.  Larger alignment should only be used for module
+        * percpu sections on SMP for which this path isn't used.
+        */
+       WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+       return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-       kfree(__pdata);
+       kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-       __percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size)   percpu_alloc_mask((size), GFP_KERNEL, \
-                                                 cpu_possible_map)
-#define alloc_percpu(type)     (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr)       percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu)  percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)     (type *)__alloc_percpu(sizeof(type), \
+                                                      __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
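
A sketch of the renamed dynamic percpu API from a user's point of view (the struct and counter are hypothetical; note that __alignof__ is now honored by alloc_percpu()):

	struct my_stats {
		unsigned long	events;
	};

	static unsigned long sum_events(void)
	{
		struct my_stats *stats;
		unsigned long total = 0;
		int cpu;

		stats = alloc_percpu(struct my_stats);
		if (!stats)
			return 0;

		/* non-atomic access to this CPU's copy: pin the CPU */
		cpu = get_cpu();
		per_cpu_ptr(stats, cpu)->events++;
		put_cpu();

		for_each_possible_cpu(cpu)
			total += per_cpu_ptr(stats, cpu)->events;

		free_percpu(stats);
		return total;
	}
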
index bc5114d35e99b65db4e11acefe3bae623a7c9db9..e356c99f06590c0d48e1aa687f5c9465813608bb 100644 (file)
@@ -28,8 +28,6 @@
 #include <linux/reiserfs_fs_sb.h>
 #endif
 
-struct fid;
-
 /*
  *  include/linux/reiser_fs.h
  *
@@ -37,6 +35,33 @@ struct fid;
  *
  */
 
+/* ioctl's command */
+#define REISERFS_IOC_UNPACK            _IOW(0xCD,1,long)
+/* define following flags to be the same as in ext2, so that chattr(1),
+   lsattr(1) will work with us. */
+#define REISERFS_IOC_GETFLAGS          FS_IOC_GETFLAGS
+#define REISERFS_IOC_SETFLAGS          FS_IOC_SETFLAGS
+#define REISERFS_IOC_GETVERSION                FS_IOC_GETVERSION
+#define REISERFS_IOC_SETVERSION                FS_IOC_SETVERSION
+
+#ifdef __KERNEL__
+/* the 32 bit compat definitions with int argument */
+#define REISERFS_IOC32_UNPACK          _IOW(0xCD, 1, int)
+#define REISERFS_IOC32_GETFLAGS                FS_IOC32_GETFLAGS
+#define REISERFS_IOC32_SETFLAGS                FS_IOC32_SETFLAGS
+#define REISERFS_IOC32_GETVERSION      FS_IOC32_GETVERSION
+#define REISERFS_IOC32_SETVERSION      FS_IOC32_SETVERSION
+
+/* Locking primitives */
+/* Right now we are still falling back to (un)lock_kernel, but eventually
+   this should evolve into real per-fs locks. */
+#define reiserfs_write_lock( sb ) lock_kernel()
+#define reiserfs_write_unlock( sb ) unlock_kernel()
+
+/* xattr stuff */
+#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
+struct fid;
+
 /* in reading the #defines, it may help to understand that they employ
    the following abbreviations:
 
@@ -698,6 +723,7 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key)
 /* object identifier for root dir */
 #define REISERFS_ROOT_OBJECTID 2
 #define REISERFS_ROOT_PARENT_OBJECTID 1
+
 extern struct reiserfs_key root_key;
 
 /* 
@@ -1540,7 +1566,6 @@ struct reiserfs_iget_args {
 /*                    FUNCTION DECLARATIONS                                */
 /***************************************************************************/
 
-/*#ifdef __KERNEL__*/
 #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12)
 
 #define journal_trans_half(blocksize) \
@@ -2178,29 +2203,6 @@ long reiserfs_compat_ioctl(struct file *filp,
                   unsigned int cmd, unsigned long arg);
 int reiserfs_unpack(struct inode *inode, struct file *filp);
 
-/* ioctl's command */
-#define REISERFS_IOC_UNPACK            _IOW(0xCD,1,long)
-/* define following flags to be the same as in ext2, so that chattr(1),
-   lsattr(1) will work with us. */
-#define REISERFS_IOC_GETFLAGS          FS_IOC_GETFLAGS
-#define REISERFS_IOC_SETFLAGS          FS_IOC_SETFLAGS
-#define REISERFS_IOC_GETVERSION                FS_IOC_GETVERSION
-#define REISERFS_IOC_SETVERSION                FS_IOC_SETVERSION
-
-/* the 32 bit compat definitions with int argument */
-#define REISERFS_IOC32_UNPACK          _IOW(0xCD, 1, int)
-#define REISERFS_IOC32_GETFLAGS                FS_IOC32_GETFLAGS
-#define REISERFS_IOC32_SETFLAGS                FS_IOC32_SETFLAGS
-#define REISERFS_IOC32_GETVERSION      FS_IOC32_GETVERSION
-#define REISERFS_IOC32_SETVERSION      FS_IOC32_SETVERSION
-
-/* Locking primitives */
-/* Right now we are still falling back to (un)lock_kernel, but eventually that
-   would evolve into real per-fs locks */
-#define reiserfs_write_lock( sb ) lock_kernel()
-#define reiserfs_write_unlock( sb ) unlock_kernel()
-
-/* xattr stuff */
-#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
 
+#endif /* __KERNEL__ */
 #endif                         /* _LINUX_REISER_FS_H */
index 8c216e057c94230ea01c110b0f5bc2c28e8f9ee3..46d680643f894b62f80adc7195fd069e62d452e3 100644 (file)
@@ -1178,10 +1178,9 @@ struct task_struct {
        pid_t pid;
        pid_t tgid;
 
-#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
-#endif
+
        /* 
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with 
@@ -1419,6 +1418,9 @@ struct task_struct {
 #endif
 };
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -2087,6 +2089,19 @@ static inline int object_is_on_stack(void *obj)
 
 extern void thread_info_cache_init(void);
 
+#ifdef CONFIG_DEBUG_STACK_USAGE
+static inline unsigned long stack_not_used(struct task_struct *p)
+{
+       unsigned long *n = end_of_stack(p);
+
+       do {    /* Skip over canary */
+               n++;
+       } while (!*n);
+
+       return (unsigned long)n - (unsigned long)end_of_stack(p);
+}
+#endif
+
 /* set thread flags in other task's structures
  * - see asm/thread_info.h for TIF_xxxx flags available
  */
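
Usage sketch for the new stack_not_used() helper, given a struct task_struct *p (CONFIG_DEBUG_STACK_USAGE only; the exit-path caller appears in the kernel/exit.c hunk below):

	printk(KERN_INFO "%s: %lu of %lu stack bytes never touched\n",
	       p->comm, stack_not_used(p), (unsigned long)THREAD_SIZE);
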
index 715196b09d6714ff971ddd95c839225d81810666..bbacb7baa44618b973a280a3aeac3eab68084009 100644 (file)
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void)
 #define put_cpu()              preempt_enable()
 #define put_cpu_no_resched()   preempt_enable_no_resched()
 
+/*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+ */
+extern void arch_disable_smp_support(void);
+
 void smp_setup_processor_id(void);
 
 #endif /* __LINUX_SMP_H */
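
The generic side supplies a __weak no-op (see the init/main.c hunk below); an architecture overrides it to do whatever "SMP off" means locally. A hedged sketch of such an override, where the IO-APIC call mirrors the code being removed from init/main.c:

	void arch_disable_smp_support(void)
	{
		/* arch-level action, e.g. keep the IO-APIC disabled */
		disable_ioapic_setup();
	}
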
index 20fc4bbfca42b4327c346fd1f491661838062e34..afc01909a42858b6389d7370fed45d94219df302 100644 (file)
@@ -24,10 +24,12 @@ struct __kernel_sockaddr_storage {
 #include <linux/types.h>               /* pid_t                        */
 #include <linux/compiler.h>            /* __user                       */
 
-#ifdef CONFIG_PROC_FS
+#ifdef __KERNEL__
+# ifdef CONFIG_PROC_FS
 struct seq_file;
 extern void socket_seq_show(struct seq_file *seq);
-#endif
+# endif
+#endif /* __KERNEL__ */
 
 typedef unsigned short sa_family_t;
 
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
new file mode 100644 (file)
index 0000000..6f3e54c
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _LINUX_STACKPROTECTOR_H
+#define _LINUX_STACKPROTECTOR_H 1
+
+#include <linux/compiler.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+# include <asm/stackprotector.h>
+#else
+static inline void boot_init_stack_canary(void)
+{
+}
+#endif
+
+#endif
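
A sketch of what an architecture's boot_init_stack_canary() might do (the final comment is an assumption about the arch side; the get_random_int() seeding matches the kernel/fork.c hunk below):

	static __always_inline void boot_init_stack_canary(void)
	{
		unsigned long canary = get_random_int();

		current->stack_canary = canary;
		/* a real arch version also publishes the canary where the
		 * compiler's -fstack-protector prologue expects to find it */
	}
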
index e632d29f054473c2ca34186185f1d7bb797ef931..a16b9e06f2e53e803af040390383d54436430d52 100644 (file)
@@ -193,5 +193,11 @@ int arch_update_cpu_topology(void);
 #ifndef topology_core_siblings
 #define topology_core_siblings(cpu)            cpumask_of_cpu(cpu)
 #endif
+#ifndef topology_thread_cpumask
+#define topology_thread_cpumask(cpu)           cpumask_of(cpu)
+#endif
+#ifndef topology_core_cpumask
+#define topology_core_cpumask(cpu)             cpumask_of(cpu)
+#endif
 
 #endif /* _LINUX_TOPOLOGY_H */
index 712ca53bc348847d7dd7e176f6b0dbabac93fa29..fca82ed55f4975e08eb7542a98d683dce1ea7fe7 100644 (file)
@@ -1,6 +1,9 @@
 #ifndef _LINUX_TYPES_H
 #define _LINUX_TYPES_H
 
+#include <asm/types.h>
+
+#ifndef __ASSEMBLY__
 #ifdef __KERNEL__
 
 #define DECLARE_BITMAP(name,bits) \
@@ -9,7 +12,6 @@
 #endif
 
 #include <linux/posix_types.h>
-#include <asm/types.h>
 
 #ifndef __KERNEL_STRICT_NAMES
 
@@ -212,5 +214,5 @@ struct ustat {
 };
 
 #endif /* __KERNEL__ */
-
+#endif /* __ASSEMBLY__ */
 #endif /* _LINUX_TYPES_H */
index 9c0890c7a06a357dd62e12cde059ca86764f8efa..a43ebec3a7b92692f0cc970a075e2462b705e270 100644 (file)
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
                        struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+                                   pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */
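
The _noflush variants split the flushing out of the map/unmap operation so a caller making many small mappings can batch it. A sketch of the contract, assuming addr/size/pages are set up by the caller (flush_cache_vmap and flush_tlb_kernel_range are the existing arch hooks):

	if (map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages) < 0)
		return -ENOMEM;
	flush_cache_vmap(addr, addr + size);	/* caller flushes, once */

	/* later, tearing down: */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);
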
index 6a5c5fed66c96e36fcaf03a9f3c8403be132a193..38396ec7ee369fbba39969b4f004f7a5f0c21779 100644 (file)
@@ -101,6 +101,66 @@ config LOCALVERSION_AUTO
 
          which is done within the script "scripts/setlocalversion".)
 
+config HAVE_KERNEL_GZIP
+       bool
+
+config HAVE_KERNEL_BZIP2
+       bool
+
+config HAVE_KERNEL_LZMA
+       bool
+
+choice
+       prompt "Kernel compression mode"
+       default KERNEL_GZIP
+       depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA
+       help
+         The Linux kernel is a kind of self-extracting executable.
+         Several compression algorithms are available; they differ
+         in compression ratio and in compression/decompression speed.
+         Compression speed is only relevant when building a kernel.
+         Decompression speed is relevant at each boot.
+
+         If you have any problems with bzip2 or lzma compressed
+         kernels, mail Alain Knaff <alain@knaff.lu>.  (An older,
+         bzip2-only version of this functionality for 2.4 was
+         supplied by Christian Ludwig.)
+
+         High compression options are mostly useful for users who
+         are low on disk space (embedded systems) but for whom RAM
+         size matters less.
+
+         If in doubt, select 'gzip'.
+
+config KERNEL_GZIP
+       bool "Gzip"
+       depends on HAVE_KERNEL_GZIP
+       help
+         The old and tried gzip compression.  Its compression ratio
+         is the poorest among the three choices; however, its speed
+         (both compression and decompression) is the fastest.
+
+config KERNEL_BZIP2
+       bool "Bzip2"
+       depends on HAVE_KERNEL_BZIP2
+       help
+         Its compression ratio and speed are intermediate.
+         Decompression speed is the slowest among the three.  The
+         kernel size is about 10% smaller with bzip2 than with gzip.
+         Bzip2 uses a large amount of memory; for modern kernels you
+         will need at least 8MB of RAM for booting.
+
+config KERNEL_LZMA
+       bool "LZMA"
+       depends on HAVE_KERNEL_LZMA
+       help
+         The most recent compression algorithm.
+         Its compression ratio is the best and its decompression
+         speed falls between the other two; compression is the
+         slowest.  The kernel size is about 33% smaller with LZMA
+         than with gzip.
+
+endchoice
+
 config SWAP
        bool "Support for paging of anonymous memory (swap)"
        depends on MMU && BLOCK
index 0f0f0cf3ba9aa97c34d88824fbf4644ca76b0fac..027a402708de6c7fa505f989b6151f5ea5bf1935 100644 (file)
@@ -11,6 +11,9 @@
 #include "do_mounts.h"
 #include "../fs/squashfs/squashfs_fs.h"
 
+#include <linux/decompress/generic.h>
+
+
 int __initdata rd_prompt = 1;/* 1 = prompt for RAM disk, 0 = don't prompt */
 
 static int __init prompt_ramdisk(char *str)
@@ -29,7 +32,7 @@ static int __init ramdisk_start_setup(char *str)
 }
 __setup("ramdisk_start=", ramdisk_start_setup);
 
-static int __init crd_load(int in_fd, int out_fd);
+static int __init crd_load(int in_fd, int out_fd, decompress_fn deco);
 
 /*
  * This routine tries to find a RAM disk image to load, and returns the
@@ -38,15 +41,15 @@ static int __init crd_load(int in_fd, int out_fd);
  * numbers could not be found.
  *
  * We currently check for the following magic numbers:
- *     minix
- *     ext2
+ *     minix
+ *     ext2
  *     romfs
  *     cramfs
  *     squashfs
- *     gzip
+ *     gzip
  */
-static int __init 
-identify_ramdisk_image(int fd, int start_block)
+static int __init
+identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor)
 {
        const int size = 512;
        struct minix_super_block *minixsb;
@@ -56,6 +59,7 @@ identify_ramdisk_image(int fd, int start_block)
        struct squashfs_super_block *squashfsb;
        int nblocks = -1;
        unsigned char *buf;
+       const char *compress_name;
 
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
@@ -69,18 +73,19 @@ identify_ramdisk_image(int fd, int start_block)
        memset(buf, 0xe5, size);
 
        /*
-        * Read block 0 to test for gzipped kernel
+        * Read block 0 to test for compressed kernel
         */
        sys_lseek(fd, start_block * BLOCK_SIZE, 0);
        sys_read(fd, buf, size);
 
-       /*
-        * If it matches the gzip magic numbers, return 0
-        */
-       if (buf[0] == 037 && ((buf[1] == 0213) || (buf[1] == 0236))) {
-               printk(KERN_NOTICE
-                      "RAMDISK: Compressed image found at block %d\n",
-                      start_block);
+       *decompressor = decompress_method(buf, size, &compress_name);
+       if (compress_name) {
+               printk(KERN_NOTICE "RAMDISK: %s image found at block %d\n",
+                      compress_name, start_block);
+               if (!*decompressor)
+                       printk(KERN_EMERG
+                              "RAMDISK: %s decompressor not configured!\n",
+                              compress_name);
                nblocks = 0;
                goto done;
        }
@@ -142,7 +147,7 @@ identify_ramdisk_image(int fd, int start_block)
        printk(KERN_NOTICE
               "RAMDISK: Couldn't find valid RAM disk image starting at %d.\n",
               start_block);
-       
+
 done:
        sys_lseek(fd, start_block * BLOCK_SIZE, 0);
        kfree(buf);
@@ -157,6 +162,7 @@ int __init rd_load_image(char *from)
        int nblocks, i, disk;
        char *buf = NULL;
        unsigned short rotate = 0;
+       decompress_fn decompressor = NULL;
 #if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES)
        char rotator[4] = { '|' , '/' , '-' , '\\' };
 #endif
@@ -169,12 +175,12 @@ int __init rd_load_image(char *from)
        if (in_fd < 0)
                goto noclose_input;
 
-       nblocks = identify_ramdisk_image(in_fd, rd_image_start);
+       nblocks = identify_ramdisk_image(in_fd, rd_image_start, &decompressor);
        if (nblocks < 0)
                goto done;
 
        if (nblocks == 0) {
-               if (crd_load(in_fd, out_fd) == 0)
+               if (crd_load(in_fd, out_fd, decompressor) == 0)
                        goto successful_load;
                goto done;
        }
@@ -200,7 +206,7 @@ int __init rd_load_image(char *from)
                       nblocks, rd_blocks);
                goto done;
        }
-               
+
        /*
         * OK, time to copy in the data
         */
@@ -273,138 +279,48 @@ int __init rd_load_disk(int n)
        return rd_load_image("/dev/root");
 }
 
-/*
- * gzip declarations
- */
-
-#define OF(args)  args
-
-#ifndef memzero
-#define memzero(s, n)     memset ((s), 0, (n))
-#endif
-
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
-
-#define INBUFSIZ 4096
-#define WSIZE 0x8000    /* window size--must be a power of two, and */
-                       /*  at least 32K for zip's deflate method */
-
-static uch *inbuf;
-static uch *window;
-
-static unsigned insize;  /* valid bytes in inbuf */
-static unsigned inptr;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt;  /* bytes in output buffer */
 static int exit_code;
-static int unzip_error;
-static long bytes_out;
+static int decompress_error;
 static int crd_infd, crd_outfd;
 
-#define get_byte()  (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-               
-/* Diagnostic functions (stubbed out) */
-#define Assert(cond,msg)
-#define Trace(x)
-#define Tracev(x)
-#define Tracevv(x)
-#define Tracec(c,x)
-#define Tracecv(c,x)
-
-#define STATIC static
-#define INIT __init
-
-static int  __init fill_inbuf(void);
-static void __init flush_window(void);
-static void __init error(char *m);
-
-#define NO_INFLATE_MALLOC
-
-#include "../lib/inflate.c"
-
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- * Returning -1 does not guarantee that gunzip() will ever return.
- */
-static int __init fill_inbuf(void)
+static int __init compr_fill(void *buf, unsigned int len)
 {
-       if (exit_code) return -1;
-       
-       insize = sys_read(crd_infd, inbuf, INBUFSIZ);
-       if (insize == 0) {
-               error("RAMDISK: ran out of compressed data");
-               return -1;
-       }
-
-       inptr = 1;
-
-       return inbuf[0];
+       int r = sys_read(crd_infd, buf, len);
+       if (r < 0)
+               printk(KERN_ERR "RAMDISK: error while reading compressed data");
+       else if (r == 0)
+               printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
+       return r;
 }
 
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void __init flush_window(void)
+static int __init compr_flush(void *window, unsigned int outcnt)
 {
-    ulg c = crc;         /* temporary variable */
-    unsigned n, written;
-    uch *in, ch;
-    
-    written = sys_write(crd_outfd, window, outcnt);
-    if (written != outcnt && unzip_error == 0) {
-       printk(KERN_ERR "RAMDISK: incomplete write (%d != %d) %ld\n",
-              written, outcnt, bytes_out);
-       unzip_error = 1;
-    }
-    in = window;
-    for (n = 0; n < outcnt; n++) {
-           ch = *in++;
-           c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-    }
-    crc = c;
-    bytes_out += (ulg)outcnt;
-    outcnt = 0;
+       int written = sys_write(crd_outfd, window, outcnt);
+       if (written != outcnt) {
+               if (decompress_error == 0)
+                       printk(KERN_ERR
+                              "RAMDISK: incomplete write (%d != %d)\n",
+                              written, outcnt);
+               decompress_error = 1;
+               return -1;
+       }
+       return outcnt;
 }
 
 static void __init error(char *x)
 {
        printk(KERN_ERR "%s\n", x);
        exit_code = 1;
-       unzip_error = 1;
+       decompress_error = 1;
 }
 
-static int __init crd_load(int in_fd, int out_fd)
+static int __init crd_load(int in_fd, int out_fd, decompress_fn deco)
 {
        int result;
-
-       insize = 0;             /* valid bytes in inbuf */
-       inptr = 0;              /* index of next byte to be processed in inbuf */
-       outcnt = 0;             /* bytes in output buffer */
-       exit_code = 0;
-       bytes_out = 0;
-       crc = (ulg)0xffffffffL; /* shift register contents */
-
        crd_infd = in_fd;
        crd_outfd = out_fd;
-       inbuf = kmalloc(INBUFSIZ, GFP_KERNEL);
-       if (!inbuf) {
-               printk(KERN_ERR "RAMDISK: Couldn't allocate gzip buffer\n");
-               return -1;
-       }
-       window = kmalloc(WSIZE, GFP_KERNEL);
-       if (!window) {
-               printk(KERN_ERR "RAMDISK: Couldn't allocate gzip window\n");
-               kfree(inbuf);
-               return -1;
-       }
-       makecrc();
-       result = gunzip();
-       if (unzip_error)
+       result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
+       if (decompress_error)
                result = 1;
-       kfree(inbuf);
-       kfree(window);
        return result;
 }
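
For reference, the streaming contract crd_load() relies on — a hedged summary of the decompress_fn convention as used here, not a new API. With a NULL input buffer and NULL pos, deco() pulls all input through fill() and pushes all output through flush():

	/*
	 * fill(buf, len)  -> bytes now in buf; 0 means EOF, < 0 means error
	 * flush(buf, len) -> len on success; a negative return tells the
	 *                    decompressor to stop (see compr_flush() above)
	 */
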
index d9c941c0c3cacd26fb051059540673b5e011d967..7dcde7ea660306801958857fdce54c7ee56bce9b 100644 (file)
@@ -390,11 +390,13 @@ static int __init write_buffer(char *buf, unsigned len)
        return len - count;
 }
 
-static void __init flush_buffer(char *buf, unsigned len)
+static int __init flush_buffer(void *bufv, unsigned len)
 {
+       char *buf = (char *) bufv;
        int written;
+       int orig_len = len;
        if (message)
-               return;
+               return -1;
        while ((written = write_buffer(buf, len)) < len && !message) {
                char c = buf[written];
                if (c == '0') {
@@ -408,84 +410,28 @@ static void __init flush_buffer(char *buf, unsigned len)
                } else
                        error("junk in compressed archive");
        }
+       return orig_len;
 }
 
-/*
- * gzip declarations
- */
+static unsigned my_inptr;   /* index of next byte to be processed in the input buffer */
 
-#define OF(args)  args
-
-#ifndef memzero
-#define memzero(s, n)     memset ((s), 0, (n))
-#endif
-
-typedef unsigned char  uch;
-typedef unsigned short ush;
-typedef unsigned long  ulg;
-
-#define WSIZE 0x8000    /* window size--must be a power of two, and */
-                       /*  at least 32K for zip's deflate method */
-
-static uch *inbuf;
-static uch *window;
-
-static unsigned insize;  /* valid bytes in inbuf */
-static unsigned inptr;   /* index of next byte to be processed in inbuf */
-static unsigned outcnt;  /* bytes in output buffer */
-static long bytes_out;
-
-#define get_byte()  (inptr < insize ? inbuf[inptr++] : -1)
-               
-/* Diagnostic functions (stubbed out) */
-#define Assert(cond,msg)
-#define Trace(x)
-#define Tracev(x)
-#define Tracevv(x)
-#define Tracec(c,x)
-#define Tracecv(c,x)
-
-#define STATIC static
-#define INIT __init
-
-static void __init flush_window(void);
-static void __init error(char *m);
-
-#define NO_INFLATE_MALLOC
-
-#include "../lib/inflate.c"
-
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-static void __init flush_window(void)
-{
-       ulg c = crc;         /* temporary variable */
-       unsigned n;
-       uch *in, ch;
-
-       flush_buffer(window, outcnt);
-       in = window;
-       for (n = 0; n < outcnt; n++) {
-               ch = *in++;
-               c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
-       }
-       crc = c;
-       bytes_out += (ulg)outcnt;
-       outcnt = 0;
-}
+#include <linux/decompress/generic.h>
 
 static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
 {
        int written;
+       decompress_fn decompress;
+       const char *compress_name;
+       static __initdata char msg_buf[64];
+
        dry_run = check_only;
        header_buf = kmalloc(110, GFP_KERNEL);
        symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
        name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
-       window = kmalloc(WSIZE, GFP_KERNEL);
-       if (!window || !header_buf || !symlink_buf || !name_buf)
+
+       if (!header_buf || !symlink_buf || !name_buf)
                panic("can't allocate buffers");
+
        state = Start;
        this_header = 0;
        message = NULL;
@@ -505,22 +451,25 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
                        continue;
                }
                this_header = 0;
-               insize = len;
-               inbuf = buf;
-               inptr = 0;
-               outcnt = 0;             /* bytes in output buffer */
-               bytes_out = 0;
-               crc = (ulg)0xffffffffL; /* shift register contents */
-               makecrc();
-               gunzip();
+               decompress = decompress_method(buf, len, &compress_name);
+               if (decompress)
+                       decompress(buf, len, NULL, flush_buffer, NULL,
+                                  &my_inptr, error);
+               else if (compress_name) {
+                       if (!message) {
+                               snprintf(msg_buf, sizeof msg_buf,
+                                        "compression method %s not configured",
+                                        compress_name);
+                               message = msg_buf;
+                       }
+               }
                if (state != Reset)
-                       error("junk in gzipped archive");
-               this_header = saved_offset + inptr;
-               buf += inptr;
-               len -= inptr;
+                       error("junk in compressed archive");
+               this_header = saved_offset + my_inptr;
+               buf += my_inptr;
+               len -= my_inptr;
        }
        dir_utime();
-       kfree(window);
        kfree(name_buf);
        kfree(symlink_buf);
        kfree(header_buf);
@@ -579,7 +528,7 @@ static int __init populate_rootfs(void)
        char *err = unpack_to_rootfs(__initramfs_start,
                         __initramfs_end - __initramfs_start, 0);
        if (err)
-               panic(err);
+               panic(err);     /* Failed to decompress INTERNAL initramfs */
        if (initrd_start) {
 #ifdef CONFIG_BLK_DEV_RAM
                int fd;
@@ -605,9 +554,12 @@ static int __init populate_rootfs(void)
                printk(KERN_INFO "Unpacking initramfs...");
                err = unpack_to_rootfs((char *)initrd_start,
                        initrd_end - initrd_start, 0);
-               if (err)
-                       panic(err);
-               printk(" done\n");
+               if (err) {
+                       printk(" failed!\n");
+                       printk(KERN_EMERG "%s\n", err);
+               } else {
+                       printk(" done\n");
+               }
                free_initrd();
 #endif
        }
index 83697e160b3a786d3d91c14000f116a441f61bfc..6bf83afd654da44b5b52383a4697d4d98e0ff6fa 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/proc_fs.h>
 #include <linux/kernel.h>
 #include <linux/syscalls.h>
+#include <linux/stackprotector.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
@@ -135,14 +136,14 @@ unsigned int __initdata setup_max_cpus = NR_CPUS;
  * greater than 0, limits the maximum number of CPUs activated in
  * SMP mode to <NUM>.
  */
-#ifndef CONFIG_X86_IO_APIC
-static inline void disable_ioapic_setup(void) {};
-#endif
+
+void __weak arch_disable_smp_support(void) { }
 
 static int __init nosmp(char *str)
 {
        setup_max_cpus = 0;
-       disable_ioapic_setup();
+       arch_disable_smp_support();
+
        return 0;
 }
 
@@ -152,14 +153,14 @@ static int __init maxcpus(char *str)
 {
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
-               disable_ioapic_setup();
+               arch_disable_smp_support();
 
        return 0;
 }
 
 early_param("maxcpus", maxcpus);
 #else
-#define setup_max_cpus NR_CPUS
+const unsigned int setup_max_cpus = NR_CPUS;
 #endif
 
 /*
@@ -540,6 +541,12 @@ asmlinkage void __init start_kernel(void)
         */
        lockdep_init();
        debug_objects_early_init();
+
+       /*
+        * Set up the initial canary ASAP:
+        */
+       boot_init_stack_canary();
+
        cgroup_init_early();
 
        local_irq_disable();
index efd30ccf38584f48c6ef68da62b9236479b6da69..167e1e3ad7c61f5c0a73db9dc457c30a97527a91 100644 (file)
@@ -980,12 +980,9 @@ static void check_stack_usage(void)
 {
        static DEFINE_SPINLOCK(low_water_lock);
        static int lowest_to_date = THREAD_SIZE;
-       unsigned long *n = end_of_stack(current);
        unsigned long free;
 
-       while (*n == 0)
-               n++;
-       free = (unsigned long)n - (unsigned long)end_of_stack(current);
+       free = stack_not_used(current);
 
        if (free >= lowest_to_date)
                return;
index 4854c2c4a82eae9ed1032ee1b66abc46124499ad..6715ebc3761de3ed10eeda84ff932e59ff8289c5 100644 (file)
@@ -61,6 +61,7 @@
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <trace/sched.h>
+#include <linux/magic.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -212,6 +213,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
        struct task_struct *tsk;
        struct thread_info *ti;
+       unsigned long *stackend;
+
        int err;
 
        prepare_to_copy(orig);
@@ -237,6 +240,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
                goto out;
 
        setup_thread_stack(tsk, orig);
+       stackend = end_of_stack(tsk);
+       *stackend = STACK_END_MAGIC;    /* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        tsk->stack_canary = get_random_int();
index 7de11bd64dfe33a2fa8c1091a308ed58b685d0c7..122fef4b0bd30d82ade4af7a84f281d019030280 100644 (file)
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
 #ifdef CONFIG_SMP
-       cpumask_setall(&desc->affinity);
+       cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+       cpumask_clear(desc->pending_mask);
+#endif
 #endif
        spin_unlock_irqrestore(&desc->lock, flags);
 }
index 3aba8d12f328ec91e59f5c72217ceb0c0d2fd0a1..f51eaee921b603b202bf184cdfdaee3a8da2ca08 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
        .irq        = -1,
        .status     = IRQ_DISABLED,
@@ -76,9 +78,6 @@ static struct irq_desc irq_desc_init = {
        .handle_irq = handle_bad_irq,
        .depth      = 1,
        .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-       .affinity   = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -113,6 +112,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
                printk(KERN_ERR "can not alloc kstat_irqs\n");
                BUG_ON(1);
        }
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+               BUG_ON(1);
+       }
        arch_init_chip_data(desc, cpu);
 }
 
@@ -121,7 +124,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +134,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
                .handle_irq = handle_bad_irq,
                .depth      = 1,
                .lock       = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-               .affinity   = CPU_MASK_ALL
-#endif
        }
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
 int __init early_irq_init(void)
 {
@@ -148,18 +147,30 @@ int __init early_irq_init(void)
 
        init_irq_default_affinity();
 
+        /* initialize nr_irqs based on nr_cpu_ids */
+       arch_probe_nr_irqs();
+       printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+       /* allocate irq_desc_ptrs array based on nr_irqs */
+       irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+       /* allocate based on nr_cpu_ids */
+       /* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
+       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int));
+
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
-               desc[i].kstat_irqs = kstat_irqs_legacy[i];
+               desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-
+               init_alloc_desc_masks(&desc[i], 0, true);
                irq_desc_ptrs[i] = desc + i;
        }
 
-       for (i = legacy_count; i < NR_IRQS; i++)
+       for (i = legacy_count; i < nr_irqs; i++)
                irq_desc_ptrs[i] = NULL;
 
        return arch_early_irq_init();
@@ -167,7 +178,10 @@ int __init early_irq_init(void)
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-       return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+       if (irq_desc_ptrs && irq < nr_irqs)
+               return irq_desc_ptrs[irq];
+
+       return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +190,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
        unsigned long flags;
        int node;
 
-       if (irq >= NR_IRQS) {
-               printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-                               irq, NR_IRQS);
-               WARN_ON(1);
+       if (irq >= nr_irqs) {
+               WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+                       irq, nr_irqs);
                return NULL;
        }
 
@@ -221,9 +234,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
                .handle_irq = handle_bad_irq,
                .depth = 1,
                .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-               .affinity = CPU_MASK_ALL
-#endif
        }
 };
 
@@ -235,12 +245,15 @@ int __init early_irq_init(void)
 
        init_irq_default_affinity();
 
+       printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);
 
-       for (i = 0; i < count; i++)
+       for (i = 0; i < count; i++) {
                desc[i].irq = i;
-
+               init_alloc_desc_masks(&desc[i], 0, true);
+       }
        return arch_early_irq_init();
 }
 
index e6d0a43cc1255c2abb3778fcec5f5b1d4e5290ec..40416a81a0f5af98b159bf675e3fd18e41648ce1 100644 (file)
@@ -16,7 +16,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
 extern spinlock_t sparse_irq_lock;
+
+#ifdef CONFIG_SPARSE_IRQ
+/* irq_desc_ptrs allocated at boot time */
+extern struct irq_desc **irq_desc_ptrs;
+#else
+/* irq_desc_ptrs is a fixed size array */
 extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+#endif
 
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
index 291f03664552387658690f947b1d3a4d9562dcc6..a3a5dc9ef346d813edf3b971926cb649d9c89d67 100644 (file)
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-               cpumask_copy(&desc->affinity, cpumask);
+               cpumask_copy(desc->affinity, cpumask);
                desc->chip->set_affinity(irq, cpumask);
        } else {
                desc->status |= IRQ_MOVE_PENDING;
-               cpumask_copy(&desc->pending_mask, cpumask);
+               cpumask_copy(desc->pending_mask, cpumask);
        }
 #else
-       cpumask_copy(&desc->affinity, cpumask);
+       cpumask_copy(desc->affinity, cpumask);
        desc->chip->set_affinity(irq, cpumask);
 #endif
        desc->status |= IRQ_AFFINITY_SET;
@@ -119,16 +119,16 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-               if (cpumask_any_and(&desc->affinity, cpu_online_mask)
+               if (cpumask_any_and(desc->affinity, cpu_online_mask)
                    < nr_cpu_ids)
                        goto set_affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }
 
-       cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity);
+       cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
 set_affinity:
-       desc->chip->set_affinity(irq, &desc->affinity);
+       desc->chip->set_affinity(irq, desc->affinity);
 
        return 0;
 }
index bd72329e630c6cc6f3958bf4d70484f9124f4330..e05ad9be43b7a5fa7e6d6a24f94e49828a73572a 100644 (file)
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
 
        desc->status &= ~IRQ_MOVE_PENDING;
 
-       if (unlikely(cpumask_empty(&desc->pending_mask)))
+       if (unlikely(cpumask_empty(desc->pending_mask)))
                return;
 
        if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
         * For correct operation this depends on the caller
         * masking the irqs.
         */
-       if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask)
+       if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
                   < nr_cpu_ids)) {
-               cpumask_and(&desc->affinity,
-                           &desc->pending_mask, cpu_online_mask);
-               desc->chip->set_affinity(irq, &desc->affinity);
+               cpumask_and(desc->affinity,
+                           desc->pending_mask, cpu_online_mask);
+               desc->chip->set_affinity(irq, desc->affinity);
        }
-       cpumask_clear(&desc->pending_mask);
+       cpumask_clear(desc->pending_mask);
 }
 
 void move_native_irq(int irq)
index acd88356ac76abfcbd186209f16d8e92d2ff31fb..7f9b80434e32a295b463bc7aaa97457207d56686 100644 (file)
@@ -38,15 +38,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
        old_desc->kstat_irqs = NULL;
 }
 
-static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
+static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
                 struct irq_desc *desc, int cpu)
 {
        memcpy(desc, old_desc, sizeof(struct irq_desc));
+       if (!init_alloc_desc_masks(desc, cpu, false)) {
+               printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
+                               "for migration.\n", irq);
+               return false;
+       }
        spin_lock_init(&desc->lock);
        desc->cpu = cpu;
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
+       init_copy_desc_masks(old_desc, desc);
        arch_init_copy_chip_data(old_desc, desc, cpu);
+       return true;
 }
 
 static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +83,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
        node = cpu_to_node(cpu);
        desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
        if (!desc) {
-               printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq);
+               printk(KERN_ERR "irq %d: can not get new irq_desc "
+                               "for migration.\n", irq);
+               /* still use old one */
+               desc = old_desc;
+               goto out_unlock;
+       }
+       if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
                /* still use old one */
+               kfree(desc);
                desc = old_desc;
                goto out_unlock;
        }
-       init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
        irq_desc_ptrs[irq] = desc;
        spin_unlock_irqrestore(&sparse_irq_lock, flags);
index aae3f742bcec5cb7f5e348c2bec4a4945041966b..692363dd591f0c447d26b36318889f43bbe20ac3 100644 (file)
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
 static int irq_affinity_proc_show(struct seq_file *m, void *v)
 {
        struct irq_desc *desc = irq_to_desc((long)m->private);
-       const struct cpumask *mask = &desc->affinity;
+       const struct cpumask *mask = desc->affinity;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
        if (desc->status & IRQ_MOVE_PENDING)
-               mask = &desc->pending_mask;
+               mask = desc->pending_mask;
 #endif
        seq_cpumask(m, mask);
        seq_putc(m, '\n');
index 483899578259ebc725264bedae6abb132295b9d3..c7fd6692939d6bd03c13019996874677f296d83c 100644 (file)
@@ -1130,7 +1130,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
                return;
        memset(&prstatus, 0, sizeof(prstatus));
        prstatus.pr_pid = current->pid;
-       elf_core_copy_regs(&prstatus.pr_reg, regs);
+       elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
        buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
                              &prstatus, sizeof(prstatus));
        final_note(buf);
index ba22484a987eb96a9d2bff0cdeede4b62a1d7156..f0e04d6b67d8cb325526dce1fdb456265ba3f926 100644 (file)
@@ -51,6 +51,7 @@
 #include <linux/tracepoint.h>
 #include <linux/ftrace.h>
 #include <linux/async.h>
+#include <linux/percpu.h>
 
 #if 0
 #define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
 }
 
 #ifdef CONFIG_SMP
+
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+static void *percpu_modalloc(unsigned long size, unsigned long align,
+                            const char *name)
+{
+       void *ptr;
+
+       if (align > PAGE_SIZE) {
+               printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
+                      name, align, PAGE_SIZE);
+               align = PAGE_SIZE;
+       }
+
+       ptr = __alloc_reserved_percpu(size, align);
+       if (!ptr)
+               printk(KERN_WARNING
+                      "Could not allocate %lu bytes percpu data\n", size);
+       return ptr;
+}
+
+static void percpu_modfree(void *freeme)
+{
+       free_percpu(freeme);
+}
+
+#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
 /* Size of each block.  -ve means used. */
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme)
        }
 }
 
-static unsigned int find_pcpusec(Elf_Ehdr *hdr,
-                                Elf_Shdr *sechdrs,
-                                const char *secstrings)
-{
-       return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
-}
-
-static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu)
-               memcpy(pcpudest + per_cpu_offset(cpu), from, size);
-}
-
 static int percpu_modinit(void)
 {
        pcpu_num_used = 2;
@@ -513,7 +527,26 @@ static int percpu_modinit(void)
        return 0;
 }
 __initcall(percpu_modinit);
+
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+static unsigned int find_pcpusec(Elf_Ehdr *hdr,
+                                Elf_Shdr *sechdrs,
+                                const char *secstrings)
+{
+       return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
+}
+
+static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               memcpy(pcpudest + per_cpu_offset(cpu), from, size);
+}
+
 #else /* ... !CONFIG_SMP */
+
 static inline void *percpu_modalloc(unsigned long size, unsigned long align,
                                    const char *name)
 {
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src,
        /* pcpusec should be 0, and size of that section should be 0. */
        BUG_ON(size != 0);
 }
+
 #endif /* CONFIG_SMP */
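
With CONFIG_HAVE_DYNAMIC_PER_CPU_AREA, percpu_modalloc() becomes a thin wrapper around the generic allocator (the same API family behind the percpu_ptr -> per_cpu_ptr renames elsewhere in this merge). A hedged usage sketch, assuming the __alloc_percpu()/per_cpu_ptr()/free_percpu() trio; the function and its policy are illustrative, not loader code:

	static int __init percpu_example_init(void)	/* made-up function */
	{
		unsigned long *counters, total = 0;
		int cpu;

		counters = __alloc_percpu(sizeof(unsigned long),
					  __alignof__(unsigned long));
		if (!counters)
			return -ENOMEM;

		(*per_cpu_ptr(counters, get_cpu()))++;	/* bump this CPU's copy */
		put_cpu();

		for_each_possible_cpu(cpu)		/* sum every CPU's copy */
			total += *per_cpu_ptr(counters, cpu);

		free_percpu(counters);
		return 0;
	}
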
 
 #define MODINFO_ATTR(field)    \
index 2a2ff36ff44dd5ed341876d0a2c71c92b2bef914..32fe4eff1b89ecf75af7c229fc8db2e46179c924 100644 (file)
@@ -74,6 +74,9 @@ NORET_TYPE void panic(const char * fmt, ...)
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+       dump_stack();
+#endif
        bust_spinlocks(0);
 
        /*
@@ -355,15 +358,18 @@ EXPORT_SYMBOL(warn_slowpath);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
+
 /*
  * Called when gcc's -fstack-protector feature is used, and
  * gcc detects corruption of the on-stack canary value
  */
 void __stack_chk_fail(void)
 {
-       panic("stack-protector: Kernel stack is corrupted");
+       panic("stack-protector: Kernel stack is corrupted in: %p\n",
+               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
+
 #endif
 
 core_param(panic, panic_timeout, int, 0644);
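
The __stack_chk_fail() change above makes the panic name the corrupted frame via __builtin_return_address(0). A conceptual model of what -fstack-protector arranges around a protected function; the compiler emits this inline, and only the two hook symbols are real:

	unsigned long __stack_chk_guard;	/* randomized canary */

	void some_protected_function(void)	/* illustrative */
	{
		unsigned long canary = __stack_chk_guard;	/* prologue */
		char buf[64];

		/* ... an overflow of buf[] tramples 'canary' before it can
		 * reach the saved return address ... */

		if (canary != __stack_chk_guard)		/* epilogue */
			__stack_chk_fail();	/* never returns; now reports
						 * this frame in the panic */
	}
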
index 8e2558c2ba67be440ef7bcb2d95ff8138c806404..61e63562f2737add49588c4951c9e82b4b7519d0 100644 (file)
@@ -5944,12 +5944,7 @@ void sched_show_task(struct task_struct *p)
                printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
-       {
-               unsigned long *n = end_of_stack(p);
-               while (!*n)
-                       n++;
-               free = (unsigned long)n - (unsigned long)end_of_stack(p);
-       }
+       free = stack_not_used(p);
 #endif
        printk(KERN_CONT "%5lu %5d %6d\n", free,
                task_pid_nr(p), task_pid_nr(p->real_parent));
@@ -9490,7 +9485,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 {
-       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
        u64 data;
 
 #ifndef CONFIG_64BIT
@@ -9509,7 +9504,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
 
 static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 {
-       u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
 
 #ifndef CONFIG_64BIT
        /*
@@ -9604,10 +9599,11 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
        cpu = task_cpu(tsk);
        ca = task_ca(tsk);
 
-       for (; ca; ca = ca->parent) {
-               u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu);
+       do {
+               u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
                *cpuusage += cputime;
-       }
+               ca = ca->parent;
+       } while (ca);
 }
 
 struct cgroup_subsys cpuacct_subsys = {
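
The do/while form is equivalent to the old for loop as long as task_ca() never returns NULL here, and it pairs with the per_cpu_ptr() fix. Worked through for a two-level hierarchy A <- B with the task in B: B's per-cpu counter is charged first, then A's, and the walk stops at the root, whose parent is NULL. A standalone sketch of the walk:

	static void charge_walk(struct cpuacct *ca, int cpu, u64 cputime)
	{
		do {
			u64 *usage = per_cpu_ptr(ca->cpuusage, cpu);

			*usage += cputime;	/* child first, then ancestors */
			ca = ca->parent;
		} while (ca);			/* root: parent == NULL */
	}
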
index bac1061cea2f4ab67f7c6d5b81300d02b717d5ba..da932f4c85240abce39c62461b08a8d2c298bf9f 100644 (file)
@@ -960,12 +960,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+                                  const struct cpumask *mask)
 {
        int first;
 
        /* "this_cpu" is cheaper to preempt than a remote processor */
-       if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+       if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
                return this_cpu;
 
        first = cpumask_first(mask);
@@ -981,6 +982,7 @@ static int find_lowest_rq(struct task_struct *task)
        struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
+       cpumask_var_t domain_mask;
 
        if (task->rt.nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
@@ -1013,19 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
        if (this_cpu == cpu)
                this_cpu = -1; /* Skip this_cpu opt if the same */
 
-       for_each_domain(cpu, sd) {
-               if (sd->flags & SD_WAKE_AFFINE) {
-                       cpumask_t domain_mask;
-                       int       best_cpu;
+       if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+               for_each_domain(cpu, sd) {
+                       if (sd->flags & SD_WAKE_AFFINE) {
+                               int best_cpu;
 
-                       cpumask_and(&domain_mask, sched_domain_span(sd),
-                                   lowest_mask);
+                               cpumask_and(domain_mask,
+                                           sched_domain_span(sd),
+                                           lowest_mask);
 
-                       best_cpu = pick_optimal_cpu(this_cpu,
-                                                   &domain_mask);
-                       if (best_cpu != -1)
-                               return best_cpu;
+                               best_cpu = pick_optimal_cpu(this_cpu,
+                                                           domain_mask);
+
+                               if (best_cpu != -1) {
+                                       free_cpumask_var(domain_mask);
+                                       return best_cpu;
+                               }
+                       }
                }
+               free_cpumask_var(domain_mask);
        }
 
        /*
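
The hunk above moves domain_mask off the stack: a cpumask_t is NR_CPUS bits (512 bytes with NR_CPUS=4096), too large for a local variable, so it becomes a cpumask_var_t allocated with GFP_ATOMIC and freed on every exit path. The general shape of the pattern:

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return -1;		/* caller must tolerate failure */

	cpumask_and(mask, cpu_online_mask, other_mask);	/* other_mask: whatever
							 * the caller is testing */
	/* ... use mask ... */
	free_cpumask_var(mask);		/* on every return path */
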
index 9041ea7948feffbf887ed92fb8ce1e73ad55c81c..57d3f67f6f38af7fdfb0fad66ff1cdbef790951e 100644 (file)
@@ -796,6 +796,11 @@ int __init __weak early_irq_init(void)
        return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+       return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
        return 0;
index 0cd415ee62a262ccadea6b592a7402eb89fc4797..74541ca49536fda5dab61d473f8ce9d6aa3f3254 100644 (file)
@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
         * doesn't hit this CPU until we're ready. */
        get_cpu();
        for_each_online_cpu(i) {
-               sm_work = percpu_ptr(stop_machine_work, i);
+               sm_work = per_cpu_ptr(stop_machine_work, i);
                INIT_WORK(sm_work, stop_cpu);
                queue_work_on(i, stop_machine_wq, sm_work);
        }
index 03c2c24b9083c17aff9372224ec2b22912691ffb..daa481824d9c9a68438d097ec4bedd9c2114dc2e 100644 (file)
@@ -97,6 +97,20 @@ config LZO_COMPRESS
 config LZO_DECOMPRESS
        tristate
 
+#
+# These all provide a common interface (hence the apparent duplication with
+# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper).
+#
+config DECOMPRESS_GZIP
+       select ZLIB_INFLATE
+       tristate
+
+config DECOMPRESS_BZIP2
+       tristate
+
+config DECOMPRESS_LZMA
+       tristate
+
 #
 # Generic allocator support is selected if needed
 #
index 32b0e64ded27d7c1f614fdc974d0857abfc19bbc..790de7c25d0d01eac70dcfce6327ca100919da5a 100644 (file)
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o \
         idr.o int_sqrt.o extable.o prio_tree.o \
         sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-        proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o
+        proportions.o prio_heap.o ratelimit.o show_mem.o \
+        is_single_threaded.o decompress.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -65,6 +66,10 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/
 obj-$(CONFIG_LZO_COMPRESS) += lzo/
 obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
 
+lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
+lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
+lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+
 obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
index 1338469ac849cb230fc8f1e74441ece4a6cfd58c..35a1f7ff414988d86c6c081d7bbf3671733a15d6 100644 (file)
@@ -948,15 +948,15 @@ done:
  */
 int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
 {
-       int pos;                /* scans bitmap by regions of size order */
+       int pos, end;           /* scans bitmap by regions of size order */
 
-       for (pos = 0; pos < bits; pos += (1 << order))
-               if (__reg_op(bitmap, pos, order, REG_OP_ISFREE))
-                       break;
-       if (pos == bits)
-               return -ENOMEM;
-       __reg_op(bitmap, pos, order, REG_OP_ALLOC);
-       return pos;
+       for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
+               if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
+                       continue;
+               __reg_op(bitmap, pos, order, REG_OP_ALLOC);
+               return pos;
+       }
+       return -ENOMEM;
 }
 EXPORT_SYMBOL(bitmap_find_free_region);
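
Besides reading better, the rewrite tightens a boundary: the old loop only checked pos < bits, so when bits was not a multiple of 1 << order it could probe a region running past the end of the map (e.g. bits = 12, order = 3 probed bits 8..15); the new end <= bits test rejects that. Usage example:

	DECLARE_BITMAP(map, 12);
	int pos;

	bitmap_zero(map, 12);
	pos = bitmap_find_free_region(map, 12, 3);	/* 0: bits 0..7 taken */
	pos = bitmap_find_free_region(map, 12, 3);	/* -ENOMEM: 8..15 would
							 * not fit in 12 bits */
	bitmap_release_region(map, 0, 3);		/* hand the region back */
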
 
diff --git a/lib/decompress.c b/lib/decompress.c
new file mode 100644 (file)
index 0000000..d2842f5
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * decompress.c
+ *
+ * Detect the decompression method based on magic number
+ */
+
+#include <linux/decompress/generic.h>
+
+#include <linux/decompress/bunzip2.h>
+#include <linux/decompress/unlzma.h>
+#include <linux/decompress/inflate.h>
+
+#include <linux/types.h>
+#include <linux/string.h>
+
+#ifndef CONFIG_DECOMPRESS_GZIP
+# define gunzip NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_BZIP2
+# define bunzip2 NULL
+#endif
+#ifndef CONFIG_DECOMPRESS_LZMA
+# define unlzma NULL
+#endif
+
+static const struct compress_format {
+       unsigned char magic[2];
+       const char *name;
+       decompress_fn decompressor;
+} compressed_formats[] = {
+       { {037, 0213}, "gzip", gunzip },
+       { {037, 0236}, "gzip", gunzip },
+       { {0x42, 0x5a}, "bzip2", bunzip2 },
+       { {0x5d, 0x00}, "lzma", unlzma },
+       { {0, 0}, NULL, NULL }
+};
+
+decompress_fn decompress_method(const unsigned char *inbuf, int len,
+                               const char **name)
+{
+       const struct compress_format *cf;
+
+       if (len < 2)
+               return NULL;    /* Need at least this much... */
+
+       for (cf = compressed_formats; cf->name; cf++) {
+               if (!memcmp(inbuf, cf->magic, 2))
+                       break;
+       }
+       if (name)
+               *name = cf->name;
+       return cf->decompressor;
+}
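
A hedged usage sketch of the new helper ('image', 'length', 'flush_cb' and 'err' are made-up caller names). Note the #define-to-NULL trick above means a recognized magic whose decompressor is compiled out yields a non-NULL *name with a NULL function pointer, which callers can report:

	const char *name;
	decompress_fn fn = decompress_method(image, length, &name);

	if (!fn)
		panic("image: unsupported compression format (%s)",
		      name ? name : "unknown");
	/* buf-in, flush-out mode; pos is not needed, so pass NULL */
	fn(image, length, NULL, flush_cb, NULL, NULL, err);
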
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
new file mode 100644 (file)
index 0000000..5d3ddb5
--- /dev/null
@@ -0,0 +1,735 @@
+/* vi: set sw=4 ts=4: */
+/*     Small bzip2 deflate implementation, by Rob Landley (rob@landley.net).
+
+       Based on bzip2 decompression code by Julian R Seward (jseward@acm.org),
+       which also acknowledges contributions by Mike Burrows, David Wheeler,
+       Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten,
+       Robert Sedgewick, and Jon L. Bentley.
+
+       This code is licensed under the LGPLv2:
+               LGPL (http://www.gnu.org/copyleft/lgpl.html)
+*/
+
+/*
+       Size and speed optimizations by Manuel Novoa III  (mjn3@codepoet.org).
+
+       More efficient reading of Huffman codes, a streamlined read_bunzip()
+       function, and various other tweaks.  In (limited) tests, approximately
+       20% faster than bzcat on x86 and about 10% faster on arm.
+
+       Note that about 2/3 of the time is spent in read_bunzip() reversing
+       the Burrows-Wheeler transformation.  Much of that time is delay
+       resulting from cache misses.
+
+       I would ask that anyone benefiting from this work, especially those
+       using it in commercial products, consider making a donation to my local
+       non-profit hospice organization in the name of the woman I loved, who
+       passed away Feb. 12, 2003.
+
+               In memory of Toni W. Hagan
+
+               Hospice of Acadiana, Inc.
+               2600 Johnston St., Suite 200
+               Lafayette, LA 70503-3240
+
+               Phone (337) 232-1234 or 1-800-738-2226
+               Fax   (337) 232-1297
+
+               http://www.hospiceacadiana.com/
+
+       Manuel
+ */
+
+/*
+       Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu)
+*/
+
+
+#ifndef STATIC
+#include <linux/decompress/bunzip2.h>
+#endif /* !STATIC */
+
+#include <linux/decompress/mm.h>
+
+#ifndef INT_MAX
+#define INT_MAX 0x7fffffff
+#endif
+
+/* Constants for Huffman coding */
+#define MAX_GROUPS             6
+#define GROUP_SIZE             50      /* 64 would have been more efficient */
+#define MAX_HUFCODE_BITS       20      /* Longest Huffman code allowed */
+#define MAX_SYMBOLS            258     /* 256 literals + RUNA + RUNB */
+#define SYMBOL_RUNA            0
+#define SYMBOL_RUNB            1
+
+/* Status return values */
+#define RETVAL_OK                      0
+#define RETVAL_LAST_BLOCK              (-1)
+#define RETVAL_NOT_BZIP_DATA           (-2)
+#define RETVAL_UNEXPECTED_INPUT_EOF    (-3)
+#define RETVAL_UNEXPECTED_OUTPUT_EOF   (-4)
+#define RETVAL_DATA_ERROR              (-5)
+#define RETVAL_OUT_OF_MEMORY           (-6)
+#define RETVAL_OBSOLETE_INPUT          (-7)
+
+/* Other housekeeping constants */
+#define BZIP2_IOBUF_SIZE               4096
+
+/* This is what we know about each Huffman coding group */
+struct group_data {
+       /* We have an extra slot at the end of limit[] for a sentinel value. */
+       int limit[MAX_HUFCODE_BITS+1];
+       int base[MAX_HUFCODE_BITS];
+       int permute[MAX_SYMBOLS];
+       int minLen, maxLen;
+};
+
+/* Structure holding all the housekeeping data, including IO buffers and
+   memory that persists between calls to bunzip */
+struct bunzip_data {
+       /* State for interrupting output loop */
+       int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent;
+       /* I/O tracking data (file handles, buffers, positions, etc.) */
+       int (*fill)(void*, unsigned int);
+       int inbufCount, inbufPos /*, outbufPos*/;
+       unsigned char *inbuf /*,*outbuf*/;
+       unsigned int inbufBitCount, inbufBits;
+       /* The CRC values stored in the block header and calculated from the
+       data */
+       unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC;
+       /* Intermediate buffer and its size (in bytes) */
+       unsigned int *dbuf, dbufSize;
+       /* These things are a bit too big to go on the stack */
+       unsigned char selectors[32768];         /* nSelectors = 15 bits */
+       struct group_data groups[MAX_GROUPS];   /* Huffman coding tables */
+       int io_error;                   /* non-zero if we have IO error */
+};
+
+
+/* Return the next nnn bits of input.  All reads from the compressed input
+   are done through this function.  All reads are big endian */
+static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted)
+{
+       unsigned int bits = 0;
+
+       /* If we need to get more data from the byte buffer, do so.
+          (Loop getting one byte at a time to enforce endianness and avoid
+          unaligned access.) */
+       while (bd->inbufBitCount < bits_wanted) {
+               /* If we need to read more data from file into byte buffer, do
+                  so */
+               if (bd->inbufPos == bd->inbufCount) {
+                       if (bd->io_error)
+                               return 0;
+                       bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE);
+                       if (bd->inbufCount <= 0) {
+                               bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF;
+                               return 0;
+                       }
+                       bd->inbufPos = 0;
+               }
+               /* Avoid 32-bit overflow (dump bit buffer to top of output) */
+               if (bd->inbufBitCount >= 24) {
+                       bits = bd->inbufBits&((1 << bd->inbufBitCount)-1);
+                       bits_wanted -= bd->inbufBitCount;
+                       bits <<= bits_wanted;
+                       bd->inbufBitCount = 0;
+               }
+               /* Grab next 8 bits of input from buffer. */
+               bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
+               bd->inbufBitCount += 8;
+       }
+       /* Calculate result */
+       bd->inbufBitCount -= bits_wanted;
+       bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1);
+
+       return bits;
+}
+
+/* Unpacks the next block and sets up for the inverse Burrows-Wheeler step. */
+
+static int INIT get_next_block(struct bunzip_data *bd)
+{
+       struct group_data *hufGroup = NULL;
+       int *base = NULL;
+       int *limit = NULL;
+       int dbufCount, nextSym, dbufSize, groupCount, selector,
+               i, j, k, t, runPos, symCount, symTotal, nSelectors,
+               byteCount[256];
+       unsigned char uc, symToByte[256], mtfSymbol[256], *selectors;
+       unsigned int *dbuf, origPtr;
+
+       dbuf = bd->dbuf;
+       dbufSize = bd->dbufSize;
+       selectors = bd->selectors;
+
+       /* Read in header signature and CRC, then validate signature.
+          (last block signature means CRC is for whole file, return now) */
+       i = get_bits(bd, 24);
+       j = get_bits(bd, 24);
+       bd->headerCRC = get_bits(bd, 32);
+       if ((i == 0x177245) && (j == 0x385090))
+               return RETVAL_LAST_BLOCK;
+       if ((i != 0x314159) || (j != 0x265359))
+               return RETVAL_NOT_BZIP_DATA;
+       /* We can add support for blockRandomised if anybody complains.
+          There was some code for this in busybox 1.0.0-pre3, but nobody ever
+          noticed that it didn't actually work. */
+       if (get_bits(bd, 1))
+               return RETVAL_OBSOLETE_INPUT;
+       origPtr = get_bits(bd, 24);
+       if (origPtr > dbufSize)
+               return RETVAL_DATA_ERROR;
+       /* mapping table: if some byte values are never used (encoding things
+          like ascii text), the compression code removes the gaps to have fewer
+          symbols to deal with, and writes a sparse bitfield indicating which
+          values were present.  We make a translation table to convert the
+          symbols back to the corresponding bytes. */
+       t = get_bits(bd, 16);
+       symTotal = 0;
+       for (i = 0; i < 16; i++) {
+               if (t&(1 << (15-i))) {
+                       k = get_bits(bd, 16);
+                       for (j = 0; j < 16; j++)
+                               if (k&(1 << (15-j)))
+                                       symToByte[symTotal++] = (16*i)+j;
+               }
+       }
+       /* How many different Huffman coding groups does this block use? */
+       groupCount = get_bits(bd, 3);
+       if (groupCount < 2 || groupCount > MAX_GROUPS)
+               return RETVAL_DATA_ERROR;
+       /* nSelectors: Every GROUP_SIZE many symbols we select a new
+          Huffman coding group.  Read in the group selector list,
+          which is stored as MTF encoded bit runs.  (MTF = Move To
+          Front, as each value is used it's moved to the start of the
+          list.) */
+       nSelectors = get_bits(bd, 15);
+       if (!nSelectors)
+               return RETVAL_DATA_ERROR;
+       for (i = 0; i < groupCount; i++)
+               mtfSymbol[i] = i;
+       for (i = 0; i < nSelectors; i++) {
+               /* Get next value */
+               for (j = 0; get_bits(bd, 1); j++)
+                       if (j >= groupCount)
+                               return RETVAL_DATA_ERROR;
+               /* Decode MTF to get the next selector */
+               uc = mtfSymbol[j];
+               for (; j; j--)
+                       mtfSymbol[j] = mtfSymbol[j-1];
+               mtfSymbol[0] = selectors[i] = uc;
+       }
+       /* Read the Huffman coding tables for each group, which code
+          for symTotal literal symbols, plus two run symbols (RUNA,
+          RUNB) */
+       symCount = symTotal+2;
+       for (j = 0; j < groupCount; j++) {
+               unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+               int     minLen, maxLen, pp;
+               /* Read Huffman code lengths for each symbol.  They're
+                  stored in a way similar to mtf; record a starting
+                  value for the first symbol, and an offset from the
+                  previous value for every symbol after that.
+                  (Subtracting 1 before the loop and then adding it
+                  back at the end is an optimization that makes the
+                  test inside the loop simpler: symbol length 0
+                  becomes negative, so an unsigned inequality catches
+                  it.) */
+               t = get_bits(bd, 5)-1;
+               for (i = 0; i < symCount; i++) {
+                       for (;;) {
+                               if (((unsigned)t) > (MAX_HUFCODE_BITS-1))
+                                       return RETVAL_DATA_ERROR;
+
+                               /* If first bit is 0, stop.  Else
+                                  second bit indicates whether to
+                                  increment or decrement the value.
+                                  Optimization: grab 2 bits and unget
+                                  the second if the first was 0. */
+
+                               k = get_bits(bd, 2);
+                               if (k < 2) {
+                                       bd->inbufBitCount++;
+                                       break;
+                               }
+                               /* Add one if second bit 1, else
+                                * subtract 1.  Avoids if/else */
+                               t += (((k+1)&2)-1);
+                       }
+                       /* Correct for the initial -1, to get the
+                        * final symbol length */
+                       length[i] = t+1;
+               }
+               /* Find largest and smallest lengths in this group */
+               minLen = maxLen = length[0];
+
+               for (i = 1; i < symCount; i++) {
+                       if (length[i] > maxLen)
+                               maxLen = length[i];
+                       else if (length[i] < minLen)
+                               minLen = length[i];
+               }
+
+               /* Calculate permute[], base[], and limit[] tables from
+                * length[].
+                *
+                * permute[] is the lookup table for converting
+                * Huffman coded symbols into decoded symbols.  base[]
+                * is the amount to subtract from the value of a
+                * Huffman symbol of a given length when using
+                * permute[].
+                *
+                * limit[] indicates the largest numerical value a
+                * symbol with a given number of bits can have.  This
+                * is how the Huffman codes can vary in length: each
+                * code with a value > limit[length] needs another
+                * bit.
+                */
+               hufGroup = bd->groups+j;
+               hufGroup->minLen = minLen;
+               hufGroup->maxLen = maxLen;
+               /* Note that minLen can't be smaller than 1, so we
+                  adjust the base and limit array pointers so we're
+                  not always wasting the first entry.  We do this
+                  again when using them (during symbol decoding). */
+               base = hufGroup->base-1;
+               limit = hufGroup->limit-1;
+               /* Calculate permute[].  Concurrently, initialize
+                * temp[] and limit[]. */
+               pp = 0;
+               for (i = minLen; i <= maxLen; i++) {
+                       temp[i] = limit[i] = 0;
+                       for (t = 0; t < symCount; t++)
+                               if (length[t] == i)
+                                       hufGroup->permute[pp++] = t;
+               }
+               /* Count symbols coded for at each bit length */
+               for (i = 0; i < symCount; i++)
+                       temp[length[i]]++;
+               /* Calculate limit[] (the largest symbol-coding value
+                * at each bit length, which is (previous limit <<
+                * 1)+symbols at this level), and base[] (number of
+                * symbols to ignore at each bit length, which is limit
+                * minus the cumulative count of symbols coded for
+                * already). */
+               pp = t = 0;
+               for (i = minLen; i < maxLen; i++) {
+                       pp += temp[i];
+                       /* We read the largest possible symbol size
+                          and then unget bits after determining how
+                          many we need, and those extra bits could be
+                          set to anything.  (They're noise from
+                          future symbols.)  At each level we're
+                          really only interested in the first few
+                          bits, so here we set all the trailing
+                          to-be-ignored bits to 1 so they don't
+                          affect the value > limit[length]
+                          comparison. */
+                       limit[i] = (pp << (maxLen - i)) - 1;
+                       pp <<= 1;
+                       base[i+1] = pp-(t += temp[i]);
+               }
+               limit[maxLen+1] = INT_MAX; /* Sentinel value for
+                                           * reading next sym. */
+               limit[maxLen] = pp+temp[maxLen]-1;
+               base[minLen] = 0;
+       }
+       /* We've finished reading and digesting the block header.  Now
+          read this block's Huffman coded symbols from the file and
+          undo the Huffman coding and run length encoding, saving the
+          result into dbuf[dbufCount++] = uc */
+
+       /* Initialize symbol occurrence counters and symbol Move To
+        * Front table */
+       for (i = 0; i < 256; i++) {
+               byteCount[i] = 0;
+               mtfSymbol[i] = (unsigned char)i;
+       }
+       /* Loop through compressed symbols. */
+       runPos = dbufCount = symCount = selector = 0;
+       for (;;) {
+               /* Determine which Huffman coding group to use. */
+               if (!(symCount--)) {
+                       symCount = GROUP_SIZE-1;
+                       if (selector >= nSelectors)
+                               return RETVAL_DATA_ERROR;
+                       hufGroup = bd->groups+selectors[selector++];
+                       base = hufGroup->base-1;
+                       limit = hufGroup->limit-1;
+               }
+               /* Read next Huffman-coded symbol. */
+               /* Note: It is far cheaper to read maxLen bits and
+                  back up than it is to read minLen bits and then an
+                  additional bit at a time, testing as we go.
+                  Because there is a trailing last block (with file
+                  CRC), there is no danger of the overread causing an
+                  unexpected EOF for a valid compressed file.  As a
+                  further optimization, we do the read inline
+                  (falling back to a call to get_bits if the buffer
+                  runs dry).  The following (up to got_huff_bits:) is
+                  equivalent to j = get_bits(bd, hufGroup->maxLen);
+                */
+               while (bd->inbufBitCount < hufGroup->maxLen) {
+                       if (bd->inbufPos == bd->inbufCount) {
+                               j = get_bits(bd, hufGroup->maxLen);
+                               goto got_huff_bits;
+                       }
+                       bd->inbufBits =
+                               (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++];
+                       bd->inbufBitCount += 8;
+               };
+               bd->inbufBitCount -= hufGroup->maxLen;
+               j = (bd->inbufBits >> bd->inbufBitCount)&
+                       ((1 << hufGroup->maxLen)-1);
+got_huff_bits:
+               /* Figure out how many bits are in the next symbol and
+                * unget extras */
+               i = hufGroup->minLen;
+               while (j > limit[i])
+                       ++i;
+               bd->inbufBitCount += (hufGroup->maxLen - i);
+               /* Huffman decode value to get nextSym (with bounds checking) */
+               if ((i > hufGroup->maxLen)
+                       || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i]))
+                               >= MAX_SYMBOLS))
+                       return RETVAL_DATA_ERROR;
+               nextSym = hufGroup->permute[j];
+               /* We have now decoded the symbol, which indicates
+                  either a new literal byte, or a repeated run of the
+                  most recent literal byte.  First, check if nextSym
+                  indicates a repeated run, and if so loop collecting
+                  how many times to repeat the last literal. */
+               if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */
+                       /* If this is the start of a new run, zero out
+                        * counter */
+                       if (!runPos) {
+                               runPos = 1;
+                               t = 0;
+                       }
+                       /* Neat trick that saves 1 symbol: instead of
+                          or-ing 0 or 1 at each bit position, add 1
+                          or 2 instead.  For example, 1011 is 1 << 0
+                          + 1 << 1 + 2 << 2.  1010 is 2 << 0 + 2 << 1
+                          + 1 << 2.  You can make any bit pattern
+                          that way using 1 less symbol than the basic
+                          or 0/1 method (except all bits 0, which
+                          would use no symbols, but a run of length 0
+                          doesn't mean anything in this context).
+                          Thus space is saved. */
+                       t += (runPos << nextSym);
+                       /* +runPos if RUNA; +2*runPos if RUNB */
+
+                       runPos <<= 1;
+                       continue;
+               }
+               /* When we hit the first non-run symbol after a run,
+                  we now know how many times to repeat the last
+                  literal, so append that many copies to our buffer
+                  of decoded symbols (dbuf) now.  (The last literal
+                  used is the one at the head of the mtfSymbol
+                  array.) */
+               if (runPos) {
+                       runPos = 0;
+                       if (dbufCount+t >= dbufSize)
+                               return RETVAL_DATA_ERROR;
+
+                       uc = symToByte[mtfSymbol[0]];
+                       byteCount[uc] += t;
+                       while (t--)
+                               dbuf[dbufCount++] = uc;
+               }
+               /* Is this the terminating symbol? */
+               if (nextSym > symTotal)
+                       break;
+               /* At this point, nextSym indicates a new literal
+                  character.  Subtract one to get the position in the
+                  MTF array at which this literal is currently to be
+                  found.  (Note that the result can't be -1 or 0,
+                  because 0 and 1 are RUNA and RUNB.  But another
+                  instance of the first symbol in the mtf array,
+                  position 0, would have been handled as part of a
+                  run above.  Therefore 1 unused mtf position minus 2
+                  non-literal nextSym values equals -1.) */
+               if (dbufCount >= dbufSize)
+                       return RETVAL_DATA_ERROR;
+               i = nextSym - 1;
+               uc = mtfSymbol[i];
+               /* Adjust the MTF array.  Since we typically expect to
+                * move only a small number of symbols, and are bound
+                * by 256 in any case, using memmove here would
+                * typically be bigger and slower due to function call
+                * overhead and other assorted setup costs. */
+               do {
+                       mtfSymbol[i] = mtfSymbol[i-1];
+               } while (--i);
+               mtfSymbol[0] = uc;
+               uc = symToByte[uc];
+               /* We have our literal byte.  Save it into dbuf. */
+               byteCount[uc]++;
+               dbuf[dbufCount++] = (unsigned int)uc;
+       }
+       /* At this point, we've read all the Huffman-coded symbols
+          (and repeated runs) for this block from the input stream,
+          and decoded them into the intermediate buffer.  There are
+          dbufCount many decoded bytes in dbuf[].  Now undo the
+          Burrows-Wheeler transform on dbuf.  See
+          http://dogma.net/markn/articles/bwt/bwt.htm
+        */
+       /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */
+       j = 0;
+       for (i = 0; i < 256; i++) {
+               k = j+byteCount[i];
+               byteCount[i] = j;
+               j = k;
+       }
+       /* Figure out what order dbuf would be in if we sorted it. */
+       for (i = 0; i < dbufCount; i++) {
+               uc = (unsigned char)(dbuf[i] & 0xff);
+               dbuf[byteCount[uc]] |= (i << 8);
+               byteCount[uc]++;
+       }
+       /* Decode first byte by hand to initialize "previous" byte.
+          Note that it doesn't get output, and if the first three
+          characters are identical it doesn't qualify as a run (hence
+          writeRunCountdown = 5). */
+       if (dbufCount) {
+               if (origPtr >= dbufCount)
+                       return RETVAL_DATA_ERROR;
+               bd->writePos = dbuf[origPtr];
+               bd->writeCurrent = (unsigned char)(bd->writePos&0xff);
+               bd->writePos >>= 8;
+               bd->writeRunCountdown = 5;
+       }
+       bd->writeCount = dbufCount;
+
+       return RETVAL_OK;
+}
+
+/* Undo the Burrows-Wheeler transform on the intermediate buffer to produce
+   output.  Up to len bytes of decoded data are written to outbuf.  Return
+   value is the number of bytes written, or an error (all errors are
+   negative numbers). */
+
+static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len)
+{
+       const unsigned int *dbuf;
+       int pos, xcurrent, previous, gotcount;
+
+       /* If last read was short due to end of file, return last block now */
+       if (bd->writeCount < 0)
+               return bd->writeCount;
+
+       gotcount = 0;
+       dbuf = bd->dbuf;
+       pos = bd->writePos;
+       xcurrent = bd->writeCurrent;
+
+       /* We will always have pending decoded data to write into the output
+          buffer unless this is the very first call (in which case we haven't
+          Huffman-decoded a block into the intermediate buffer yet). */
+
+       if (bd->writeCopies) {
+               /* Inside the loop, writeCopies means extra copies (beyond 1) */
+               --bd->writeCopies;
+               /* Loop outputting bytes */
+               for (;;) {
+                       /* If the output buffer is full, snapshot
+                        * state and return */
+                       if (gotcount >= len) {
+                               bd->writePos = pos;
+                               bd->writeCurrent = xcurrent;
+                               bd->writeCopies++;
+                               return len;
+                       }
+                       /* Write next byte into output buffer, updating CRC */
+                       outbuf[gotcount++] = xcurrent;
+                       bd->writeCRC = (((bd->writeCRC) << 8)
+                               ^bd->crc32Table[((bd->writeCRC) >> 24)
+                               ^xcurrent]);
+                       /* Loop now if we're outputting multiple
+                        * copies of this byte */
+                       if (bd->writeCopies) {
+                               --bd->writeCopies;
+                               continue;
+                       }
+decode_next_byte:
+                       if (!bd->writeCount--)
+                               break;
+                       /* Follow sequence vector to undo
+                        * Burrows-Wheeler transform */
+                       previous = xcurrent;
+                       pos = dbuf[pos];
+                       xcurrent = pos&0xff;
+                       pos >>= 8;
+                       /* After 3 consecutive copies of the same
+                          byte, the 4th is a repeat count.  We count
+                          down from 4 instead of counting up because
+                          testing for non-zero is faster */
+                       if (--bd->writeRunCountdown) {
+                               if (xcurrent != previous)
+                                       bd->writeRunCountdown = 4;
+                       } else {
+                               /* We have a repeated run, this byte
+                                * indicates the count */
+                               bd->writeCopies = xcurrent;
+                               xcurrent = previous;
+                               bd->writeRunCountdown = 5;
+                               /* Sometimes there are just 3 bytes
+                                * (run length 0) */
+                               if (!bd->writeCopies)
+                                       goto decode_next_byte;
+                               /* Subtract the 1 copy we'd output
+                                * anyway to get extras */
+                               --bd->writeCopies;
+                       }
+               }
+               /* Decompression of this block completed successfully */
+               bd->writeCRC = ~bd->writeCRC;
+               bd->totalCRC = ((bd->totalCRC << 1) |
+                               (bd->totalCRC >> 31)) ^ bd->writeCRC;
+               /* If this block had a CRC error, force file level CRC error. */
+               if (bd->writeCRC != bd->headerCRC) {
+                       bd->totalCRC = bd->headerCRC+1;
+                       return RETVAL_LAST_BLOCK;
+               }
+       }
+
+       /* Refill the intermediate buffer by Huffman-decoding next
+        * block of input */
+       /* (previous is just a convenient unused temp variable here) */
+       previous = get_next_block(bd);
+       if (previous) {
+               bd->writeCount = previous;
+               return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount;
+       }
+       bd->writeCRC = 0xffffffffUL;
+       pos = bd->writePos;
+       xcurrent = bd->writeCurrent;
+       goto decode_next_byte;
+}
+
+static int INIT nofill(void *buf, unsigned int len)
+{
+       return -1;
+}
+
+/* Allocate the structure, read the stream header.  If fill is NULL, inbuf
+   must contain a complete bzip2 stream (len bytes long).  Otherwise fill()
+   is used to (re)fill the temporary input buffer as needed. */
+static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len,
+                            int (*fill)(void*, unsigned int))
+{
+       struct bunzip_data *bd;
+       unsigned int i, j, c;
+       const unsigned int BZh0 =
+               (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16)
+               +(((unsigned int)'h') << 8)+(unsigned int)'0';
+
+       /* Figure out how much data to allocate */
+       i = sizeof(struct bunzip_data);
+
+       /* Allocate bunzip_data.  Most fields initialize to zero. */
+       bd = *bdp = malloc(i);
+       memset(bd, 0, sizeof(struct bunzip_data));
+       /* Setup input buffer */
+       bd->inbuf = inbuf;
+       bd->inbufCount = len;
+       if (fill != NULL)
+               bd->fill = fill;
+       else
+               bd->fill = nofill;
+
+       /* Init the CRC32 table (big endian) */
+       for (i = 0; i < 256; i++) {
+               c = i << 24;
+               for (j = 8; j; j--)
+                       c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1);
+               bd->crc32Table[i] = c;
+       }
+
+       /* Ensure that file starts with "BZh['1'-'9']." */
+       i = get_bits(bd, 32);
+       if (((unsigned int)(i-BZh0-1)) >= 9)
+               return RETVAL_NOT_BZIP_DATA;
+
+       /* Fourth byte (ASCII '1'-'9') indicates block size in units of 100k of
+          uncompressed data.  Allocate intermediate buffer for block. */
+       bd->dbufSize = 100000*(i-BZh0);
+
+       bd->dbuf = large_malloc(bd->dbufSize * sizeof(int));
+       return RETVAL_OK;
+}
+
+/* Decompress bzip2 data supplied in buf (or via fill), writing through
+   flush() or into outbuf.  (Stops at end of bzip2 data, not end of file.) */
+STATIC int INIT bunzip2(unsigned char *buf, int len,
+                       int(*fill)(void*, unsigned int),
+                       int(*flush)(void*, unsigned int),
+                       unsigned char *outbuf,
+                       int *pos,
+                       void(*error_fn)(char *x))
+{
+       struct bunzip_data *bd;
+       int i = -1;
+       unsigned char *inbuf;
+
+       set_error_fn(error_fn);
+       if (flush)
+               outbuf = malloc(BZIP2_IOBUF_SIZE);
+       else
+               len -= 4; /* Uncompressed size hack active in pre-boot
+                            environment */
+       if (!outbuf) {
+               error("Could not allocate output buffer");
+               return -1;
+       }
+       if (buf)
+               inbuf = buf;
+       else
+               inbuf = malloc(BZIP2_IOBUF_SIZE);
+       if (!inbuf) {
+               error("Could not allocate input buffer");
+               goto exit_0;
+       }
+       i = start_bunzip(&bd, inbuf, len, fill);
+       if (!i) {
+               for (;;) {
+                       i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE);
+                       if (i <= 0)
+                               break;
+                       if (!flush)
+                               outbuf += i;
+                       else
+                               if (i != flush(outbuf, i)) {
+                                       i = RETVAL_UNEXPECTED_OUTPUT_EOF;
+                                       break;
+                               }
+               }
+       }
+       /* Check CRC and release memory */
+       if (i == RETVAL_LAST_BLOCK) {
+               if (bd->headerCRC != bd->totalCRC)
+                       error("Data integrity error when decompressing.");
+               else
+                       i = RETVAL_OK;
+       } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) {
+               error("Compressed file ends unexpectedly");
+       }
+       if (bd->dbuf)
+               large_free(bd->dbuf);
+       if (pos)
+               *pos = bd->inbufPos;
+       free(bd);
+       if (!buf)
+               free(inbuf);
+exit_0:
+       if (flush)
+               free(outbuf);
+       return i;
+}
+
+#define decompress bunzip2
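
The RUNA/RUNB "neat trick" decoded in get_next_block() is a bijective base-2 run-length code: each symbol contributes digit * 2^position with RUNA = 1 and RUNB = 2, so the sequence RUNA, RUNA, RUNB encodes 1*1 + 1*2 + 2*4 = 11 repeats. A standalone sketch of that arithmetic (not part of the file above):

	static int run_length(const int *syms, int n)	/* 0 = RUNA, 1 = RUNB */
	{
		int i, t = 0, runPos = 1;

		for (i = 0; i < n; i++) {
			t += runPos << syms[i];	/* +runPos or +2*runPos */
			runPos <<= 1;
		}
		return t;
	}
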
diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c
new file mode 100644 (file)
index 0000000..839a329
--- /dev/null
@@ -0,0 +1,167 @@
+#ifdef STATIC
+/* Pre-boot environment: included */
+
+/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots
+ * of errors about console_printk etc... on ARM */
+#define _LINUX_KERNEL_H
+
+#include "zlib_inflate/inftrees.c"
+#include "zlib_inflate/inffast.c"
+#include "zlib_inflate/inflate.c"
+
+#else /* STATIC */
+/* initramfs et al: linked */
+
+#include <linux/zutil.h>
+
+#include "zlib_inflate/inftrees.h"
+#include "zlib_inflate/inffast.h"
+#include "zlib_inflate/inflate.h"
+
+#include "zlib_inflate/infutil.h"
+
+#endif /* STATIC */
+
+#include <linux/decompress/mm.h>
+
+#define INBUF_LEN (16*1024)
+
+/* Included from initramfs et al code */
+STATIC int INIT gunzip(unsigned char *buf, int len,
+                      int(*fill)(void*, unsigned int),
+                      int(*flush)(void*, unsigned int),
+                      unsigned char *out_buf,
+                      int *pos,
+                      void(*error_fn)(char *x)) {
+       u8 *zbuf;
+       struct z_stream_s *strm;
+       int rc;
+       size_t out_len;
+
+       set_error_fn(error_fn);
+       rc = -1;
+       if (flush) {
+               out_len = 0x8000; /* 32 K */
+               out_buf = malloc(out_len);
+       } else {
+               out_len = 0x7fffffff; /* no limit */
+       }
+       if (!out_buf) {
+               error("Out of memory while allocating output buffer");
+               goto gunzip_nomem1;
+       }
+
+       if (buf)
+               zbuf = buf;
+       else {
+               zbuf = malloc(INBUF_LEN);
+               len = 0;
+       }
+       if (!zbuf) {
+               error("Out of memory while allocating input buffer");
+               goto gunzip_nomem2;
+       }
+
+       strm = malloc(sizeof(*strm));
+       if (strm == NULL) {
+               error("Out of memory while allocating z_stream");
+               goto gunzip_nomem3;
+       }
+
+       strm->workspace = malloc(flush ? zlib_inflate_workspacesize() :
+                                sizeof(struct inflate_state));
+       if (strm->workspace == NULL) {
+               error("Out of memory while allocating workspace");
+               goto gunzip_nomem4;
+       }
+
+       if (len == 0)
+               len = fill(zbuf, INBUF_LEN);
+
+       /* verify the gzip header */
+       if (len < 10 ||
+          zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) {
+               if (pos)
+                       *pos = 0;
+               error("Not a gzip file");
+               goto gunzip_5;
+       }
+
+       /* skip over gzip header (1f,8b,08... 10 bytes total +
+        * possible asciz filename)
+        */
+       strm->next_in = zbuf + 10;
+       /* skip over asciz filename */
+       if (zbuf[3] & 0x8) {
+               while (strm->next_in[0])
+                       strm->next_in++;
+               strm->next_in++;
+       }
+       strm->avail_in = len - (strm->next_in - zbuf);
+
+       strm->next_out = out_buf;
+       strm->avail_out = out_len;
+
+       rc = zlib_inflateInit2(strm, -MAX_WBITS);
+
+       if (!flush) {
+               WS(strm)->inflate_state.wsize = 0;
+               WS(strm)->inflate_state.window = NULL;
+       }
+
+       while (rc == Z_OK) {
+               if (strm->avail_in == 0) {
+                       /* TODO: handle case where both pos and fill are set */
+                       len = fill(zbuf, INBUF_LEN);
+                       if (len < 0) {
+                               rc = -1;
+                               error("read error");
+                               break;
+                       }
+                       strm->next_in = zbuf;
+                       strm->avail_in = len;
+               }
+               rc = zlib_inflate(strm, 0);
+
+               /* Write any data generated */
+               if (flush && strm->next_out > out_buf) {
+                       int l = strm->next_out - out_buf;
+                       if (l != flush(out_buf, l)) {
+                               rc = -1;
+                               error("write error");
+                               break;
+                       }
+                       strm->next_out = out_buf;
+                       strm->avail_out = out_len;
+               }
+
+               /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */
+               if (rc == Z_STREAM_END) {
+                       rc = 0;
+                       break;
+               } else if (rc != Z_OK) {
+                       error("uncompression error");
+                       rc = -1;
+               }
+       }
+
+       zlib_inflateEnd(strm);
+       if (pos)
+               /* add + 8 to skip over trailer */
+               *pos = strm->next_in - zbuf+8;
+
+gunzip_5:
+       free(strm->workspace);
+gunzip_nomem4:
+       free(strm);
+gunzip_nomem3:
+       if (!buf)
+               free(zbuf);
+gunzip_nomem2:
+       if (flush)
+               free(out_buf);
+gunzip_nomem1:
+       return rc; /* returns Z_OK (0) if successful */
+}
+
+#define decompress gunzip
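
The header walk gunzip() does by hand, restated as a standalone sketch: check the RFC 1952 magic (1f 8b) and deflate method (08), skip the 10 fixed bytes, then skip the NUL-terminated name when FNAME (bit 3 of the flag byte) is set. Like the code above, this deliberately ignores the FEXTRA/FCOMMENT/FHCRC fields:

	/* Returns the offset of the raw deflate stream, or -1 if not gzip. */
	static int gzip_payload_offset(const unsigned char *buf, int len)
	{
		int off = 10;				/* fixed-size header */

		if (len < 10 || buf[0] != 0x1f || buf[1] != 0x8b ||
		    buf[2] != 0x08)
			return -1;
		if (buf[3] & 0x08) {			/* FNAME set */
			while (off < len && buf[off])
				off++;
			off++;				/* the NUL itself */
		}
		return off <= len ? off : -1;
	}
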
diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c
new file mode 100644 (file)
index 0000000..546f2f4
--- /dev/null
@@ -0,0 +1,647 @@
+/* Lzma decompressor for Linux kernel. Shamelessly snarfed
+ * from busybox 1.1.1
+ *
+ * Linux kernel adaptation
+ * Copyright (C) 2006  Alain < alain@knaff.lu >
+ *
+ * Based on small lzma deflate implementation/Small range coder
+ * implementation for lzma.
+ * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
+ *
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Copyright (C) 1999-2005  Igor Pavlov
+ *
+ * Copyrights of the parts, see headers below.
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#ifndef STATIC
+#include <linux/decompress/unlzma.h>
+#endif /* STATIC */
+
+#include <linux/decompress/mm.h>
+
+#define        MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+static long long INIT read_int(unsigned char *ptr, int size)
+{
+       int i;
+       long long ret = 0;
+
+       for (i = 0; i < size; i++)
+               ret = (ret << 8) | ptr[size-i-1];
+       return ret;
+}
+
+#define ENDIAN_CONVERT(x) \
+  x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
+
+
+/* Small range coder implementation for lzma.
+ * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
+ *
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Copyright (c) 1999-2005  Igor Pavlov
+ */
+
+#include <linux/compiler.h>
+
+#define LZMA_IOBUF_SIZE        0x10000
+
+struct rc {
+       int (*fill)(void*, unsigned int);
+       uint8_t *ptr;
+       uint8_t *buffer;
+       uint8_t *buffer_end;
+       int buffer_size;
+       uint32_t code;
+       uint32_t range;
+       uint32_t bound;
+};
+
+
+#define RC_TOP_BITS 24
+#define RC_MOVE_BITS 5
+#define RC_MODEL_TOTAL_BITS 11
+
+
+/* Called twice: once at startup and once in rc_normalize() */
+static void INIT rc_read(struct rc *rc)
+{
+       rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
+       if (rc->buffer_size <= 0)
+               error("unexpected EOF");
+       rc->ptr = rc->buffer;
+       rc->buffer_end = rc->buffer + rc->buffer_size;
+}
+
+/* Called once */
+static inline void INIT rc_init(struct rc *rc,
+                                      int (*fill)(void*, unsigned int),
+                                      char *buffer, int buffer_size)
+{
+       rc->fill = fill;
+       rc->buffer = (uint8_t *)buffer;
+       rc->buffer_size = buffer_size;
+       rc->buffer_end = rc->buffer + rc->buffer_size;
+       rc->ptr = rc->buffer;
+
+       rc->code = 0;
+       rc->range = 0xFFFFFFFF;
+}
+
+static inline void INIT rc_init_code(struct rc *rc)
+{
+       int i;
+
+       for (i = 0; i < 5; i++) {
+               if (rc->ptr >= rc->buffer_end)
+                       rc_read(rc);
+               rc->code = (rc->code << 8) | *rc->ptr++;
+       }
+}
+
+
+/* Called once. TODO: bb_maybe_free() */
+static inline void INIT rc_free(struct rc *rc)
+{
+       free(rc->buffer);
+}
+
+/* Called twice, but one call site is in the inlined rc_is_bit_0_helper() */
+static void INIT rc_do_normalize(struct rc *rc)
+{
+       if (rc->ptr >= rc->buffer_end)
+               rc_read(rc);
+       rc->range <<= 8;
+       rc->code = (rc->code << 8) | *rc->ptr++;
+}
+static inline void INIT rc_normalize(struct rc *rc)
+{
+       if (rc->range < (1 << RC_TOP_BITS))
+               rc_do_normalize(rc);
+}
+
+/* Called 9 times */
+/* Why does rc_is_bit_0_helper() exist?
+ * Because we want to always expose (rc->code < rc->bound) to the optimizer.
+ */
+static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
+{
+       rc_normalize(rc);
+       rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
+       return rc->bound;
+}
+static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
+{
+       uint32_t t = rc_is_bit_0_helper(rc, p);
+       return rc->code < t;
+}
+
+/* Called ~10 times, but very small, thus inlined */
+static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
+{
+       rc->range = rc->bound;
+       *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
+}
+static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
+{
+       rc->range -= rc->bound;
+       rc->code -= rc->bound;
+       *p -= *p >> RC_MOVE_BITS;
+}
+
+/* Called 4 times in unlzma loop */
+static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
+{
+       if (rc_is_bit_0(rc, p)) {
+               rc_update_bit_0(rc, p);
+               *symbol *= 2;
+               return 0;
+       } else {
+               rc_update_bit_1(rc, p);
+               *symbol = *symbol * 2 + 1;
+               return 1;
+       }
+}
+
+/* Called once */
+static inline int INIT rc_direct_bit(struct rc *rc)
+{
+       rc_normalize(rc);
+       rc->range >>= 1;
+       if (rc->code >= rc->range) {
+               rc->code -= rc->range;
+               return 1;
+       }
+       return 0;
+}
+
+/* Called twice */
+static inline void INIT
+rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
+{
+       int i = num_levels;
+
+       *symbol = 1;
+       while (i--)
+               rc_get_bit(rc, p + *symbol, symbol);
+       *symbol -= 1 << num_levels;
+}
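+
+/*
+ * Worked example (illustrative, not from the original source): each
+ * probability *p is an 11-bit fixed-point estimate that the next bit
+ * is 0.  With the initial value *p = (1 << RC_MODEL_TOTAL_BITS) / 2 =
+ * 1024 and a full range of 0xFFFFFFFF, rc_is_bit_0() computes
+ * bound = 1024 * (0xFFFFFFFF >> 11) = 0x7FFFFC00, almost exactly half
+ * the range, so 0 and 1 start out equally likely.  Each
+ * rc_update_bit_*() then moves *p towards the observed bit by 1/32nd
+ * (RC_MOVE_BITS = 5) of the remaining distance.
+ */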
+
+
+/*
+ * Small lzma deflate implementation.
+ * Copyright (C) 2006  Aurelien Jacobs < aurel@gnuage.org >
+ *
+ * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
+ * Copyright (C) 1999-2005  Igor Pavlov
+ */
+
+
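+/*
+ * The 13 byte .lzma file header: one properties byte (pos) followed
+ * by the dictionary size and the uncompressed size, both stored
+ * little-endian (hence the ENDIAN_CONVERT() calls in unlzma() below).
+ */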
+struct lzma_header {
+       uint8_t pos;
+       uint32_t dict_size;
+       uint64_t dst_size;
+} __attribute__ ((packed));
+
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+#define LZMA_NUM_POS_BITS_MAX 4
+
+#define LZMA_LEN_NUM_LOW_BITS 3
+#define LZMA_LEN_NUM_MID_BITS 3
+#define LZMA_LEN_NUM_HIGH_BITS 8
+
+#define LZMA_LEN_CHOICE 0
+#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
+#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
+#define LZMA_LEN_MID (LZMA_LEN_LOW \
+                     + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
+#define LZMA_LEN_HIGH (LZMA_LEN_MID \
+                      +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
+#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
+
+#define LZMA_NUM_STATES 12
+#define LZMA_NUM_LIT_STATES 7
+
+#define LZMA_START_POS_MODEL_INDEX 4
+#define LZMA_END_POS_MODEL_INDEX 14
+#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
+
+#define LZMA_NUM_POS_SLOT_BITS 6
+#define LZMA_NUM_LEN_TO_POS_STATES 4
+
+#define LZMA_NUM_ALIGN_BITS 4
+
+#define LZMA_MATCH_MIN_LEN 2
+
+#define LZMA_IS_MATCH 0
+#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
+#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
+#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
+#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
+#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
+#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
+                      + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
+#define LZMA_SPEC_POS (LZMA_POS_SLOT \
+                      +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
+#define LZMA_ALIGN (LZMA_SPEC_POS \
+                   + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
+#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
+#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
+#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
+
+
+struct writer {
+       uint8_t *buffer;
+       uint8_t previous_byte;
+       size_t buffer_pos;
+       int bufsize;
+       size_t global_pos;
+       int(*flush)(void*, unsigned int);
+       struct lzma_header *header;
+};
+
+struct cstate {
+       int state;
+       uint32_t rep0, rep1, rep2, rep3;
+};
+
+static inline size_t INIT get_pos(struct writer *wr)
+{
+       return wr->global_pos + wr->buffer_pos;
+}
+
+static inline uint8_t INIT peek_old_byte(struct writer *wr,
+                                               uint32_t offs)
+{
+       if (!wr->flush) {
+               int32_t pos;
+               while (offs > wr->header->dict_size)
+                       offs -= wr->header->dict_size;
+               pos = wr->buffer_pos - offs;
+               return wr->buffer[pos];
+       } else {
+               uint32_t pos = wr->buffer_pos - offs;
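+               /*
+                * Note: pos is unsigned, so when offs reaches behind
+                * the start of the circular dictionary the subtraction
+                * above wraps around; the additions below bring pos
+                * back into [0, dict_size).
+                */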
+               while (pos >= wr->header->dict_size)
+                       pos += wr->header->dict_size;
+               return wr->buffer[pos];
+       }
+
+}
+
+static inline void INIT write_byte(struct writer *wr, uint8_t byte)
+{
+       wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
+       if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
+               wr->buffer_pos = 0;
+               wr->global_pos += wr->header->dict_size;
+               wr->flush((char *)wr->buffer, wr->header->dict_size);
+       }
+}
+
+
+static inline void INIT copy_byte(struct writer *wr, uint32_t offs)
+{
+       write_byte(wr, peek_old_byte(wr, offs));
+}
+
+static inline void INIT copy_bytes(struct writer *wr,
+                                        uint32_t rep0, int len)
+{
+       do {
+               copy_byte(wr, rep0);
+               len--;
+       } while (len != 0 && wr->buffer_pos < wr->header->dst_size);
+}
+
+static inline void INIT process_bit0(struct writer *wr, struct rc *rc,
+                                    struct cstate *cst, uint16_t *p,
+                                    int pos_state, uint16_t *prob,
+                                    int lc, uint32_t literal_pos_mask)
+{
+       int mi = 1;
+       rc_update_bit_0(rc, prob);
+       prob = (p + LZMA_LITERAL +
+               (LZMA_LIT_SIZE
+                * (((get_pos(wr) & literal_pos_mask) << lc)
+                   + (wr->previous_byte >> (8 - lc))))
+               );
+
+       if (cst->state >= LZMA_NUM_LIT_STATES) {
+               int match_byte = peek_old_byte(wr, cst->rep0);
+               do {
+                       int bit;
+                       uint16_t *prob_lit;
+
+                       match_byte <<= 1;
+                       bit = match_byte & 0x100;
+                       prob_lit = prob + 0x100 + bit + mi;
+                       if (rc_get_bit(rc, prob_lit, &mi)) {
+                               if (!bit)
+                                       break;
+                       } else {
+                               if (bit)
+                                       break;
+                       }
+               } while (mi < 0x100);
+       }
+       while (mi < 0x100) {
+               uint16_t *prob_lit = prob + mi;
+               rc_get_bit(rc, prob_lit, &mi);
+       }
+       write_byte(wr, mi);
+       if (cst->state < 4)
+               cst->state = 0;
+       else if (cst->state < 10)
+               cst->state -= 3;
+       else
+               cst->state -= 6;
+}
+
+static inline void INIT process_bit1(struct writer *wr, struct rc *rc,
+                                           struct cstate *cst, uint16_t *p,
+                                           int pos_state, uint16_t *prob)
+{
+       int offset;
+       uint16_t *prob_len;
+       int num_bits;
+       int len;
+
+       rc_update_bit_1(rc, prob);
+       prob = p + LZMA_IS_REP + cst->state;
+       if (rc_is_bit_0(rc, prob)) {
+               rc_update_bit_0(rc, prob);
+               cst->rep3 = cst->rep2;
+               cst->rep2 = cst->rep1;
+               cst->rep1 = cst->rep0;
+               cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
+               prob = p + LZMA_LEN_CODER;
+       } else {
+               rc_update_bit_1(rc, prob);
+               prob = p + LZMA_IS_REP_G0 + cst->state;
+               if (rc_is_bit_0(rc, prob)) {
+                       rc_update_bit_0(rc, prob);
+                       prob = (p + LZMA_IS_REP_0_LONG
+                               + (cst->state <<
+                                  LZMA_NUM_POS_BITS_MAX) +
+                               pos_state);
+                       if (rc_is_bit_0(rc, prob)) {
+                               rc_update_bit_0(rc, prob);
+
+                               cst->state = cst->state < LZMA_NUM_LIT_STATES ?
+                                       9 : 11;
+                               copy_byte(wr, cst->rep0);
+                               return;
+                       } else {
+                               rc_update_bit_1(rc, prob);
+                       }
+               } else {
+                       uint32_t distance;
+
+                       rc_update_bit_1(rc, prob);
+                       prob = p + LZMA_IS_REP_G1 + cst->state;
+                       if (rc_is_bit_0(rc, prob)) {
+                               rc_update_bit_0(rc, prob);
+                               distance = cst->rep1;
+                       } else {
+                               rc_update_bit_1(rc, prob);
+                               prob = p + LZMA_IS_REP_G2 + cst->state;
+                               if (rc_is_bit_0(rc, prob)) {
+                                       rc_update_bit_0(rc, prob);
+                                       distance = cst->rep2;
+                               } else {
+                                       rc_update_bit_1(rc, prob);
+                                       distance = cst->rep3;
+                                       cst->rep3 = cst->rep2;
+                               }
+                               cst->rep2 = cst->rep1;
+                       }
+                       cst->rep1 = cst->rep0;
+                       cst->rep0 = distance;
+               }
+               cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
+               prob = p + LZMA_REP_LEN_CODER;
+       }
+
+       prob_len = prob + LZMA_LEN_CHOICE;
+       if (rc_is_bit_0(rc, prob_len)) {
+               rc_update_bit_0(rc, prob_len);
+               prob_len = (prob + LZMA_LEN_LOW
+                           + (pos_state <<
+                              LZMA_LEN_NUM_LOW_BITS));
+               offset = 0;
+               num_bits = LZMA_LEN_NUM_LOW_BITS;
+       } else {
+               rc_update_bit_1(rc, prob_len);
+               prob_len = prob + LZMA_LEN_CHOICE_2;
+               if (rc_is_bit_0(rc, prob_len)) {
+                       rc_update_bit_0(rc, prob_len);
+                       prob_len = (prob + LZMA_LEN_MID
+                                   + (pos_state <<
+                                      LZMA_LEN_NUM_MID_BITS));
+                       offset = 1 << LZMA_LEN_NUM_LOW_BITS;
+                       num_bits = LZMA_LEN_NUM_MID_BITS;
+               } else {
+                       rc_update_bit_1(rc, prob_len);
+                       prob_len = prob + LZMA_LEN_HIGH;
+                       offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
+                                 + (1 << LZMA_LEN_NUM_MID_BITS));
+                       num_bits = LZMA_LEN_NUM_HIGH_BITS;
+               }
+       }
+
+       rc_bit_tree_decode(rc, prob_len, num_bits, &len);
+       len += offset;
+
+       if (cst->state < 4) {
+               int pos_slot;
+
+               cst->state += LZMA_NUM_LIT_STATES;
+               prob =
+                       p + LZMA_POS_SLOT +
+                       ((len <
+                         LZMA_NUM_LEN_TO_POS_STATES ? len :
+                         LZMA_NUM_LEN_TO_POS_STATES - 1)
+                        << LZMA_NUM_POS_SLOT_BITS);
+               rc_bit_tree_decode(rc, prob,
+                                  LZMA_NUM_POS_SLOT_BITS,
+                                  &pos_slot);
+               if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
+                       int i, mi;
+                       num_bits = (pos_slot >> 1) - 1;
+                       cst->rep0 = 2 | (pos_slot & 1);
+                       if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
+                               cst->rep0 <<= num_bits;
+                               prob = p + LZMA_SPEC_POS +
+                                       cst->rep0 - pos_slot - 1;
+                       } else {
+                               num_bits -= LZMA_NUM_ALIGN_BITS;
+                               while (num_bits--)
+                                       cst->rep0 = (cst->rep0 << 1) |
+                                               rc_direct_bit(rc);
+                               prob = p + LZMA_ALIGN;
+                               cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
+                               num_bits = LZMA_NUM_ALIGN_BITS;
+                       }
+                       i = 1;
+                       mi = 1;
+                       while (num_bits--) {
+                               if (rc_get_bit(rc, prob + mi, &mi))
+                                       cst->rep0 |= i;
+                               i <<= 1;
+                       }
+               } else
+                       cst->rep0 = pos_slot;
+               if (++(cst->rep0) == 0)
+                       return;
+       }
+
+       len += LZMA_MATCH_MIN_LEN;
+
+       copy_bytes(wr, cst->rep0, len);
+}
+
+
+
+STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
+                             int(*fill)(void*, unsigned int),
+                             int(*flush)(void*, unsigned int),
+                             unsigned char *output,
+                             int *posp,
+                             void(*error_fn)(char *x)
+       )
+{
+       struct lzma_header header;
+       int lc, pb, lp;
+       uint32_t pos_state_mask;
+       uint32_t literal_pos_mask;
+       uint16_t *p;
+       int num_probs;
+       struct rc rc;
+       int i, mi;
+       struct writer wr;
+       struct cstate cst;
+       unsigned char *inbuf;
+       int ret = -1;
+
+       set_error_fn(error_fn);
+       if (!flush)
+               in_len -= 4; /* Uncompressed size hack active in pre-boot
+                               environment */
+       if (buf)
+               inbuf = buf;
+       else
+               inbuf = malloc(LZMA_IOBUF_SIZE);
+       if (!inbuf) {
+               error("Could not allocate input buffer");
+               goto exit_0;
+       }
+
+       cst.state = 0;
+       cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;
+
+       wr.header = &header;
+       wr.flush = flush;
+       wr.global_pos = 0;
+       wr.previous_byte = 0;
+       wr.buffer_pos = 0;
+
+       rc_init(&rc, fill, inbuf, in_len);
+
+       for (i = 0; i < sizeof(header); i++) {
+               if (rc.ptr >= rc.buffer_end)
+                       rc_read(&rc);
+               ((unsigned char *)&header)[i] = *rc.ptr++;
+       }
+
+       if (header.pos >= (9 * 5 * 5))
+               error("bad header");
+
+       mi = 0;
+       lc = header.pos;
+       while (lc >= 9) {
+               mi++;
+               lc -= 9;
+       }
+       pb = 0;
+       lp = mi;
+       while (lp >= 5) {
+               pb++;
+               lp -= 5;
+       }
+       pos_state_mask = (1 << pb) - 1;
+       literal_pos_mask = (1 << lp) - 1;
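+
+       /*
+        * Worked example (illustrative, not from the original source):
+        * the common LZMA properties byte 0x5d (== 93) decodes as
+        * lc = 93 % 9 = 3, lp = (93 / 9) % 5 = 0, pb = (93 / 9) / 5 = 2,
+        * giving pos_state_mask = 0x3 and literal_pos_mask = 0x0.
+        */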
+
+       ENDIAN_CONVERT(header.dict_size);
+       ENDIAN_CONVERT(header.dst_size);
+
+       if (header.dict_size == 0)
+               header.dict_size = 1;
+
+       if (output)
+               wr.buffer = output;
+       else {
+               wr.bufsize = MIN(header.dst_size, header.dict_size);
+               wr.buffer = large_malloc(wr.bufsize);
+       }
+       if (wr.buffer == NULL)
+               goto exit_1;
+
+       num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
+       p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
+       if (p == 0)
+               goto exit_2;
+       num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
+       for (i = 0; i < num_probs; i++)
+               p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;
+
+       rc_init_code(&rc);
+
+       while (get_pos(&wr) < header.dst_size) {
+               int pos_state = get_pos(&wr) & pos_state_mask;
+               uint16_t *prob = p + LZMA_IS_MATCH +
+                       (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
+               if (rc_is_bit_0(&rc, prob))
+                       process_bit0(&wr, &rc, &cst, p, pos_state, prob,
+                                    lc, literal_pos_mask);
+               else {
+                       process_bit1(&wr, &rc, &cst, p, pos_state, prob);
+                       if (cst.rep0 == 0)
+                               break;
+               }
+       }
+
+       if (posp)
+               *posp = rc.ptr - rc.buffer;
+       if (wr.flush)
+               wr.flush(wr.buffer, wr.buffer_pos);
+       ret = 0;
+       large_free(p);
+exit_2:
+       if (!output)
+               large_free(wr.buffer);
+exit_1:
+       if (!buf)
+               free(inbuf);
+exit_0:
+       return ret;
+}
+
+#define decompress unlzma
index df8a6c92052df2c49888504af7277e1cfdfecfd6..3d17b3d1b21f1fc1a5609a77367d25997f351790 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
 /* inflate.h -- internal inflate state definition
  * Copyright (C) 1995-2004 Mark Adler
  * For conditions of distribution and use, see copyright notice in zlib.h
@@ -105,3 +108,4 @@ struct inflate_state {
     unsigned short work[288];   /* work area for code table building */
     code codes[ENOUGH];         /* space for code tables */
 };
+#endif /* INFLATE_H */
index 5f5219b1240e58c7b5eb2c7a9e2a3a7add446ef8..b70b4731ac7a6e4ff32f48f84253881ad85c90e9 100644 (file)
@@ -1,3 +1,6 @@
+#ifndef INFTREES_H
+#define INFTREES_H
+
 /* inftrees.h -- header to use inftrees.c
  * Copyright (C) 1995-2005 Mark Adler
  * For conditions of distribution and use, see copyright notice in zlib.h
@@ -53,3 +56,4 @@ typedef enum {
 extern int zlib_inflate_table (codetype type, unsigned short *lens,
                              unsigned codes, code **table,
                              unsigned *bits, unsigned short *work);
+#endif /* INFTREES_H */
index 72255be57f89160cf8ff12e702c013db310d6ef4..818569b68f4652e623f5d2bd4ed91fe68fcea2b4 100644 (file)
@@ -30,6 +30,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
+ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+obj-$(CONFIG_SMP) += percpu.o
+else
 obj-$(CONFIG_SMP) += allocpercpu.o
+endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
index 4297bc41bfd2aefc986d3d3957cf83e37a63cd4a..1882923bc706071b79f823dd17b8f69d4fccf145 100644 (file)
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
        __percpu_populate_mask((__pdata), (size), (gfp), &(mask))
 
 /**
- * percpu_alloc_mask - initial setup of per-cpu data
+ * alloc_percpu - initial setup of per-cpu data
  * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-data for cpu's selected through mask bits
+ * @align: alignment
  *
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
+ * Allocate dynamic percpu area.  Percpu objects are populated with
+ * zeroed buffers.
  */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+void *__alloc_percpu(size_t size, size_t align)
 {
        /*
         * We allocate whole cache lines to avoid false sharing
         */
        size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-       void *pdata = kzalloc(sz, gfp);
+       void *pdata = kzalloc(sz, GFP_KERNEL);
        void *__pdata = __percpu_disguise(pdata);
 
+       /*
+        * Can't easily make larger alignment work with kmalloc.  WARN
+        * on it.  Larger alignment should only be used for module
+        * percpu sections on SMP for which this path isn't used.
+        */
+       WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+
        if (unlikely(!pdata))
                return NULL;
-       if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+       if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
+                                          &cpu_possible_map)))
                return __pdata;
        kfree(pdata);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+EXPORT_SYMBOL_GPL(__alloc_percpu);
 
 /**
- * percpu_free - final cleanup of per-cpu data
+ * free_percpu - final cleanup of per-cpu data
  * @__pdata: object to clean up
  *
  * We simply clean up any per-cpu object left. No need for the client to
 * track and specify through a bit mask which per-cpu objects are to be freed.
  */
-void percpu_free(void *__pdata)
+void free_percpu(void *__pdata)
 {
        if (unlikely(!__pdata))
                return;
        __percpu_depopulate_mask(__pdata, &cpu_possible_map);
        kfree(__percpu_disguise(__pdata));
 }
-EXPORT_SYMBOL_GPL(percpu_free);
+EXPORT_SYMBOL_GPL(free_percpu);
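A minimal usage sketch of the renamed interface; struct my_stats, my_init and my_exit are hypothetical names, not part of the patch:

	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/smp.h>

	struct my_stats {
		unsigned long events;
	};

	static struct my_stats *stats;

	static int __init my_init(void)
	{
		int cpu;

		/* one zeroed instance of struct my_stats per possible CPU */
		stats = alloc_percpu(struct my_stats);
		if (!stats)
			return -ENOMEM;

		/* pin to a CPU, then bump that CPU's private counter */
		cpu = get_cpu();
		per_cpu_ptr(stats, cpu)->events++;
		put_cpu();
		return 0;
	}

	static void my_exit(void)
	{
		free_percpu(stats);
	}

alloc_percpu() is the type-checked wrapper around __alloc_percpu(); both implementations (allocpercpu.c here and the new percpu.c below) export the same pair of symbols, so callers are unaffected by the Makefile switch.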
index 51a0ccf61e0e97284cf3be1d05d4f43feb293d93..daf92713f7de0226a40fe1203dba635f2b20f1a0 100644 (file)
@@ -382,7 +382,6 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
        return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
 }
 
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 /**
  * reserve_bootmem - mark a page range as usable
  * @addr: starting address of the range
@@ -403,7 +402,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size,
 
        return mark_bootmem(start, end, 1, flags);
 }
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx,
                        unsigned long step)
@@ -429,8 +427,8 @@ static unsigned long align_off(struct bootmem_data *bdata, unsigned long off,
 }
 
 static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
-                               unsigned long size, unsigned long align,
-                               unsigned long goal, unsigned long limit)
+                                       unsigned long size, unsigned long align,
+                                       unsigned long goal, unsigned long limit)
 {
        unsigned long fallback = 0;
        unsigned long min, max, start, sidx, midx, step;
@@ -530,17 +528,34 @@ find_block:
        return NULL;
 }
 
+static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
+                                       unsigned long size, unsigned long align,
+                                       unsigned long goal, unsigned long limit)
+{
+#ifdef CONFIG_HAVE_ARCH_BOOTMEM
+       bootmem_data_t *p_bdata;
+
+       p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit);
+       if (p_bdata)
+               return alloc_bootmem_core(p_bdata, size, align, goal, limit);
+#endif
+       return NULL;
+}
+
 static void * __init ___alloc_bootmem_nopanic(unsigned long size,
                                        unsigned long align,
                                        unsigned long goal,
                                        unsigned long limit)
 {
        bootmem_data_t *bdata;
+       void *region;
 
 restart:
-       list_for_each_entry(bdata, &bdata_list, list) {
-               void *region;
+       region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
+       if (region)
+               return region;
 
+       list_for_each_entry(bdata, &bdata_list, list) {
                if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
                        continue;
                if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
@@ -618,6 +633,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata,
 {
        void *ptr;
 
+       ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit);
+       if (ptr)
+               return ptr;
+
        ptr = alloc_bootmem_core(bdata, size, align, goal, limit);
        if (ptr)
                return ptr;
@@ -674,6 +693,10 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
 {
        void *ptr;
 
+       ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
+       if (ptr)
+               return ptr;
+
        ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
        if (ptr)
                return ptr;
index 23acefe51808fdf0b8ed7ec3a7a5c92ee424cad1..126d3973b3d1f3be7a3f5e4f42630725c796f31f 100644 (file)
@@ -1823,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                int copy = min(bytes, iov->iov_len - base);
 
                base = 0;
-               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+               left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
@@ -1851,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic_nocache(kaddr + offset,
-                                                       buf, bytes);
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1880,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               left = __copy_from_user(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
diff --git a/mm/percpu.c b/mm/percpu.c
new file mode 100644 (file)
index 0000000..1aa5d8f
--- /dev/null
@@ -0,0 +1,1326 @@
+/*
+ * linux/mm/percpu.c - percpu memory allocator
+ *
+ * Copyright (C) 2009          SUSE Linux Products GmbH
+ * Copyright (C) 2009          Tejun Heo <tj@kernel.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * This is the percpu allocator which can handle both static and
+ * dynamic areas.  Percpu areas are allocated in chunks in the vmalloc
+ * area.  Each chunk consists of num_possible_cpus() units and the
+ * first chunk is used for static percpu variables in the kernel image
+ * (special boot time alloc/init handling is necessary as these areas
+ * need to be brought up before allocation services are running).
+ * Units grow as necessary and all units grow or shrink in unison.
+ * When a chunk is filled up, another chunk is allocated; in the
+ * vmalloc area this looks like:
+ *
+ *  c0                           c1                         c2
+ *  -------------------          -------------------        ------------
+ * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
+ *  -------------------  ......  -------------------  ....  ------------
+ *
+ * Allocation is done in offset-size areas of a single unit's space.  That is,
+ * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
+ * c1:u1, c1:u2 and c1:u3.  Percpu access can be done by configuring
+ * percpu base registers UNIT_SIZE apart.
+ *
+ * There are usually many small percpu allocations, many of them as
+ * small as 4 bytes.  The allocator organizes chunks into lists
+ * according to free size and tries to allocate from the fullest one.
+ * Each chunk keeps the maximum contiguous area size hint which is
+ * guaranteed to be equal to or larger than the maximum contiguous
+ * area in the chunk.  This helps the allocator avoid iterating the
+ * chunk maps unnecessarily.
+ *
+ * Allocation state in each chunk is kept using an array of integers
+ * on chunk->map.  A positive value in the map represents a free
+ * region and negative allocated.  Allocation inside a chunk is done
+ * by scanning this map sequentially and serving the first matching
+ * entry.  This is mostly copied from the percpu_modalloc() allocator.
+ * Chunks are also linked into a rb tree to ease address to chunk
+ * mapping during free.
+ *
+ * To use this allocator, arch code should do the following:
+ *
+ * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+ *
+ * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
+ *   regular address to percpu pointer and back if they need to be
+ *   different from the default
+ *
+ * - use pcpu_setup_first_chunk() during percpu area initialization to
+ *   setup the first chunk containing the kernel static percpu area
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bootmem.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/pfn.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+
+#define PCPU_SLOT_BASE_SHIFT           5       /* 1-31 share the same slot */
+#define PCPU_DFL_MAP_ALLOC             16      /* start a map with 16 entries */
+
+/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
+#ifndef __addr_to_pcpu_ptr
+#define __addr_to_pcpu_ptr(addr)                                       \
+       (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr  \
+                + (unsigned long)__per_cpu_start)
+#endif
+#ifndef __pcpu_ptr_to_addr
+#define __pcpu_ptr_to_addr(ptr)                                                \
+       (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr   \
+                - (unsigned long)__per_cpu_start)
+#endif
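+
+/*
+ * Note (illustrative): the two macros are exact inverses, i.e.
+ * __pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr.  The default
+ * mapping rebases a unit-0 vmalloc address into the address space of
+ * the static percpu symbols (which start at __per_cpu_start), so
+ * static and dynamic percpu pointers can be dereferenced the same way.
+ */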
+
+struct pcpu_chunk {
+       struct list_head        list;           /* linked to pcpu_slot lists */
+       struct rb_node          rb_node;        /* key is chunk->vm->addr */
+       int                     free_size;      /* free bytes in the chunk */
+       int                     contig_hint;    /* max contiguous size hint */
+       struct vm_struct        *vm;            /* mapped vmalloc region */
+       int                     map_used;       /* # of map entries used */
+       int                     map_alloc;      /* # of map entries allocated */
+       int                     *map;           /* allocation map */
+       bool                    immutable;      /* no [de]population allowed */
+       struct page             **page;         /* points to page array */
+       struct page             *page_ar[];     /* #cpus * UNIT_PAGES */
+};
+
+static int pcpu_unit_pages __read_mostly;
+static int pcpu_unit_size __read_mostly;
+static int pcpu_chunk_size __read_mostly;
+static int pcpu_nr_slots __read_mostly;
+static size_t pcpu_chunk_struct_size __read_mostly;
+
+/* the address of the first chunk which starts with the kernel static area */
+void *pcpu_base_addr __read_mostly;
+EXPORT_SYMBOL_GPL(pcpu_base_addr);
+
+/* optional reserved chunk, only accessible for reserved allocations */
+static struct pcpu_chunk *pcpu_reserved_chunk;
+/* offset limit of the reserved chunk */
+static int pcpu_reserved_chunk_limit;
+
+/*
+ * Synchronization rules.
+ *
+ * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
+ * protects allocation/reclaim paths, chunks and chunk->page arrays.
+ * The latter is a spinlock and protects the index data structures -
+ * chunk slots, rbtree, chunks and area maps in chunks.
+ *
+ * During allocation, pcpu_alloc_mutex is kept locked all the time and
+ * pcpu_lock is grabbed and released as necessary.  All actual memory
+ * allocations are done using GFP_KERNEL with pcpu_lock released.
+ *
+ * Free path accesses and alters only the index data structures, so it
+ * can be safely called from atomic context.  When memory needs to be
+ * returned to the system, free path schedules reclaim_work which
+ * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
+ * reclaimed, releases both locks and frees the chunks.  Note that it's
+ * necessary to grab both locks to remove a chunk from circulation as
+ * allocation path might be referencing the chunk with only
+ * pcpu_alloc_mutex locked.
+ */
+static DEFINE_MUTEX(pcpu_alloc_mutex); /* protects whole alloc and reclaim */
+static DEFINE_SPINLOCK(pcpu_lock);     /* protects index data structures */
+
+static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
+static struct rb_root pcpu_addr_root = RB_ROOT;        /* chunks by address */
+
+/* reclaim work to release fully free chunks, scheduled from free path */
+static void pcpu_reclaim(struct work_struct *work);
+static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
+
+static int __pcpu_size_to_slot(int size)
+{
+       int highbit = fls(size);        /* size is in bytes */
+       return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
+}
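+
+/*
+ * Example (illustrative, not from the original source): with
+ * PCPU_SLOT_BASE_SHIFT == 5, __pcpu_size_to_slot(64) == fls(64) - 5 +
+ * 2 == 4 and __pcpu_size_to_slot(1024) == 8; each doubling of the
+ * free size moves a chunk up one slot.
+ */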
+
+static int pcpu_size_to_slot(int size)
+{
+       if (size == pcpu_unit_size)
+               return pcpu_nr_slots - 1;
+       return __pcpu_size_to_slot(size);
+}
+
+static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
+{
+       if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
+               return 0;
+
+       return pcpu_size_to_slot(chunk->free_size);
+}
+
+static int pcpu_page_idx(unsigned int cpu, int page_idx)
+{
+       return cpu * pcpu_unit_pages + page_idx;
+}
+
+static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk,
+                                     unsigned int cpu, int page_idx)
+{
+       return &chunk->page[pcpu_page_idx(cpu, page_idx)];
+}
+
+static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
+                                    unsigned int cpu, int page_idx)
+{
+       return (unsigned long)chunk->vm->addr +
+               (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT);
+}
+
+static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk,
+                                    int page_idx)
+{
+       return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL;
+}
+
+/**
+ * pcpu_mem_alloc - allocate memory
+ * @size: bytes to allocate
+ *
+ * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
+ * kzalloc() is used; otherwise, vmalloc() is used.  The returned
+ * memory is always zeroed.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Pointer to the allocated area on success, NULL on failure.
+ */
+static void *pcpu_mem_alloc(size_t size)
+{
+       if (size <= PAGE_SIZE)
+               return kzalloc(size, GFP_KERNEL);
+       else {
+               void *ptr = vmalloc(size);
+               if (ptr)
+                       memset(ptr, 0, size);
+               return ptr;
+       }
+}
+
+/**
+ * pcpu_mem_free - free memory
+ * @ptr: memory to free
+ * @size: size of the area
+ *
+ * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
+ */
+static void pcpu_mem_free(void *ptr, size_t size)
+{
+       if (size <= PAGE_SIZE)
+               kfree(ptr);
+       else
+               vfree(ptr);
+}
+
+/**
+ * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
+ * @chunk: chunk of interest
+ * @oslot: the previous slot it was on
+ *
+ * This function is called after an allocation or free changed @chunk.
+ * New slot according to the changed state is determined and @chunk is
+ * moved to the slot.  Note that the reserved chunk is never put on
+ * chunk slots.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
+{
+       int nslot = pcpu_chunk_slot(chunk);
+
+       if (chunk != pcpu_reserved_chunk && oslot != nslot) {
+               if (oslot < nslot)
+                       list_move(&chunk->list, &pcpu_slot[nslot]);
+               else
+                       list_move_tail(&chunk->list, &pcpu_slot[nslot]);
+       }
+}
+
+static struct rb_node **pcpu_chunk_rb_search(void *addr,
+                                            struct rb_node **parentp)
+{
+       struct rb_node **p = &pcpu_addr_root.rb_node;
+       struct rb_node *parent = NULL;
+       struct pcpu_chunk *chunk;
+
+       while (*p) {
+               parent = *p;
+               chunk = rb_entry(parent, struct pcpu_chunk, rb_node);
+
+               if (addr < chunk->vm->addr)
+                       p = &(*p)->rb_left;
+               else if (addr > chunk->vm->addr)
+                       p = &(*p)->rb_right;
+               else
+                       break;
+       }
+
+       if (parentp)
+               *parentp = parent;
+       return p;
+}
+
+/**
+ * pcpu_chunk_addr_search - search for chunk containing specified address
+ * @addr: address to search for
+ *
+ * Look for chunk which might contain @addr.  More specifically, it
+ * searches for the chunk with the highest start address which isn't
+ * beyond @addr.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ *
+ * RETURNS:
+ * The address of the found chunk.
+ */
+static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
+{
+       struct rb_node *n, *parent;
+       struct pcpu_chunk *chunk;
+
+       /* is it in the reserved chunk? */
+       if (pcpu_reserved_chunk) {
+               void *start = pcpu_reserved_chunk->vm->addr;
+
+               if (addr >= start && addr < start + pcpu_reserved_chunk_limit)
+                       return pcpu_reserved_chunk;
+       }
+
+       /* nah... search the regular ones */
+       n = *pcpu_chunk_rb_search(addr, &parent);
+       if (!n) {
+               /* no exactly matching chunk, the parent is the closest */
+               n = parent;
+               BUG_ON(!n);
+       }
+       chunk = rb_entry(n, struct pcpu_chunk, rb_node);
+
+       if (addr < chunk->vm->addr) {
+               /* the parent was the next one, look for the previous one */
+               n = rb_prev(n);
+               BUG_ON(!n);
+               chunk = rb_entry(n, struct pcpu_chunk, rb_node);
+       }
+
+       return chunk;
+}
+
+/**
+ * pcpu_chunk_addr_insert - insert chunk into address rb tree
+ * @new: chunk to insert
+ *
+ * Insert @new into address rb tree.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_chunk_addr_insert(struct pcpu_chunk *new)
+{
+       struct rb_node **p, *parent;
+
+       p = pcpu_chunk_rb_search(new->vm->addr, &parent);
+       BUG_ON(*p);
+       rb_link_node(&new->rb_node, parent, p);
+       rb_insert_color(&new->rb_node, &pcpu_addr_root);
+}
+
+/**
+ * pcpu_extend_area_map - extend area map for allocation
+ * @chunk: target chunk
+ *
+ * Extend area map of @chunk so that it can accommodate an allocation.
+ * A single allocation can split an area into three areas, so this
+ * function makes sure that @chunk->map has at least two extra slots.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
+ * if area map is extended.
+ *
+ * RETURNS:
+ * 0 if noop, 1 if successfully extended, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
+{
+       int new_alloc;
+       int *new;
+       size_t size;
+
+       /* has enough? */
+       if (chunk->map_alloc >= chunk->map_used + 2)
+               return 0;
+
+       spin_unlock_irq(&pcpu_lock);
+
+       new_alloc = PCPU_DFL_MAP_ALLOC;
+       while (new_alloc < chunk->map_used + 2)
+               new_alloc *= 2;
+
+       new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
+       if (!new) {
+               spin_lock_irq(&pcpu_lock);
+               return -ENOMEM;
+       }
+
+       /*
+        * Acquire pcpu_lock and switch to the new area map.  Only frees
+        * could have happened in between, so map_used couldn't have
+        * grown.
+        */
+       spin_lock_irq(&pcpu_lock);
+       BUG_ON(new_alloc < chunk->map_used + 2);
+
+       size = chunk->map_alloc * sizeof(chunk->map[0]);
+       memcpy(new, chunk->map, size);
+
+       /*
+        * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
+        * one of the first chunks and is still using its static map.
+        */
+       if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
+               pcpu_mem_free(chunk->map, size);
+
+       chunk->map_alloc = new_alloc;
+       chunk->map = new;
+       return 0;
+}
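+
+/*
+ * Example (illustrative, not from the original source): a chunk with
+ * map_used == 15 and map_alloc == 16 no longer has the two spare
+ * slots an allocation may need, so the map is reallocated with
+ * new_alloc doubled from PCPU_DFL_MAP_ALLOC (16) to 32 entries and
+ * the old contents copied over.
+ */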
+
+/**
+ * pcpu_split_block - split a map block
+ * @chunk: chunk of interest
+ * @i: index of map block to split
+ * @head: head size in bytes (can be 0)
+ * @tail: tail size in bytes (can be 0)
+ *
+ * Split the @i'th map block into two or three blocks.  If @head is
+ * non-zero, @head bytes block is inserted before block @i moving it
+ * to @i+1 and reducing its size by @head bytes.
+ *
+ * If @tail is non-zero, the target block, which can be @i or @i+1
+ * depending on @head, is reduced by @tail bytes and @tail byte block
+ * is inserted after the target block.
+ *
+ * @chunk->map must have enough free slots to accommodate the split.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
+                            int head, int tail)
+{
+       int nr_extra = !!head + !!tail;
+
+       BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
+
+       /* insert new subblocks */
+       memmove(&chunk->map[i + nr_extra], &chunk->map[i],
+               sizeof(chunk->map[0]) * (chunk->map_used - i));
+       chunk->map_used += nr_extra;
+
+       if (head) {
+               chunk->map[i + 1] = chunk->map[i] - head;
+               chunk->map[i++] = head;
+       }
+       if (tail) {
+               chunk->map[i++] -= tail;
+               chunk->map[i] = tail;
+       }
+}
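+
+/*
+ * Worked example (illustrative, not from the original source):
+ * starting from map = { -256, 512 } (256 bytes allocated, then 512
+ * bytes free), pcpu_split_block(chunk, 1, 64, 192) produces
+ * map = { -256, 64, 256, 192 }: a 64 byte head and a 192 byte tail
+ * are carved off, leaving a 256 byte middle block for the caller to
+ * mark allocated.
+ */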
+
+/**
+ * pcpu_alloc_area - allocate area from a pcpu_chunk
+ * @chunk: chunk of interest
+ * @size: wanted size in bytes
+ * @align: wanted align
+ *
+ * Try to allocate @size bytes area aligned at @align from @chunk.
+ * Note that this function only allocates the offset.  It doesn't
+ * populate or map the area.
+ *
+ * @chunk->map must have at least two free slots.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ *
+ * RETURNS:
+ * Allocated offset in @chunk on success, -1 if no matching area is
+ * found.
+ */
+static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
+{
+       int oslot = pcpu_chunk_slot(chunk);
+       int max_contig = 0;
+       int i, off;
+
+       for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
+               bool is_last = i + 1 == chunk->map_used;
+               int head, tail;
+
+               /* extra for alignment requirement */
+               head = ALIGN(off, align) - off;
+               BUG_ON(i == 0 && head != 0);
+
+               if (chunk->map[i] < 0)
+                       continue;
+               if (chunk->map[i] < head + size) {
+                       max_contig = max(chunk->map[i], max_contig);
+                       continue;
+               }
+
+               /*
+                * If head is small or the previous block is free,
+                * merge them.  Note that 'small' is defined as smaller
+                * than sizeof(int), which is very small but isn't too
+                * uncommon for percpu allocations.
+                */
+               if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
+                       if (chunk->map[i - 1] > 0)
+                               chunk->map[i - 1] += head;
+                       else {
+                               chunk->map[i - 1] -= head;
+                               chunk->free_size -= head;
+                       }
+                       chunk->map[i] -= head;
+                       off += head;
+                       head = 0;
+               }
+
+               /* if tail is small, just keep it around */
+               tail = chunk->map[i] - head - size;
+               if (tail < sizeof(int))
+                       tail = 0;
+
+               /* split if warranted */
+               if (head || tail) {
+                       pcpu_split_block(chunk, i, head, tail);
+                       if (head) {
+                               i++;
+                               off += head;
+                               max_contig = max(chunk->map[i - 1], max_contig);
+                       }
+                       if (tail)
+                               max_contig = max(chunk->map[i + 1], max_contig);
+               }
+
+               /* update hint and mark allocated */
+               if (is_last)
+                       chunk->contig_hint = max_contig; /* fully scanned */
+               else
+                       chunk->contig_hint = max(chunk->contig_hint,
+                                                max_contig);
+
+               chunk->free_size -= chunk->map[i];
+               chunk->map[i] = -chunk->map[i];
+
+               pcpu_chunk_relocate(chunk, oslot);
+               return off;
+       }
+
+       chunk->contig_hint = max_contig;        /* fully scanned */
+       pcpu_chunk_relocate(chunk, oslot);
+
+       /* tell the upper layer that this chunk has no matching area */
+       return -1;
+}
+
+/**
+ * pcpu_free_area - free area to a pcpu_chunk
+ * @chunk: chunk of interest
+ * @freeme: offset of area to free
+ *
+ * Free area starting from @freeme to @chunk.  Note that this function
+ * only modifies the allocation map.  It doesn't depopulate or unmap
+ * the area.
+ *
+ * CONTEXT:
+ * pcpu_lock.
+ */
+static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
+{
+       int oslot = pcpu_chunk_slot(chunk);
+       int i, off;
+
+       for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
+               if (off == freeme)
+                       break;
+       BUG_ON(off != freeme);
+       BUG_ON(chunk->map[i] > 0);
+
+       chunk->map[i] = -chunk->map[i];
+       chunk->free_size += chunk->map[i];
+
+       /* merge with previous? */
+       if (i > 0 && chunk->map[i - 1] >= 0) {
+               chunk->map[i - 1] += chunk->map[i];
+               chunk->map_used--;
+               memmove(&chunk->map[i], &chunk->map[i + 1],
+                       (chunk->map_used - i) * sizeof(chunk->map[0]));
+               i--;
+       }
+       /* merge with next? */
+       if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
+               chunk->map[i] += chunk->map[i + 1];
+               chunk->map_used--;
+               memmove(&chunk->map[i + 1], &chunk->map[i + 2],
+                       (chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
+       }
+
+       chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
+       pcpu_chunk_relocate(chunk, oslot);
+}
+
+/**
+ * pcpu_unmap - unmap pages out of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to unmap
+ * @page_end: page index of the last page to unmap + 1
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
+ * If @flush is true, vcache is flushed before unmapping and tlb
+ * after.
+ */
+static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
+                      bool flush)
+{
+       unsigned int last = num_possible_cpus() - 1;
+       unsigned int cpu;
+
+       /* unmap must not be done on immutable chunk */
+       WARN_ON(chunk->immutable);
+
+       /*
+        * Each flushing trial can be very expensive, issue flush on
+        * the whole region at once rather than doing it for each cpu.
+        * This could be overkill but is more scalable.
+        */
+       if (flush)
+               flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+                                  pcpu_chunk_addr(chunk, last, page_end));
+
+       for_each_possible_cpu(cpu)
+               unmap_kernel_range_noflush(
+                               pcpu_chunk_addr(chunk, cpu, page_start),
+                               (page_end - page_start) << PAGE_SHIFT);
+
+       /* ditto as flush_cache_vunmap() */
+       if (flush)
+               flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
+                                      pcpu_chunk_addr(chunk, last, page_end));
+}
+
+/**
+ * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk
+ * @chunk: chunk to depopulate
+ * @off: offset to the area to depopulate
+ * @size: size of the area to depopulate in bytes
+ * @flush: whether to flush cache and tlb or not
+ *
+ * For each cpu, depopulate and unmap pages [@page_start,@page_end)
+ * from @chunk.  If @flush is true, vcache is flushed before unmapping
+ * and tlb after.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex.
+ */
+static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size,
+                                 bool flush)
+{
+       int page_start = PFN_DOWN(off);
+       int page_end = PFN_UP(off + size);
+       int unmap_start = -1;
+       int uninitialized_var(unmap_end);
+       unsigned int cpu;
+       int i;
+
+       for (i = page_start; i < page_end; i++) {
+               for_each_possible_cpu(cpu) {
+                       struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+
+                       if (!*pagep)
+                               continue;
+
+                       __free_page(*pagep);
+
+                       /*
+                        * If it's partial depopulation, it might get
+                        * populated or depopulated again.  Mark the
+                        * page gone.
+                        */
+                       *pagep = NULL;
+
+                       unmap_start = unmap_start < 0 ? i : unmap_start;
+                       unmap_end = i + 1;
+               }
+       }
+
+       if (unmap_start >= 0)
+               pcpu_unmap(chunk, unmap_start, unmap_end, flush);
+}
+
+/**
+ * pcpu_map - map pages into a pcpu_chunk
+ * @chunk: chunk of interest
+ * @page_start: page index of the first page to map
+ * @page_end: page index of the last page to map + 1
+ *
+ * For each cpu, map pages [@page_start,@page_end) into @chunk.
+ * vcache is flushed afterwards.
+ */
+static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end)
+{
+       unsigned int last = num_possible_cpus() - 1;
+       unsigned int cpu;
+       int err;
+
+       /* map must not be done on immutable chunk */
+       WARN_ON(chunk->immutable);
+
+       for_each_possible_cpu(cpu) {
+               err = map_kernel_range_noflush(
+                               pcpu_chunk_addr(chunk, cpu, page_start),
+                               (page_end - page_start) << PAGE_SHIFT,
+                               PAGE_KERNEL,
+                               pcpu_chunk_pagep(chunk, cpu, page_start));
+               if (err < 0)
+                       return err;
+       }
+
+       /* flush at once, please read comments in pcpu_unmap() */
+       flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start),
+                        pcpu_chunk_addr(chunk, last, page_end));
+       return 0;
+}
+
+/**
+ * pcpu_populate_chunk - populate and map an area of a pcpu_chunk
+ * @chunk: chunk of interest
+ * @off: offset to the area to populate
+ * @size: size of the area to populate in bytes
+ *
+ * For each cpu, populate and map pages [@page_start,@page_end) into
+ * @chunk.  The area is cleared on return.
+ *
+ * CONTEXT:
+ * pcpu_alloc_mutex, does GFP_KERNEL allocation.
+ */
+static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
+{
+       const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD;
+       int page_start = PFN_DOWN(off);
+       int page_end = PFN_UP(off + size);
+       int map_start = -1;
+       int uninitialized_var(map_end);
+       unsigned int cpu;
+       int i;
+
+       for (i = page_start; i < page_end; i++) {
+               if (pcpu_chunk_page_occupied(chunk, i)) {
+                       if (map_start >= 0) {
+                               if (pcpu_map(chunk, map_start, map_end))
+                                       goto err;
+                               map_start = -1;
+                       }
+                       continue;
+               }
+
+               map_start = map_start < 0 ? i : map_start;
+               map_end = i + 1;
+
+               for_each_possible_cpu(cpu) {
+                       struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i);
+
+                       *pagep = alloc_pages_node(cpu_to_node(cpu),
+                                                 alloc_mask, 0);
+                       if (!*pagep)
+                               goto err;
+               }
+       }
+
+       if (map_start >= 0 && pcpu_map(chunk, map_start, map_end))
+               goto err;
+
+       for_each_possible_cpu(cpu)
+               memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0,
+                      size);
+
+       return 0;
+err:
+       /* likely under heavy memory pressure, give memory back */
+       pcpu_depopulate_chunk(chunk, off, size, true);
+       return -ENOMEM;
+}
+
+static void free_pcpu_chunk(struct pcpu_chunk *chunk)
+{
+       if (!chunk)
+               return;
+       if (chunk->vm)
+               free_vm_area(chunk->vm);
+       pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
+       kfree(chunk);
+}
+
+static struct pcpu_chunk *alloc_pcpu_chunk(void)
+{
+       struct pcpu_chunk *chunk;
+
+       chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+       if (!chunk)
+               return NULL;
+
+       chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
+       chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
+       chunk->map[chunk->map_used++] = pcpu_unit_size;
+       chunk->page = chunk->page_ar;
+
+       chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL);
+       if (!chunk->vm) {
+               free_pcpu_chunk(chunk);
+               return NULL;
+       }
+
+       INIT_LIST_HEAD(&chunk->list);
+       chunk->free_size = pcpu_unit_size;
+       chunk->contig_hint = pcpu_unit_size;
+
+       return chunk;
+}
+
+/**
+ * pcpu_alloc - the percpu allocator
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ * @reserved: allocate from the reserved chunk if available
+ *
+ * Allocate percpu area of @size bytes aligned at @align.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+{
+       struct pcpu_chunk *chunk;
+       int slot, off;
+
+       if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
+               WARN(true, "illegal size (%zu) or align (%zu) for "
+                    "percpu allocation\n", size, align);
+               return NULL;
+       }
+
+       mutex_lock(&pcpu_alloc_mutex);
+       spin_lock_irq(&pcpu_lock);
+
+       /* serve reserved allocations from the reserved chunk if available */
+       if (reserved && pcpu_reserved_chunk) {
+               chunk = pcpu_reserved_chunk;
+               if (size > chunk->contig_hint ||
+                   pcpu_extend_area_map(chunk) < 0)
+                       goto fail_unlock;
+               off = pcpu_alloc_area(chunk, size, align);
+               if (off >= 0)
+                       goto area_found;
+               goto fail_unlock;
+       }
+
+restart:
+       /* search through normal chunks */
+       for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
+               list_for_each_entry(chunk, &pcpu_slot[slot], list) {
+                       if (size > chunk->contig_hint)
+                               continue;
+
+                       switch (pcpu_extend_area_map(chunk)) {
+                       case 0:
+                               break;
+                       case 1:
+                               goto restart;   /* pcpu_lock dropped, restart */
+                       default:
+                               goto fail_unlock;
+                       }
+
+                       off = pcpu_alloc_area(chunk, size, align);
+                       if (off >= 0)
+                               goto area_found;
+               }
+       }
+
+       /* hmmm... no space left, create a new chunk */
+       spin_unlock_irq(&pcpu_lock);
+
+       chunk = alloc_pcpu_chunk();
+       if (!chunk)
+               goto fail_unlock_mutex;
+
+       spin_lock_irq(&pcpu_lock);
+       pcpu_chunk_relocate(chunk, -1);
+       pcpu_chunk_addr_insert(chunk);
+       goto restart;
+
+area_found:
+       spin_unlock_irq(&pcpu_lock);
+
+       /* populate, map and clear the area */
+       if (pcpu_populate_chunk(chunk, off, size)) {
+               spin_lock_irq(&pcpu_lock);
+               pcpu_free_area(chunk, off);
+               goto fail_unlock;
+       }
+
+       mutex_unlock(&pcpu_alloc_mutex);
+
+       return __addr_to_pcpu_ptr(chunk->vm->addr + off);
+
+fail_unlock:
+       spin_unlock_irq(&pcpu_lock);
+fail_unlock_mutex:
+       mutex_unlock(&pcpu_alloc_mutex);
+       return NULL;
+}
+
+/**
+ * __alloc_percpu - allocate dynamic percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Allocate percpu area of @size bytes aligned at @align.  Might
+ * sleep.  Might trigger writeouts.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+void *__alloc_percpu(size_t size, size_t align)
+{
+       return pcpu_alloc(size, align, false);
+}
+EXPORT_SYMBOL_GPL(__alloc_percpu);
+
+/**
+ * __alloc_reserved_percpu - allocate reserved percpu area
+ * @size: size of area to allocate in bytes
+ * @align: alignment of area (max PAGE_SIZE)
+ *
+ * Allocate percpu area of @size bytes aligned at @align from the
+ * reserved percpu area if the arch has set it up; otherwise, the
+ * allocation is served from the regular dynamic area.  Might sleep.
+ * Might trigger writeouts.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * Percpu pointer to the allocated area on success, NULL on failure.
+ */
+void *__alloc_reserved_percpu(size_t size, size_t align)
+{
+       return pcpu_alloc(size, align, true);
+}
+
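As a hedged illustration (not part of the patch): the module loader is the
expected user of the reserved chunk, and a caller there might look roughly
like this; the helper name is made up:

	static void *module_percpu_alloc(unsigned long size, unsigned long align)
	{
		/* pcpu_alloc() rejects align > PAGE_SIZE, so cap it here */
		if (align > PAGE_SIZE)
			align = PAGE_SIZE;
		return __alloc_reserved_percpu(size, align);
	}
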
+/**
+ * pcpu_reclaim - reclaim fully free chunks, workqueue function
+ * @work: unused
+ *
+ * Reclaim all fully free chunks except for the first one.
+ *
+ * CONTEXT:
+ * workqueue context.
+ */
+static void pcpu_reclaim(struct work_struct *work)
+{
+       LIST_HEAD(todo);
+       struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
+       struct pcpu_chunk *chunk, *next;
+
+       mutex_lock(&pcpu_alloc_mutex);
+       spin_lock_irq(&pcpu_lock);
+
+       list_for_each_entry_safe(chunk, next, head, list) {
+               WARN_ON(chunk->immutable);
+
+               /* spare the first one */
+               if (chunk == list_first_entry(head, struct pcpu_chunk, list))
+                       continue;
+
+               rb_erase(&chunk->rb_node, &pcpu_addr_root);
+               list_move(&chunk->list, &todo);
+       }
+
+       spin_unlock_irq(&pcpu_lock);
+       mutex_unlock(&pcpu_alloc_mutex);
+
+       list_for_each_entry_safe(chunk, next, &todo, list) {
+               pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false);
+               free_pcpu_chunk(chunk);
+       }
+}
+
+/**
+ * free_percpu - free percpu area
+ * @ptr: pointer to area to free
+ *
+ * Free percpu area @ptr.
+ *
+ * CONTEXT:
+ * Can be called from atomic context.
+ */
+void free_percpu(void *ptr)
+{
+       void *addr = __pcpu_ptr_to_addr(ptr);
+       struct pcpu_chunk *chunk;
+       unsigned long flags;
+       int off;
+
+       if (!ptr)
+               return;
+
+       spin_lock_irqsave(&pcpu_lock, flags);
+
+       chunk = pcpu_chunk_addr_search(addr);
+       off = addr - chunk->vm->addr;
+
+       pcpu_free_area(chunk, off);
+
+       /* if there is more than one fully free chunk, wake up the grim reaper */
+       if (chunk->free_size == pcpu_unit_size) {
+               struct pcpu_chunk *pos;
+
+               list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
+                       if (pos != chunk) {
+                               schedule_work(&pcpu_reclaim_work);
+                               break;
+                       }
+       }
+
+       spin_unlock_irqrestore(&pcpu_lock, flags);
+}
+EXPORT_SYMBOL_GPL(free_percpu);
+
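A hedged usage sketch for the allocate/free pair above (identifiers are
illustrative; assumes the explicit-alignment API introduced here):

	#include <linux/percpu.h>

	static unsigned long *hit_counts;	/* percpu pointer */

	static int __init hits_init(void)
	{
		hit_counts = __alloc_percpu(sizeof(unsigned long),
					    __alignof__(unsigned long));
		return hit_counts ? 0 : -ENOMEM;
	}

	static void hits_inc(void)
	{
		int cpu = get_cpu();	/* pin to this cpu */

		(*per_cpu_ptr(hit_counts, cpu))++;
		put_cpu();
	}

	static void __exit hits_exit(void)
	{
		free_percpu(hit_counts);	/* atomic context would be fine */
	}
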
+/**
+ * pcpu_setup_first_chunk - initialize the first percpu chunk
+ * @get_page_fn: callback to fetch page pointer
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ * @base_addr: mapped address, NULL for auto
+ * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary
+ *
+ * Initialize the first percpu chunk which contains the kernel static
+ * percpu area.  This function is to be called from the arch percpu area
+ * setup path.  The first two parameters are mandatory.  The rest are
+ * optional.
+ *
+ * @get_page_fn() should return a pointer to the percpu page for the
+ * given cpu and page number.  It should return at least enough pages
+ * to cover the static area, and the returned pages for the static
+ * area should have been initialized with valid data.  If @unit_size
+ * is specified,
+ * it can also return pages after the static area.  NULL return
+ * indicates end of pages for the cpu.  Note that @get_page_fn() must
+ * return the same number of pages for all cpus.
+ *
+ * @reserved_size, if non-zero, specifies the number of bytes to
+ * reserve after the static area in the first chunk.  This area is
+ * then available only through reserved percpu allocation.  It is
+ * primarily used to serve module percpu
+ * static areas on architectures where the addressing model has
+ * limited offset range for symbol relocations to guarantee module
+ * percpu symbols fall inside the relocatable range.
+ *
+ * @dyn_size, if non-negative, determines the number of bytes
+ * available for dynamic allocation in the first chunk.  Specifying
+ * a non-negative value makes percpu leave the area beyond
+ * @static_size + @reserved_size + @dyn_size untouched.
+ *
+ * @unit_size, if non-negative, specifies the unit size.  It must be
+ * aligned to PAGE_SIZE and no smaller than @static_size +
+ * @reserved_size, plus @dyn_size when @dyn_size is non-negative.
+ *
+ * Non-NULL @base_addr means that the caller has already allocated the
+ * virtual region for the first chunk and mapped it.  percpu must not
+ * mess with the chunk.  Note that @base_addr only makes sense together
+ * with an explicit @unit_size and a NULL @populate_pte_fn.
+ *
+ * @populate_pte_fn is used to populate the pagetable.  NULL means the
+ * caller already populated the pagetable.
+ *
+ * If the first chunk ends up with both reserved and dynamic areas, it
+ * is served by two chunks - one to serve the core static and reserved
+ * areas and the other for the dynamic area.  They share the same vm
+ * and page map but use separate area allocation maps to stay out of
+ * each other's way.  The latter chunk is circulated in the chunk slots
+ * and is available for dynamic allocation like any other chunk.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access.
+ */
+size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+                                    size_t static_size, size_t reserved_size,
+                                    ssize_t dyn_size, ssize_t unit_size,
+                                    void *base_addr,
+                                    pcpu_populate_pte_fn_t populate_pte_fn)
+{
+       static struct vm_struct first_vm;
+       static int smap[2], dmap[2];
+       size_t size_sum = static_size + reserved_size +
+                         (dyn_size >= 0 ? dyn_size : 0);
+       struct pcpu_chunk *schunk, *dchunk = NULL;
+       unsigned int cpu;
+       int nr_pages;
+       int err, i;
+
+       /* sanity checks */
+       BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
+                    ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
+       BUG_ON(!static_size);
+       if (unit_size >= 0) {
+               BUG_ON(unit_size < size_sum);
+               BUG_ON(unit_size & ~PAGE_MASK);
+               BUG_ON(unit_size < PCPU_MIN_UNIT_SIZE);
+       } else
+               BUG_ON(base_addr);
+       BUG_ON(base_addr && populate_pte_fn);
+
+       if (unit_size >= 0)
+               pcpu_unit_pages = unit_size >> PAGE_SHIFT;
+       else
+               pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT,
+                                       PFN_UP(size_sum));
+
+       pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
+       pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size;
+       pcpu_chunk_struct_size = sizeof(struct pcpu_chunk)
+               + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *);
+
+       if (dyn_size < 0)
+               dyn_size = pcpu_unit_size - static_size - reserved_size;
+
+       /*
+        * Allocate chunk slots.  The additional last slot is for
+        * empty chunks.
+        */
+       pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
+       pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
+       for (i = 0; i < pcpu_nr_slots; i++)
+               INIT_LIST_HEAD(&pcpu_slot[i]);
+
+       /*
+        * Initialize static chunk.  If reserved_size is zero, the
+        * static chunk covers static area + dynamic allocation area
+        * in the first chunk.  If reserved_size is not zero, it
+        * covers static area + reserved area (mostly used for module
+        * static percpu allocation).
+        */
+       schunk = alloc_bootmem(pcpu_chunk_struct_size);
+       INIT_LIST_HEAD(&schunk->list);
+       schunk->vm = &first_vm;
+       schunk->map = smap;
+       schunk->map_alloc = ARRAY_SIZE(smap);
+       schunk->page = schunk->page_ar;
+
+       if (reserved_size) {
+               schunk->free_size = reserved_size;
+               pcpu_reserved_chunk = schunk;   /* not for dynamic alloc */
+       } else {
+               schunk->free_size = dyn_size;
+               dyn_size = 0;                   /* dynamic area covered */
+       }
+       schunk->contig_hint = schunk->free_size;
+
+       schunk->map[schunk->map_used++] = -static_size;
+       if (schunk->free_size)
+               schunk->map[schunk->map_used++] = schunk->free_size;
+
+       pcpu_reserved_chunk_limit = static_size + schunk->free_size;
+
+       /* init dynamic chunk if necessary */
+       if (dyn_size) {
+               dchunk = alloc_bootmem(sizeof(struct pcpu_chunk));
+               INIT_LIST_HEAD(&dchunk->list);
+               dchunk->vm = &first_vm;
+               dchunk->map = dmap;
+               dchunk->map_alloc = ARRAY_SIZE(dmap);
+               dchunk->page = schunk->page_ar; /* share page map with schunk */
+
+               dchunk->contig_hint = dchunk->free_size = dyn_size;
+               dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
+               dchunk->map[dchunk->map_used++] = dchunk->free_size;
+       }
+
+       /* allocate vm address */
+       first_vm.flags = VM_ALLOC;
+       first_vm.size = pcpu_chunk_size;
+
+       if (!base_addr)
+               vm_area_register_early(&first_vm, PAGE_SIZE);
+       else {
+               /*
+                * Pages already mapped.  No need to remap into
+                * vmalloc area.  In this case the first chunks can't
+                * be mapped or unmapped by percpu and are marked
+                * immutable.
+                */
+               first_vm.addr = base_addr;
+               schunk->immutable = true;
+               if (dchunk)
+                       dchunk->immutable = true;
+       }
+
+       /* assign pages */
+       nr_pages = -1;
+       for_each_possible_cpu(cpu) {
+               for (i = 0; i < pcpu_unit_pages; i++) {
+                       struct page *page = get_page_fn(cpu, i);
+
+                       if (!page)
+                               break;
+                       *pcpu_chunk_pagep(schunk, cpu, i) = page;
+               }
+
+               BUG_ON(i < PFN_UP(static_size));
+
+               if (nr_pages < 0)
+                       nr_pages = i;
+               else
+                       BUG_ON(nr_pages != i);
+       }
+
+       /* map them */
+       if (populate_pte_fn) {
+               for_each_possible_cpu(cpu)
+                       for (i = 0; i < nr_pages; i++)
+                               populate_pte_fn(pcpu_chunk_addr(schunk,
+                                                               cpu, i));
+
+               err = pcpu_map(schunk, 0, nr_pages);
+               if (err)
+                       panic("failed to setup static percpu area, err=%d\n",
+                             err);
+       }
+
+       /* link the first chunk in */
+       if (!dchunk) {
+               pcpu_chunk_relocate(schunk, -1);
+               pcpu_chunk_addr_insert(schunk);
+       } else {
+               pcpu_chunk_relocate(dchunk, -1);
+               pcpu_chunk_addr_insert(dchunk);
+       }
+
+       /* we're done */
+       pcpu_base_addr = (void *)pcpu_chunk_addr(schunk, 0, 0);
+       return pcpu_unit_size;
+}
+
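A sketch of the @get_page_fn contract described above, assuming the arch
has stashed its pre-allocated per-cpu pages in a flat array before calling
in (the array and count names are hypothetical):

	static struct page **pcpu4k_pages __initdata;
	static int pcpu4k_nr_unit_pages __initdata;

	static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
	{
		if (pageno >= pcpu4k_nr_unit_pages)
			return NULL;	/* end of pages for this cpu */
		return pcpu4k_pages[cpu * pcpu4k_nr_unit_pages + pageno];
	}
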
+/*
+ * Embedding first chunk setup helper.
+ */
+static void *pcpue_ptr __initdata;
+static size_t pcpue_size __initdata;
+static size_t pcpue_unit_size __initdata;
+
+static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
+{
+       size_t off = (size_t)pageno << PAGE_SHIFT;
+
+       if (off >= pcpue_size)
+               return NULL;
+
+       return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size + off);
+}
+
+/**
+ * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
+ * @static_size: the size of static percpu area in bytes
+ * @reserved_size: the size of reserved percpu area in bytes
+ * @dyn_size: free size for dynamic allocation in bytes, -1 for auto
+ * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, -1 for auto
+ *
+ * This is a helper to ease setting up an embedded first percpu chunk and
+ * can be called where pcpu_setup_first_chunk() is expected.
+ *
+ * If this function is used to set up the first chunk, it is allocated
+ * as a contiguous area using the bootmem allocator and used as-is
+ * without being mapped into the vmalloc area.  This enables the first
+ * chunk to piggyback on the linear physical mapping, which often uses
+ * larger page sizes.
+ *
+ * When @dyn_size is positive, the dynamic area might end up larger
+ * than specified to satisfy page alignment.  When @dyn_size is auto
+ * (-1), it covers only what page alignment requires after the static
+ * and reserved areas, not the whole remainder of the first chunk.
+ *
+ * If the needed size is smaller than the minimum or specified unit
+ * size, the leftover is returned to the bootmem allocator.
+ *
+ * RETURNS:
+ * The determined pcpu_unit_size which can be used to initialize
+ * percpu access on success, -errno on failure.
+ */
+ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
+                                     ssize_t dyn_size, ssize_t unit_size)
+{
+       unsigned int cpu;
+
+       /* determine parameters and allocate */
+       pcpue_size = PFN_ALIGN(static_size + reserved_size +
+                              (dyn_size >= 0 ? dyn_size : 0));
+       if (dyn_size != 0)
+               dyn_size = pcpue_size - static_size - reserved_size;
+
+       if (unit_size >= 0) {
+               BUG_ON(unit_size < pcpue_size);
+               pcpue_unit_size = unit_size;
+       } else
+               pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);
+
+       pcpue_ptr = __alloc_bootmem_nopanic(
+                                       num_possible_cpus() * pcpue_unit_size,
+                                       PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+       if (!pcpue_ptr)
+               return -ENOMEM;
+
+       /* return the leftover and copy */
+       for_each_possible_cpu(cpu) {
+               void *ptr = pcpue_ptr + cpu * pcpue_unit_size;
+
+               free_bootmem(__pa(ptr + pcpue_size),
+                            pcpue_unit_size - pcpue_size);
+               memcpy(ptr, __per_cpu_load, static_size);
+       }
+
+       /* we're ready, commit */
+       pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
+               pcpue_size >> PAGE_SHIFT, pcpue_ptr, static_size);
+
+       return pcpu_setup_first_chunk(pcpue_get_page, static_size,
+                                     reserved_size, dyn_size,
+                                     pcpue_unit_size, pcpue_ptr, NULL);
+}
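
A sketch of how an arch setup path might call the embed helper, letting the
unit and dynamic sizes be determined automatically; the reserve constant
stands in for whatever the arch picks:

	#include <asm/sections.h>

	void __init setup_per_cpu_areas_sketch(void)
	{
		size_t static_size = __per_cpu_end - __per_cpu_start;
		ssize_t unit;

		unit = pcpu_embed_first_chunk(static_size,
					      PERCPU_MODULE_RESERVE, -1, -1);
		if (unit < 0)
			panic("percpu: embedding first chunk failed (%zd)\n",
			      unit);
	}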
index 520a7598026995c1aafe82dfbf523e9fcb7143a2..af58324c361addc715ee2eb9b839bada21cb2773 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long start, unsigned long end,
-                               pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+                                  pgprot_t prot, struct page **pages)
 {
        pgd_t *pgd;
        unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       flush_cache_vmap(start, end);
 
        if (unlikely(err))
                return err;
        return nr;
 }
 
+static int vmap_page_range(unsigned long start, unsigned long end,
+                          pgprot_t prot, struct page **pages)
+{
+       int ret;
+
+       ret = vmap_page_range_noflush(start, end, prot, pages);
+       flush_cache_vmap(start, end);
+       return ret;
+}
+
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
        /*
@@ -990,6 +1000,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register kernel vm area before
+ * vmalloc_init() is called.  @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero.  On return,
+ * vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+       static size_t vm_init_off __initdata;
+       unsigned long addr;
+
+       addr = ALIGN(VMALLOC_START + vm_init_off, align);
+       vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+       vm->addr = (void *)addr;
+
+       vm->next = vmlist;
+       vmlist = vm;
+}
+
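A hedged sketch of a caller, mirroring what the percpu first-chunk code
above does with first_vm; the size and names are illustrative:

	static struct vm_struct early_area;

	void __init claim_early_vm(void)
	{
		early_area.flags = VM_ALLOC;
		early_area.size = 4 * PAGE_SIZE;
		vm_area_register_early(&early_area, PAGE_SIZE);
		/* early_area.addr now points into the vmalloc range */
	}
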
 void __init vmalloc_init(void)
 {
        struct vmap_area *va;
@@ -1017,6 +1053,58 @@ void __init vmalloc_init(void)
        vmap_initialized = true;
 }
 
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+                            pgprot_t prot, struct page **pages)
+{
+       return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
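A sketch of the batched-flush pattern this enables (the one pcpu_map()
relies on): map without per-call flushes, then flush the whole range once.
The wrapper name is made up:

	static int map_area(struct vm_struct *area, struct page **pages,
			    unsigned long size)
	{
		unsigned long addr = (unsigned long)area->addr;
		int nr;

		nr = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
		if (nr < 0)
			return nr;
		flush_cache_vmap(addr, addr + size);	/* one flush for all */
		return 0;
	}
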
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+       vunmap_page_range(addr, addr + size);
+}
+
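A sketch of the flush protocol the NOTE prescribes, vcache before and TLB
after (pcpu_unmap()/pcpu_depopulate_chunk() follow the same pattern at
chunk granularity):

	static void unmap_area(unsigned long addr, unsigned long size)
	{
		flush_cache_vunmap(addr, addr + size);		/* before */
		unmap_kernel_range_noflush(addr, size);
		flush_tlb_kernel_range(addr, addr + size);	/* after */
	}
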
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes the vcache
+ * before the unmapping and the TLB after.
+ */
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
        unsigned long end = addr + size;
@@ -1267,6 +1355,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
        BUG_ON(in_interrupt());
+       might_sleep();
        __vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1286,6 +1375,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
        struct vm_struct *area;
 
+       might_sleep();
+
        if (count > num_physpages)
                return NULL;
 
index 6177e3bcd66bdc7b8fc74cb583d47bb89c4940ef..e89517141657bb6ecfa4c17cf1aceb8b76be78f6 100644 (file)
@@ -1469,7 +1469,7 @@ static void shrink_zone(int priority, struct zone *zone,
                int file = is_file_lru(l);
                int scan;
 
-               scan = zone_page_state(zone, NR_LRU_BASE + l);
+               scan = zone_nr_pages(zone, sc, l);
                if (priority) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
index 743f5542d65a6385c7ae423ddb49d4c69daaeaee..3a3dad8013548d5581ae8e9250f8e818a2a3a122 100644 (file)
@@ -1375,10 +1375,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
 int snmp_mib_init(void *ptr[2], size_t mibsize)
 {
        BUG_ON(ptr == NULL);
-       ptr[0] = __alloc_percpu(mibsize);
+       ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
        if (!ptr[0])
                goto err0;
-       ptr[1] = __alloc_percpu(mibsize);
+       ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
        if (!ptr[1])
                goto err1;
        return 0;
index 97f71153584faa6bf379235f0db151e17a5ed4e9..bf895401218fe634df8432d914a02221d7c62766 100644 (file)
@@ -3376,7 +3376,7 @@ int __init ip_rt_init(void)
        int rc = 0;
 
 #ifdef CONFIG_NET_CLS_ROUTE
-       ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
+       ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
        if (!ip_rt_acct)
                panic("IP: failed to allocate ip_rt_acct\n");
 #endif
index e06365775bdfe16f5c0242f5bb4464a3c72c0c8c..3b949a354470e7022cd71769351324b155d307e1 100644 (file)
@@ -186,3 +186,17 @@ quiet_cmd_gzip = GZIP    $@
 cmd_gzip = gzip -f -9 < $< > $@
 
 
+# Bzip2
+# ---------------------------------------------------------------------------
+
+# Bzip2 does not record the uncompressed size in the file, so we append it ourselves
+size_append=$(CONFIG_SHELL) $(srctree)/scripts/bin_size
+
+quiet_cmd_bzip2 = BZIP2    $@
+cmd_bzip2 = (bzip2 -9 < $< && $(size_append) $<) > $@ || (rm -f $@ ; false)
+
+# Lzma
+# ---------------------------------------------------------------------------
+
+quiet_cmd_lzma = LZMA    $@
+cmd_lzma = (lzma -9 -c $< && $(size_append) $<) >$@ || (rm -f $@ ; false)
diff --git a/scripts/bin_size b/scripts/bin_size
new file mode 100644 (file)
index 0000000..43e1b36
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+if [ $# = 0 ] ; then
+   echo "Usage: $0 file"
+   exit 1
+fi
+
+size_dec=`stat -c "%s" $1`
+size_hex_echo_string=`printf "%08x" $size_dec |
+     sed 's/\(..\)\(..\)\(..\)\(..\)/\\\\x\4\\\\x\3\\\\x\2\\\\x\1/g'`
+/bin/echo -ne $size_hex_echo_string
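
The rules above append the uncompressed input's size as four raw bytes,
least-significant byte first, after the compressed stream (mimicking gzip's
trailer). A hedged C sketch of reading it back on the consumer side; the
function is hypothetical:

	#include <stdint.h>

	/* the last 4 bytes of the blob hold the size, little-endian */
	static uint32_t appended_size(const unsigned char *blob, long len)
	{
		const unsigned char *p = blob + len - 4;

		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}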
diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
new file mode 100644 (file)
index 0000000..29493dc
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+if [ "$?" -eq "0" ] ; then
+       echo y
+else
+       echo n
+fi
index 325c0a1b03b6bbcade1e5edff2665a469ac18168..afaec618b3955c9147608da4c915bc6159627cf0 100644 (file)
@@ -1,6 +1,8 @@
 #!/bin/sh
 
-echo "int foo(void) { char X[200]; return 3; }" | $1 -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
 if [ "$?" -eq "0" ] ; then
-       echo $2
+       echo y
+else
+       echo n
 fi
index 5f3415f28736a386d84b77ae4fa9e2f179958221..3eea8f15131bcde957bc0fc9699bd5f47934f62a 100644 (file)
@@ -5,7 +5,7 @@
 # Released under the terms of the GNU GPL
 #
 # Generate a cpio packed initramfs. It uses gen_init_cpio to generate
-# the cpio archive, and gzip to pack it.
+# the cpio archive, and then compresses it.
 # The script may also be used to generate the inputfile used for gen_init_cpio
 # This script assumes that gen_init_cpio is located in usr/ directory
 
@@ -16,8 +16,8 @@ usage() {
 cat << EOF
 Usage:
 $0 [-o <file>] [-u <uid>] [-g <gid>] {-d | <cpio_source>} ...
-       -o <file>      Create gzipped initramfs file named <file> using
-                      gen_init_cpio and gzip
+       -o <file>      Create compressed initramfs file named <file> using
+                      gen_init_cpio and a compressor chosen by the extension
        -u <uid>       User ID to map to user ID 0 (root).
                       <uid> is only meaningful if <cpio_source> is a
                       directory.  "squash" forces all files to uid 0.
@@ -225,6 +225,7 @@ cpio_list=
 output="/dev/stdout"
 output_file=""
 is_cpio_compressed=
+compr="gzip -9 -f"
 
 arg="$1"
 case "$arg" in
@@ -233,11 +234,15 @@ case "$arg" in
                echo "deps_initramfs := \\"
                shift
                ;;
-       "-o")   # generate gzipped cpio image named $1
+       "-o")   # generate compressed cpio image named $1
                shift
                output_file="$1"
                cpio_list="$(mktemp ${TMPDIR:-/tmp}/cpiolist.XXXXXX)"
                output=${cpio_list}
+               echo "$output_file" | grep -q "\.gz$" && compr="gzip -9 -f"
+               echo "$output_file" | grep -q "\.bz2$" && compr="bzip2 -9 -f"
+               echo "$output_file" | grep -q "\.lzma$" && compr="lzma -9 -f"
+               echo "$output_file" | grep -q "\.cpio$" && compr="cat"
                shift
                ;;
 esac
@@ -274,7 +279,7 @@ while [ $# -gt 0 ]; do
        esac
 done
 
-# If output_file is set we will generate cpio archive and gzip it
+# If output_file is set we will generate a cpio archive and compress it
 # we are careful to delete tmp files
 if [ ! -z ${output_file} ]; then
        if [ -z ${cpio_file} ]; then
@@ -287,7 +292,8 @@ if [ ! -z ${output_file} ]; then
        if [ "${is_cpio_compressed}" = "compressed" ]; then
                cat ${cpio_tfile} > ${output_file}
        else
-               cat ${cpio_tfile} | gzip -f -9 - > ${output_file}
+               (cat ${cpio_tfile} | ${compr}  - > ${output_file}) \
+               || (rm -f ${output_file} ; false)
        fi
        [ -z ${cpio_file} ] && rm ${cpio_tfile}
 fi
index db30fac3083efd488596577743422adae9b0c6cc..56f90a480899dd7bef86b88725b5668160b5fcad 100644 (file)
@@ -38,7 +38,7 @@ foreach my $file (@files) {
                &check_asm_types();
                &check_sizetypes();
                &check_prototypes();
-               &check_config();
+               # Dropped for now, too much noise: &check_config();
        }
        close FH;
 }
index 88921611b22e62e9b0792ea1cc5affe789a07964..7e62303133dc68e16eb3b1457068cdf52b5db1d2 100644 (file)
@@ -415,8 +415,9 @@ static int parse_elf(struct elf_info *info, const char *filename)
                const char *secstrings
                        = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
                const char *secname;
+               int nobits = sechdrs[i].sh_type == SHT_NOBITS;
 
-               if (sechdrs[i].sh_offset > info->size) {
+               if (!nobits && sechdrs[i].sh_offset > info->size) {
                        fatal("%s is truncated. sechdrs[i].sh_offset=%lu > "
                              "sizeof(*hrd)=%zu\n", filename,
                              (unsigned long)sechdrs[i].sh_offset,
@@ -425,6 +426,8 @@ static int parse_elf(struct elf_info *info, const char *filename)
                }
                secname = secstrings + sechdrs[i].sh_name;
                if (strcmp(secname, ".modinfo") == 0) {
+                       if (nobits)
+                               fatal("%s has NOBITS .modinfo\n", filename);
                        info->modinfo = (void *)hdr + sechdrs[i].sh_offset;
                        info->modinfo_len = sechdrs[i].sh_size;
                } else if (strcmp(secname, "__ksymtab") == 0)
index 8c6b7b09606acb150b2e3c7b26ccb4074950498f..fa4a0a17b7e0e0a12f1637c0c5e9c685a9aadeda 100644 (file)
@@ -35,9 +35,10 @@ $(objtree)/kernel.spec: $(MKSPEC) $(srctree)/Makefile
 rpm-pkg rpm: $(objtree)/kernel.spec FORCE
        $(MAKE) clean
        $(PREV) ln -sf $(srctree) $(KERNELPATH)
+       $(CONFIG_SHELL) $(srctree)/scripts/setlocalversion > $(objtree)/.scmversion
        $(PREV) tar -cz $(RCS_TAR_IGNORE) -f $(KERNELPATH).tar.gz $(KERNELPATH)/.
        $(PREV) rm $(KERNELPATH)
-
+       rm -f $(objtree)/.scmversion
        set -e; \
        $(CONFIG_SHELL) $(srctree)/scripts/mkversion > $(objtree)/.tmp_version
        set -e; \
index ee448cdc6a2b3cd47ca22fd94fd0995d5985f9ae..3d93f8c8125246988ade0aa265ddc6b6a9e7289d 100755 (executable)
@@ -96,7 +96,7 @@ echo "%endif"
 
 echo ""
 echo "%clean"
-echo '#echo -rf $RPM_BUILD_ROOT'
+echo 'rm -rf $RPM_BUILD_ROOT'
 echo ""
 echo "%files"
 echo '%defattr (-, root, root)'
index 552025e72acb1341923bfb471547189419567844..05a31a6c7e1bbda6537dd313e6498abedabbc4ca 100644 (file)
@@ -206,7 +206,7 @@ static void             done(void);
 static void             error(const char *);
 static int              findsym(const char *);
 static void             flushline(bool);
-static Linetype         getline(void);
+static Linetype         get_line(void);
 static Linetype         ifeval(const char **);
 static void             ignoreoff(void);
 static void             ignoreon(void);
@@ -512,7 +512,7 @@ process(void)
 
        for (;;) {
                linenum++;
-               lineval = getline();
+               lineval = get_line();
                trans_table[ifstate[depth]][lineval]();
                debug("process %s -> %s depth %d",
                    linetype_name[lineval],
@@ -526,7 +526,7 @@ process(void)
  * help from skipcomment().
  */
 static Linetype
-getline(void)
+get_line(void)
 {
        const char *cp;
        int cursym;
index 0bcf14640fdedf35886a6eaf5e7a5c1ab8a6469b..84714a65e5c81a0a777577cc78ce115f4f3a99c2 100644 (file)
@@ -33,7 +33,7 @@ if SND_DRIVERS
 
 config SND_PCSP
        tristate "PC-Speaker support (READ HELP!)"
-       depends on PCSPKR_PLATFORM && X86_PC && HIGH_RES_TIMERS
+       depends on PCSPKR_PLATFORM && X86 && HIGH_RES_TIMERS
        depends on INPUT
        depends on EXPERIMENTAL
        select SND_PCM
index 86cecb59dd0759ffedf3b551dd50f0a96fd3d2c7..43a3a0fe8f29b3637bcd49eb0d3824d5da67d2a6 100644 (file)
@@ -44,3 +44,92 @@ config INITRAMFS_ROOT_GID
          owned by group root in the initial ramdisk image.
 
          If you are not sure, leave it set to "0".
+
+config RD_GZIP
+       bool "Initial ramdisk compressed using gzip"
+       default y
+       depends on BLK_DEV_INITRD=y
+       select DECOMPRESS_GZIP
+       help
+         Support loading of a gzip encoded initial ramdisk or cpio buffer.
+         If unsure, say Y.
+
+config RD_BZIP2
+       bool "Initial ramdisk compressed using bzip2"
+       default n
+       depends on BLK_DEV_INITRD=y
+       select DECOMPRESS_BZIP2
+       help
+         Support loading of a bzip2 encoded initial ramdisk or cpio buffer.
+         If unsure, say N.
+
+config RD_LZMA
+       bool "Initial ramdisk compressed using lzma"
+       default n
+       depends on BLK_DEV_INITRD=y
+       select DECOMPRESS_LZMA
+       help
+         Support loading of an lzma encoded initial ramdisk or cpio buffer.
+         If unsure, say N.
+
+choice
+       prompt "Built-in initramfs compression mode"
+       help
+         This setting is only meaningful if INITRAMFS_SOURCE is
+         set. It selects the algorithm with which the INITRAMFS_SOURCE
+         will be compressed.
+         Several compression algorithms are available, which differ
+         in efficiency, compression and decompression speed.
+         Compression speed is only relevant when building a kernel.
+         Decompression speed is relevant at each boot.
+
+         If you have any problems with a bzip2- or lzma-compressed
+         initramfs, mail me (Alain Knaff) <alain@knaff.lu>.
+
+         High compression options are mostly useful for users who
+         are low on disk space (embedded systems), but for whom RAM
+         size matters less.
+
+         If in doubt, select 'gzip'
+
+config INITRAMFS_COMPRESSION_NONE
+       bool "None"
+       help
+         Do not compress the built-in initramfs at all. This may
+         sound wasteful of space, but on architectures that support
+         it, the built-in initramfs will be compressed at a later
+         stage anyway along with the rest of the kernel.
+         However, not compressing the initramfs may lead to slightly
+         higher memory consumption for a short time at boot, while
+         both the cpio image and the unpacked filesystem image are
+         present in memory simultaneously.
+
+config INITRAMFS_COMPRESSION_GZIP
+       bool "Gzip"
+       depends on RD_GZIP
+       help
+         The old and tried gzip compression. Its compression ratio is
+         the poorest of the three choices, but its speed (both
+         compression and decompression) is the fastest.
+
+config INITRAMFS_COMPRESSION_BZIP2
+       bool "Bzip2"
+       depends on RD_BZIP2
+       help
+         Its compression ratio and compression speed are intermediate.
+         Its decompression speed is the slowest of the three.  The
+         initramfs size is about 10% smaller with bzip2 than with gzip.
+         Bzip2 uses a large amount of memory; for modern kernels you
+         will need at least 8MB of RAM for booting.
+
+config INITRAMFS_COMPRESSION_LZMA
+       bool "LZMA"
+       depends on RD_LZMA
+       help
+         The most recent of the three compression algorithms.
+         Its compression ratio is the best and its decompression speed
+         falls between the other two; compression is the slowest.  The
+         initramfs size is about 33% smaller with LZMA than with gzip.
+
+endchoice
index 201f27f8cbaf1c6b266b68f3a915c3edce50b67b..b84894b3929d4ad4b2e029b17370cef2c7332836 100644 (file)
@@ -6,13 +6,25 @@ klibcdirs:;
 PHONY += klibcdirs
 
 
+# No compression
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_NONE)   =
+
+# Gzip
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_GZIP)   = .gz
+
+# Bzip2
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_BZIP2)  = .bz2
+
+# Lzma
+suffix_$(CONFIG_INITRAMFS_COMPRESSION_LZMA)   = .lzma
+
 # Generate builtin.o based on initramfs_data.o
-obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data.o
+obj-$(CONFIG_BLK_DEV_INITRD) := initramfs_data$(suffix_y).o
 
-# initramfs_data.o contains the initramfs_data.cpio.gz image.
+# initramfs_data.o contains the compressed initramfs_data.cpio image.
 # The image is included using .incbin, a dependency which is not
 # tracked automatically.
-$(obj)/initramfs_data.o: $(obj)/initramfs_data.cpio.gz FORCE
+$(obj)/initramfs_data$(suffix_y).o: $(obj)/initramfs_data.cpio$(suffix_y) FORCE
 
 #####
 # Generate the initramfs cpio archive
@@ -25,28 +37,28 @@ ramfs-args  := \
         $(if $(CONFIG_INITRAMFS_ROOT_UID), -u $(CONFIG_INITRAMFS_ROOT_UID)) \
         $(if $(CONFIG_INITRAMFS_ROOT_GID), -g $(CONFIG_INITRAMFS_ROOT_GID))
 
-# .initramfs_data.cpio.gz.d is used to identify all files included
+# .initramfs_data.cpio.d is used to identify all files included
 # in initramfs and to detect if any files are added/removed.
 # Removed files are identified by directory timestamp being updated
 # The dependency list is generated by gen_initramfs.sh -l
-ifneq ($(wildcard $(obj)/.initramfs_data.cpio.gz.d),)
-       include $(obj)/.initramfs_data.cpio.gz.d
+ifneq ($(wildcard $(obj)/.initramfs_data.cpio.d),)
+       include $(obj)/.initramfs_data.cpio.d
 endif
 
 quiet_cmd_initfs = GEN     $@
       cmd_initfs = $(initramfs) -o $@ $(ramfs-args) $(ramfs-input)
 
-targets := initramfs_data.cpio.gz
+targets := initramfs_data.cpio.gz initramfs_data.cpio.bz2 initramfs_data.cpio.lzma initramfs_data.cpio
 # do not try to update files included in initramfs
 $(deps_initramfs): ;
 
 $(deps_initramfs): klibcdirs
-# We rebuild initramfs_data.cpio.gz if:
-# 1) Any included file is newer then initramfs_data.cpio.gz
+# We rebuild initramfs_data.cpio if:
+# 1) Any included file is newer than initramfs_data.cpio
 # 2) There are changes in which files are included (added or deleted)
-# 3) If gen_init_cpio are newer than initramfs_data.cpio.gz
+# 3) gen_init_cpio is newer than initramfs_data.cpio
 # 4) arguments to gen_initramfs.sh changes
-$(obj)/initramfs_data.cpio.gz: $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
-       $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.gz.d
+$(obj)/initramfs_data.cpio$(suffix_y): $(obj)/gen_init_cpio $(deps_initramfs) klibcdirs
+       $(Q)$(initramfs) -l $(ramfs-input) > $(obj)/.initramfs_data.cpio.d
        $(call if_changed,initfs)
 
index c2e1ad424f4a41ce7d69f52f58b3ee681762a39d..7c6973d8d829e76101e112c7ab021d5c28dbcbad 100644 (file)
@@ -26,5 +26,5 @@ SECTIONS
 */
 
 .section .init.ramfs,"a"
-.incbin "usr/initramfs_data.cpio.gz"
+.incbin "usr/initramfs_data.cpio"
 
diff --git a/usr/initramfs_data.bz2.S b/usr/initramfs_data.bz2.S
new file mode 100644 (file)
index 0000000..bc54d09
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+  initramfs_data includes the compressed binary that is the
+  filesystem used for early user space.
+  Note: Older versions of "as" (prior to binutils 2.11.90.0.23
+  released on 2001-07-14) did not support .incbin.
+  If you are forced to use binutils older than that, then the
+  following trick can be applied to create the resulting binary:
+
+
+  ld -m elf_i386  --format binary --oformat elf32-i386 -r \
+  -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
+   ld -m elf_i386  -r -o built-in.o initramfs_data.o
+
+  initramfs_data.scr looks like this:
+SECTIONS
+{
+       .init.ramfs : { *(.data) }
+}
+
+  The above example is for i386 - the parameters vary between architectures.
+  If needed, look up LDFLAGS_BLOB in an older version of the
+  arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
+
+  Using .incbin has the advantage over ld that the correct flags are set
+  in the ELF header, as required by certain architectures.
+*/
+
+.section .init.ramfs,"a"
+.incbin "usr/initramfs_data.cpio.bz2"
diff --git a/usr/initramfs_data.gz.S b/usr/initramfs_data.gz.S
new file mode 100644 (file)
index 0000000..890c8dd
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+  initramfs_data includes the compressed binary that is the
+  filesystem used for early user space.
+  Note: Older versions of "as" (prior to binutils 2.11.90.0.23
+  released on 2001-07-14) did not support .incbin.
+  If you are forced to use binutils older than that, then the
+  following trick can be applied to create the resulting binary:
+
+
+  ld -m elf_i386  --format binary --oformat elf32-i386 -r \
+  -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
+   ld -m elf_i386  -r -o built-in.o initramfs_data.o
+
+  initramfs_data.scr looks like this:
+SECTIONS
+{
+       .init.ramfs : { *(.data) }
+}
+
+  The above example is for i386 - the parameters vary between architectures.
+  If needed, look up LDFLAGS_BLOB in an older version of the
+  arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
+
+  Using .incbin has the advantage over ld that the correct flags are set
+  in the ELF header, as required by certain architectures.
+*/
+
+.section .init.ramfs,"a"
+.incbin "usr/initramfs_data.cpio.gz"
diff --git a/usr/initramfs_data.lzma.S b/usr/initramfs_data.lzma.S
new file mode 100644 (file)
index 0000000..e11469e
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+  initramfs_data includes the compressed binary that is the
+  filesystem used for early user space.
+  Note: Older versions of "as" (prior to binutils 2.11.90.0.23
+  released on 2001-07-14) did not support .incbin.
+  If you are forced to use binutils older than that, then the
+  following trick can be applied to create the resulting binary:
+
+
+  ld -m elf_i386  --format binary --oformat elf32-i386 -r \
+  -T initramfs_data.scr initramfs_data.cpio.gz -o initramfs_data.o
+   ld -m elf_i386  -r -o built-in.o initramfs_data.o
+
+  initramfs_data.scr looks like this:
+SECTIONS
+{
+       .init.ramfs : { *(.data) }
+}
+
+  The above example is for i386 - the parameters vary between architectures.
+  If needed, look up LDFLAGS_BLOB in an older version of the
+  arch/$(ARCH)/Makefile to see the flags used before .incbin was introduced.
+
+  Using .incbin has the advantage over ld that the correct flags are set
+  in the ELF header, as required by certain architectures.
+*/
+
+.section .init.ramfs,"a"
+.incbin "usr/initramfs_data.cpio.lzma"