Merge tag 'llvmlinux-for-v3.15' of git://git.linuxfoundation.org/llvmlinux/kernel
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 13 Apr 2014 00:00:40 +0000 (17:00 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 13 Apr 2014 00:00:40 +0000 (17:00 -0700)
Pull llvm patches from Behan Webster:
 "These are some initial updates to support compiling the kernel with
  clang.

  These patches have been through the proper reviews to the best of my
  ability, and have been soaking in linux-next for a few weeks.  These
  patches by themselves still do not completely allow clang to be used
  with the kernel code, but lay the foundation for other patches which
  are still under review.

  Several other LLVMLinux patches have already been added via maintainer
  trees"

* tag 'llvmlinux-for-v3.15' of git://git.linuxfoundation.org/llvmlinux/kernel:
  x86: LLVMLinux: Fix "incomplete type const struct x86cpu_device_id"
  x86 kbuild: LLVMLinux: More cc-options added for clang
  x86, acpi: LLVMLinux: Remove nested functions from Thinkpad ACPI
  LLVMLinux: Add support for clang to compiler.h and new compiler-clang.h
  LLVMLinux: Remove warning about returning an uninitialized variable
  kbuild: LLVMLinux: Fix LINUX_COMPILER definition script for compilation with clang
  Documentation: LLVMLinux: Update Documentation/dontdiff
  kbuild: LLVMLinux: Adapt warnings for compilation with clang
  kbuild: LLVMLinux: Add Kbuild support for building kernel with Clang

490 files changed:
Documentation/ABI/testing/sysfs-devices-power
Documentation/devicetree/bindings/dma/fsl-edma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/qcom_bam_dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/sirfsoc-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-gpio.txt
Documentation/devicetree/bindings/mfd/mc13xxx.txt
Documentation/devicetree/bindings/sound/fsl,ssi.txt
Documentation/devicetree/bindings/spi/efm32-spi.txt
Documentation/devicetree/bindings/video/backlight/gpio-backlight.txt [new file with mode: 0644]
Documentation/filesystems/Locking
Documentation/filesystems/vfs.txt
Documentation/kernel-parameters.txt
MAINTAINERS
arch/alpha/Kconfig
arch/arm/Kconfig
arch/arm/boot/dts/atlas6.dtsi
arch/arm/boot/dts/prima2.dtsi
arch/arm/include/asm/assembler.h
arch/arm/include/asm/cputype.h
arch/arm/include/asm/syscall.h
arch/arm/kernel/crash_dump.c
arch/arm/kernel/entry-header.S
arch/arm/kernel/kprobes-common.c
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-test-thumb.c
arch/arm/kernel/kprobes-test.c
arch/arm/kernel/kprobes-test.h
arch/arm/kernel/kprobes-thumb.c
arch/arm/kernel/kprobes.c
arch/arm/kernel/pj4-cp0.c
arch/arm/kernel/probes.c
arch/arm/kernel/process.c
arch/arm/kernel/traps.c
arch/arm/mach-vexpress/dcscb.c
arch/arm/mm/dump.c
arch/arm/vfp/entry.S
arch/arm/vfp/vfphw.S
arch/ia64/Kconfig
arch/microblaze/Kconfig
arch/microblaze/Kconfig.platform [new file with mode: 0644]
arch/microblaze/Makefile
arch/microblaze/boot/dts/system.dts [changed from symlink to file mode: 0644]
arch/microblaze/include/asm/io.h
arch/microblaze/include/asm/processor.h
arch/microblaze/include/asm/setup.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/Makefile
arch/microblaze/kernel/heartbeat.c
arch/microblaze/kernel/intc.c
arch/microblaze/kernel/platform.c [new file with mode: 0644]
arch/microblaze/kernel/process.c
arch/microblaze/kernel/signal.c
arch/microblaze/kernel/syscall_table.S
arch/microblaze/kernel/timer.c
arch/microblaze/mm/consistent.c
arch/microblaze/mm/init.c
arch/microblaze/mm/pgtable.c
arch/microblaze/platform/Kconfig.platform [deleted file]
arch/microblaze/platform/Makefile [deleted file]
arch/microblaze/platform/generic/Kconfig.auto [deleted file]
arch/microblaze/platform/generic/Makefile [deleted file]
arch/microblaze/platform/generic/system.dts [deleted file]
arch/microblaze/platform/platform.c [deleted file]
arch/mips/include/asm/syscall.h
arch/mips/kernel/ptrace.c
arch/mips/loongson/lemote-2f/clock.c
arch/mn10300/include/asm/highmem.h
arch/parisc/Kconfig
arch/powerpc/Kconfig
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/configs/ps3_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/configs/pseries_le_defconfig
arch/powerpc/include/asm/reg.h
arch/powerpc/platforms/powernv/Kconfig
arch/s390/Kconfig
arch/s390/configs/default_defconfig
arch/s390/include/asm/syscall.h
arch/sh/Kconfig
arch/sh/configs/rsk7203_defconfig
arch/sparc/Kconfig
arch/um/Kconfig.common
arch/x86/Kconfig
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/head_32.S
arch/x86/boot/compressed/head_64.S
arch/x86/include/asm/syscall.h
arch/x86/kernel/acpi/cstate.c
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_intel.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/irq.c
arch/x86/kernel/ldt.c
arch/x86/kernel/pci-calgary_64.c
arch/xtensa/configs/iss_defconfig
arch/xtensa/configs/s6105_defconfig
block/blk-core.c
block/blk-map.c
block/blk-mq.c
block/blk-softirq.c
block/blk.h
block/elevator.c
drivers/acpi/Kconfig
drivers/acpi/dock.c
drivers/acpi/osl.c
drivers/acpi/thermal.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/base/power/domain.c
drivers/base/regmap/regmap.c
drivers/block/drbd/drbd_receiver.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/char/Kconfig
drivers/char/virtio_console.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.powerpc
drivers/cpufreq/Makefile
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/at32ap-cpufreq.c
drivers/cpufreq/cris-artpec3-cpufreq.c
drivers/cpufreq/cris-etraxfs-cpufreq.c
drivers/cpufreq/elanfreq.c
drivers/cpufreq/exynos4210-cpufreq.c
drivers/cpufreq/exynos4x12-cpufreq.c
drivers/cpufreq/exynos5250-cpufreq.c
drivers/cpufreq/freq_table.c
drivers/cpufreq/ia64-acpi-cpufreq.c
drivers/cpufreq/kirkwood-cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/loongson2_cpufreq.c
drivers/cpufreq/maple-cpufreq.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/pasemi-cpufreq.c
drivers/cpufreq/pmac32-cpufreq.c
drivers/cpufreq/pmac64-cpufreq.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/powernv-cpufreq.c [new file with mode: 0644]
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/cpufreq/ppc_cbe_cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpufreq/s3c64xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpufreq/sc520_freq.c
drivers/cpufreq/spear-cpufreq.c
drivers/cpufreq/speedstep-ich.c
drivers/cpufreq/speedstep-smi.c
drivers/cpufreq/unicore2-cpufreq.c
drivers/cpuidle/sysfs.c
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/acpi-dma.c
drivers/dma/at_hdmac.c
drivers/dma/cppi41.c
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw/core.c
drivers/dma/dw/pci.c
drivers/dma/dw/regs.h
drivers/dma/edma.c
drivers/dma/fsl-edma.c [new file with mode: 0644]
drivers/dma/imx-dma.c
drivers/dma/mmp_pdma.c
drivers/dma/mmp_tdma.c
drivers/dma/omap-dma.c
drivers/dma/pch_dma.c
drivers/dma/qcom_bam_dma.c [new file with mode: 0644]
drivers/dma/s3c24xx-dma.c
drivers/dma/sh/Kconfig
drivers/dma/sh/Makefile
drivers/dma/sh/rcar-audmapp.c [new file with mode: 0644]
drivers/dma/sh/shdma-base.c
drivers/dma/sh/shdma-of.c
drivers/dma/sh/shdmac.c
drivers/dma/sh/sudmac.c
drivers/dma/sirf-dma.c
drivers/firmware/efi/efi-stub-helper.c
drivers/idle/intel_idle.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/leds/Kconfig
drivers/leds/led-core.c
drivers/leds/led-triggers.c
drivers/leds/leds-88pm860x.c
drivers/leds/leds-adp5520.c
drivers/leds/leds-asic3.c
drivers/leds/leds-blinkm.c
drivers/leds/leds-clevo-mail.c
drivers/leds/leds-cobalt-qube.c
drivers/leds/leds-da903x.c
drivers/leds/leds-da9052.c
drivers/leds/leds-fsg.c
drivers/leds/leds-gpio.c
drivers/leds/leds-hp6xx.c
drivers/leds/leds-lm3533.c
drivers/leds/leds-lp5521.c
drivers/leds/leds-lp5523.c
drivers/leds/leds-lp5562.c
drivers/leds/leds-lt3593.c
drivers/leds/leds-mc13783.c
drivers/leds/leds-netxbig.c
drivers/leds/leds-ns2.c
drivers/leds/leds-ot200.c
drivers/leds/leds-pwm.c
drivers/leds/leds-s3c24xx.c
drivers/leds/leds-ss4200.c
drivers/leds/leds-wm831x-status.c
drivers/leds/leds-wm8350.c
drivers/leds/trigger/ledtrig-cpu.c
drivers/md/bitmap.c
drivers/md/md.c
drivers/md/md.h
drivers/md/raid1.c
drivers/md/raid5.c
drivers/media/dvb-frontends/drx39xyj/Kconfig
drivers/media/dvb-frontends/lgdt3305.c
drivers/media/dvb-frontends/m88rs2000.c
drivers/media/platform/ti-vpe/vpe.c
drivers/media/rc/img-ir/img-ir-hw.c
drivers/media/rc/img-ir/img-ir-nec.c
drivers/media/rc/ir-nec-decoder.c
drivers/media/rc/keymaps/rc-tivo.c
drivers/media/rc/rc-main.c
drivers/media/tuners/r820t.c
drivers/media/tuners/tuner-xc2028.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/gspca/jpeg.h
drivers/media/usb/stk1160/stk1160-ac97.c
drivers/net/ntb_netdev.c
drivers/ntb/ntb_hw.c
drivers/ntb/ntb_hw.h
drivers/ntb/ntb_transport.c
drivers/platform/x86/Kconfig
drivers/platform/x86/Makefile
drivers/platform/x86/alienware-wmi.c [new file with mode: 0644]
drivers/platform/x86/fujitsu-tablet.c
drivers/platform/x86/intel_baytrail.c [deleted file]
drivers/platform/x86/intel_baytrail.h [deleted file]
drivers/platform/x86/panasonic-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/regulator/bcm590xx-regulator.c
drivers/regulator/s2mpa01.c
drivers/regulator/s2mps11.c
drivers/regulator/s5m8767.c
drivers/scsi/Kconfig
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_pm.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_scan.c
drivers/scsi/sd.c
drivers/spi/Kconfig
drivers/spi/spi-fsl-espi.c
drivers/spi/spi-fsl-spi.c
drivers/spi/spi-mpc512x-psc.c
drivers/spi/spi-mpc52xx-psc.c
drivers/spi/spi-mpc52xx.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-sh.c
drivers/spi/spi-txx9.c
drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
drivers/staging/lustre/lustre/llite/symlink.c
drivers/staging/media/msi3101/msi001.c
drivers/staging/media/msi3101/sdr-msi3101.c
drivers/staging/usbip/stub_dev.c
drivers/staging/usbip/usbip_common.c
drivers/staging/usbip/usbip_common.h
drivers/staging/usbip/vhci_hcd.c
drivers/staging/usbip/vhci_sysfs.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tpg.h
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/loopback/tcm_loop.c
drivers/target/sbp/sbp_target.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tcm_fc.h
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/imx_thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/tty/tty_audit.c
drivers/usb/gadget/tcm_usb_gadget.c
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/video/backlight/backlight.c
drivers/video/backlight/gpio_backlight.c
drivers/video/backlight/lm3639_bl.c
fs/aio.c
fs/bio-integrity.c
fs/bio.c
fs/block_dev.c
fs/btrfs/async-thread.c
fs/btrfs/backref.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/volumes.c
fs/buffer.c
fs/cachefiles/bind.c
fs/cachefiles/namei.c
fs/ceph/file.c
fs/ceph/ioctl.c
fs/cifs/cifsfs.c
fs/cifs/file.c
fs/exec.c
fs/exofs/ore_raid.c
fs/exofs/super.c
fs/ext4/file.c
fs/file.c
fs/file_table.c
fs/fuse/dev.c
fs/fuse/file.c
fs/mount.h
fs/namei.c
fs/namespace.c
fs/ncpfs/inode.c
fs/ncpfs/ncp_fs_sb.h
fs/ntfs/inode.c
fs/ocfs2/cluster/tcp.c
fs/ocfs2/file.c
fs/open.c
fs/pipe.c
fs/pnode.c
fs/pnode.h
fs/proc/base.c
fs/proc/namespaces.c
fs/proc/self.c
fs/proc_namespace.c
fs/splice.c
fs/udf/file.c
fs/xfs/xfs_file.c
fs/xfs/xfs_ioctl.c
include/asm-generic/syscall.h
include/drm/i915_drm.h
include/linux/acpi_dma.h
include/linux/audit.h
include/linux/bio.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/buffer_head.h
include/linux/cpufreq.h
include/linux/dmaengine.h
include/linux/dw_dmac.h
include/linux/fdtable.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/mfd/mc13xxx.h
include/linux/mm.h
include/linux/mount.h
include/linux/nbd.h
include/linux/ntb.h
include/linux/nvme.h
include/linux/pipe_fs_i.h
include/linux/platform_data/dma-rcar-audmapp.h [new file with mode: 0644]
include/linux/platform_data/leds-s3c24xx.h
include/linux/sched.h
include/linux/syscalls.h
include/linux/tracepoint.h
include/linux/uio.h
include/media/rc-core.h
include/net/9p/client.h
include/net/9p/transport.h
include/scsi/scsi_device.h
include/sound/cs8427.h
include/target/iscsi/iscsi_transport.h
include/target/target_core_backend.h
include/target/target_core_base.h
include/target/target_core_fabric.h
include/trace/events/syscalls.h
include/trace/ftrace.h
include/uapi/linux/audit.h
include/uapi/linux/capability.h
include/uapi/linux/nvme.h
include/uapi/linux/v4l2-common.h
init/Kconfig
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/relay.c
kernel/seccomp.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_export.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_output.c
kernel/trace/trace_uprobe.c
kernel/tracepoint.c
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/audit.c
lib/compat_audit.c [new file with mode: 0644]
mm/Makefile
mm/filemap.c
mm/iov_iter.c [new file with mode: 0644]
mm/process_vm_access.c
mm/shmem.c
mm/util.c
net/9p/client.c
net/9p/trans_fd.c
net/9p/trans_rdma.c
net/9p/trans_virtio.c
security/integrity/evm/evm_crypto.c
security/integrity/evm/evm_main.c
security/integrity/integrity_audit.c
security/lsm_audit.c
security/tomoyo/realpath.c
sound/i2c/cs8427.c
sound/mips/au1x00.c
sound/oss/ad1848.c
sound/oss/dmasound/dmasound_paula.c
sound/oss/opl3.c
sound/oss/pas2_mixer.c
sound/oss/pas2_pcm.c
sound/oss/sb_common.c
sound/oss/sb_ess.c
sound/oss/sequencer.c
sound/oss/sound_config.h
sound/oss/soundcard.c
sound/oss/uart401.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/delta.c
sound/pci/ice1712/ice1712.c
sound/soc/codecs/alc5623.c
sound/soc/codecs/alc5632.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l52.h
sound/soc/codecs/cs42xx8.c
sound/soc/codecs/da732x.c
sound/soc/codecs/max98090.c
sound/soc/codecs/rt5640.c
sound/soc/codecs/tlv320aic23-i2c.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/fsl/fsl_sai.c
sound/soc/fsl/fsl_sai.h
sound/soc/samsung/ac97.c
sound/soc/samsung/dma.h
sound/soc/samsung/i2s.c
sound/soc/samsung/pcm.c
sound/soc/samsung/s3c2412-i2s.c
sound/soc/samsung/s3c24xx-i2s.c
sound/soc/samsung/spdif.c
sound/usb/pcm.c
tools/power/x86/turbostat/turbostat.8
tools/power/x86/turbostat/turbostat.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic.c

index 7dbf96b724edb7c2035e358c5736a1bea03c6bb5..676fdf5f2a99af0ee623eeef4b2938c1c1c8924f 100644 (file)
@@ -83,8 +83,10 @@ Contact:     Rafael J. Wysocki <rjw@rjwysocki.net>
 Description:
                The /sys/devices/.../wakeup_count attribute contains the number
                of signaled wakeup events associated with the device.  This
-               attribute is read-only.  If the device is not enabled to wake up
+               attribute is read-only.  If the device is not capable to wake up
                the system from sleep states, this attribute is not present.
+               If the device is not enabled to wake up the system from sleep
+               states, this attribute is empty.
 
 What:          /sys/devices/.../power/wakeup_active_count
 Date:          September 2010
@@ -93,8 +95,10 @@ Description:
                The /sys/devices/.../wakeup_active_count attribute contains the
                number of times the processing of wakeup events associated with
                the device was completed (at the kernel level).  This attribute
-               is read-only.  If the device is not enabled to wake up the
-               system from sleep states, this attribute is not present.
+               is read-only.  If the device is not capable to wake up the
+               system from sleep states, this attribute is not present.  If
+               the device is not enabled to wake up the system from sleep
+               states, this attribute is empty.
 
 What:          /sys/devices/.../power/wakeup_abort_count
 Date:          February 2012
@@ -104,8 +108,9 @@ Description:
                number of times the processing of a wakeup event associated with
                the device might have aborted system transition into a sleep
                state in progress.  This attribute is read-only.  If the device
-               is not enabled to wake up the system from sleep states, this
-               attribute is not present.
+               is not capable to wake up the system from sleep states, this
+               attribute is not present.  If the device is not enabled to wake
+               up the system from sleep states, this attribute is empty.
 
 What:          /sys/devices/.../power/wakeup_expire_count
 Date:          February 2012
@@ -114,8 +119,10 @@ Description:
                The /sys/devices/.../wakeup_expire_count attribute contains the
                number of times a wakeup event associated with the device has
                been reported with a timeout that expired.  This attribute is
-               read-only.  If the device is not enabled to wake up the system
-               from sleep states, this attribute is not present.
+               read-only.  If the device is not capable to wake up the system
+               from sleep states, this attribute is not present.  If the
+               device is not enabled to wake up the system from sleep states,
+               this attribute is empty.
 
 What:          /sys/devices/.../power/wakeup_active
 Date:          September 2010
@@ -124,8 +131,10 @@ Description:
                The /sys/devices/.../wakeup_active attribute contains either 1,
                or 0, depending on whether or not a wakeup event associated with
                the device is being processed (1).  This attribute is read-only.
-               If the device is not enabled to wake up the system from sleep
-               states, this attribute is not present.
+               If the device is not capable to wake up the system from sleep
+               states, this attribute is not present.  If the device is not
+               enabled to wake up the system from sleep states, this attribute
+               is empty.
 
 What:          /sys/devices/.../power/wakeup_total_time_ms
 Date:          September 2010
@@ -134,8 +143,9 @@ Description:
                The /sys/devices/.../wakeup_total_time_ms attribute contains
                the total time of processing wakeup events associated with the
                device, in milliseconds.  This attribute is read-only.  If the
-               device is not enabled to wake up the system from sleep states,
-               this attribute is not present.
+               device is not capable to wake up the system from sleep states,
+               this attribute is not present.  If the device is not enabled to
+               wake up the system from sleep states, this attribute is empty.
 
 What:          /sys/devices/.../power/wakeup_max_time_ms
 Date:          September 2010
@@ -144,8 +154,10 @@ Description:
                The /sys/devices/.../wakeup_max_time_ms attribute contains
                the maximum time of processing a single wakeup event associated
                with the device, in milliseconds.  This attribute is read-only.
-               If the device is not enabled to wake up the system from sleep
-               states, this attribute is not present.
+               If the device is not capable to wake up the system from sleep
+               states, this attribute is not present.  If the device is not
+               enabled to wake up the system from sleep states, this attribute
+               is empty.
 
 What:          /sys/devices/.../power/wakeup_last_time_ms
 Date:          September 2010
@@ -156,7 +168,8 @@ Description:
                signaling the last wakeup event associated with the device, in
                milliseconds.  This attribute is read-only.  If the device is
                not enabled to wake up the system from sleep states, this
-               attribute is not present.
+               attribute is not present.  If the device is not enabled to wake
+               up the system from sleep states, this attribute is empty.
 
 What:          /sys/devices/.../power/wakeup_prevent_sleep_time_ms
 Date:          February 2012
@@ -165,9 +178,10 @@ Description:
                The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
                contains the total time the device has been preventing
                opportunistic transitions to sleep states from occurring.
-               This attribute is read-only.  If the device is not enabled to
+               This attribute is read-only.  If the device is not capable to
                wake up the system from sleep states, this attribute is not
-               present.
+               present.  If the device is not enabled to wake up the system
+               from sleep states, this attribute is empty.
 
 What:          /sys/devices/.../power/autosuspend_delay_ms
 Date:          September 2010
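
The hunks above give each wakeup attribute three observable states: the file
is absent (the device cannot wake the system at all), present but empty (the
device could wake the system but is not enabled to), or present with a value.
A minimal user-space sketch of reading that back follows; the device path is a
made-up placeholder, not something taken from the patch.

	/* Stand-alone sketch: map the three documented states of a wakeup
	 * attribute.  The sysfs path below is hypothetical. */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *attr =
			"/sys/devices/platform/example-dev/power/wakeup_count";
		char buf[32];
		FILE *f = fopen(attr, "r");

		if (!f) {
			/* Attribute absent: no wakeup capability. */
			printf("no wakeup capability (%s)\n", strerror(errno));
			return 0;
		}
		if (!fgets(buf, sizeof(buf), f))
			/* Present but empty: capable, but not enabled. */
			printf("wakeup capable but not enabled\n");
		else
			printf("signaled wakeup events: %s", buf);
		fclose(f);
		return 0;
	}
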
diff --git a/Documentation/devicetree/bindings/dma/fsl-edma.txt b/Documentation/devicetree/bindings/dma/fsl-edma.txt
new file mode 100644 (file)
index 0000000..191d7bd
--- /dev/null
@@ -0,0 +1,76 @@
+* Freescale enhanced Direct Memory Access (eDMA) Controller
+
+  The eDMA channels have multiplex capability provided by programmable
+memory-mapped registers. Channels are split into two groups, called DMAMUX0
+and DMAMUX1; a specific DMA request source can only be multiplexed by the
+channels of one group, DMAMUX0 or DMAMUX1, but not both.
+
+* eDMA Controller
+Required properties:
+- compatible :
+       - "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
+- reg : Specifies the base physical address(es) and size of the eDMA registers.
+       The 1st region is eDMA control register's address and size.
+       The 2nd and the 3rd regions are programmable channel multiplexing
+       control register's address and size.
+- interrupts : A list of interrupt-specifiers, one for each entry in
+       interrupt-names.
+- interrupt-names : Should contain:
+       "edma-tx" - the transmission interrupt
+       "edma-err" - the error interrupt
+- #dma-cells : Must be <2>.
+       The 1st cell specifies the DMAMUX (0 for DMAMUX0 and 1 for DMAMUX1).
+       A specific request source can only be multiplexed by a specific
+       channel group (DMAMUX).
+       The 2nd cell specifies the request source (slot) ID.
+       See the SoC's reference manual for all the supported request sources.
+- dma-channels : Number of channels supported by the controller
+- clock-names : A list of channel group clock names. Should contain:
+       "dmamux0" - clock name of mux0 group
+       "dmamux1" - clock name of mux1 group
+- clocks : A list of phandle and clock-specifier pairs, one for each entry in
+       clock-names.
+
+Optional properties:
+- big-endian: If present, the eDMA registers and hardware scatter/gather
+       descriptors are implemented in big-endian mode; otherwise they are in
+       little-endian mode.
+
+
+Examples:
+
+edma0: dma-controller@40018000 {
+       #dma-cells = <2>;
+       compatible = "fsl,vf610-edma";
+       reg = <0x40018000 0x2000>,
+               <0x40024000 0x1000>,
+               <0x40025000 0x1000>;
+       interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>,
+               <0 9 IRQ_TYPE_LEVEL_HIGH>;
+       interrupt-names = "edma-tx", "edma-err";
+       dma-channels = <32>;
+       clock-names = "dmamux0", "dmamux1";
+       clocks = <&clks VF610_CLK_DMAMUX0>,
+               <&clks VF610_CLK_DMAMUX1>;
+};
+
+
+* DMA clients
+DMA client drivers that use the DMA function must use the format described
+in the dma.txt file, using a two-cell specifier for each channel: the 1st
+specifies the channel group (DMAMUX) in which this request can be multiplexed,
+and the 2nd specifies the request source.
+
+Examples:
+
+sai2: sai@40031000 {
+       compatible = "fsl,vf610-sai";
+       reg = <0x40031000 0x1000>;
+       interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+       clock-names = "sai";
+       clocks = <&clks VF610_CLK_SAI2>;
+       dma-names = "tx", "rx";
+       dmas = <&edma0 0 21>,
+               <&edma0 0 20>;
+       status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
new file mode 100644 (file)
index 0000000..d75a9d7
--- /dev/null
@@ -0,0 +1,41 @@
+QCOM BAM DMA controller
+
+Required properties:
+- compatible: must contain "qcom,bam-v1.4.0" for MSM8974
+- reg: Address range for DMA registers
+- interrupts: Should contain the one interrupt shared by all channels
+- #dma-cells: must be <1>, the cell in the dmas property of the client device
+  represents the channel number
+- clocks: required clock
+- clock-names: must contain "bam_clk" entry
+- qcom,ee : indicates the active Execution Environment identifier (0-7) used in
+  the secure world.
+
+Example:
+
+       uart-bam: dma@f9984000 {
+               compatible = "qcom,bam-v1.4.0";
+               reg = <0xf9984000 0x15000>;
+               interrupts = <0 94 0>;
+               clocks = <&gcc GCC_BAM_DMA_AHB_CLK>;
+               clock-names = "bam_clk";
+               #dma-cells = <1>;
+               qcom,ee = <0>;
+       };
+
+DMA clients must use the format described in the dma.txt file, using a two cell
+specifier for each channel.
+
+Example:
+       serial@f991e000 {
+               compatible = "qcom,msm-uart";
+               reg = <0xf991e000 0x1000>,
+                       <0xf9944000 0x19000>;
+               interrupts = <0 108 0>;
+               clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>,
+                       <&gcc GCC_BLSP1_AHB_CLK>;
+               clock-names = "core", "iface";
+
+               dmas = <&uart-bam 0>, <&uart-bam 1>;
+               dma-names = "rx", "tx";
+       };
diff --git a/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt b/Documentation/devicetree/bindings/dma/sirfsoc-dma.txt
new file mode 100644 (file)
index 0000000..ecbc96a
--- /dev/null
@@ -0,0 +1,43 @@
+* CSR SiRFSoC DMA controller
+
+See dma.txt first
+
+Required properties:
+- compatible: Should be "sirf,prima2-dmac" or "sirf,marco-dmac"
+- reg: Should contain DMA registers location and length.
+- interrupts: Should contain one interrupt shared by all channels
+- #dma-cells: Must be <1>. Used to represent the number of integer
+    cells in the dmas property of the client device.
+- clocks: Required clock
+
+Example:
+
+Controller:
+dmac0: dma-controller@b00b0000 {
+       compatible = "sirf,prima2-dmac";
+       reg = <0xb00b0000 0x10000>;
+       interrupts = <12>;
+       clocks = <&clks 24>;
+       #dma-cells = <1>;
+};
+
+
+Client:
+Fill in the specific DMA request line in dmas. In the example below, the spi0
+read channel uses request line 9 of the 2nd DMA controller and the write
+channel uses line 4 of the same controller; the spi1 read channel uses request
+line 12 of the 1st DMA controller and the write channel uses line 13:
+
+spi0: spi@b00d0000 {
+       compatible = "sirf,prima2-spi";
+       dmas = <&dmac1 9>,
+               <&dmac1 4>;
+       dma-names = "rx", "tx";
+};
+
+spi1: spi@b0170000 {
+       compatible = "sirf,prima2-spi";
+       dmas = <&dmac0 12>,
+               <&dmac0 13>;
+       dma-names = "rx", "tx";
+};
index df1b3080f6b88f22f5844d02c7c2f717dac50175..f77148f372ea2b5f986a96e25078c515889b48a3 100644 (file)
@@ -21,6 +21,8 @@ LED sub-node properties:
   on).  The "keep" setting will keep the LED at whatever its current
   state is, without producing a glitch.  The default is off if this
   property is not present.
+- retain-state-suspended: (optional) The LED state is retained across suspend,
+  for example for a charger-LED GPIO.
 
 Examples:
 
@@ -50,3 +52,13 @@ run-control {
                default-state = "on";
        };
 };
+
+leds {
+       compatible = "gpio-leds";
+
+       charger-led {
+               gpios = <&gpio1 2 0>;
+               linux,default-trigger = "max8903-charger-charging";
+               retain-state-suspended;
+       };
+};
index abd9e3cb2db786323307b5ea0ee39a5e40ce9ac0..1413f39912d3acc3fbb8b5557909557e7500a1c0 100644 (file)
@@ -10,9 +10,44 @@ Optional properties:
 - fsl,mc13xxx-uses-touch : Indicate the touchscreen controller is being used
 
 Sub-nodes:
+- leds : Contains the LED nodes and the initial register values in the
+  "led-control" property. The number of registers depends on the IC used:
+  6 for MC13783, 4 for MC13892, 1 for MC34708. See the datasheet for the
+  bit definitions of these registers.
+  - #address-cells: Must be 1.
+  - #size-cells: Must be 0.
+  Each led node should contain "reg", which is used as the LED ID (described
+  below).  The optional properties "label" and "linux,default-trigger" are
+  described in Documentation/devicetree/bindings/leds/common.txt.
 - regulators : Contain the regulator nodes. The regulators are bound using
   their names as listed below with their registers and bits for enabling.
 
+MC13783 LED IDs:
+    0  : Main display
+    1  : AUX display
+    2  : Keypad
+    3  : Red 1
+    4  : Green 1
+    5  : Blue 1
+    6  : Red 2
+    7  : Green 2
+    8  : Blue 2
+    9  : Red 3
+    10 : Green 3
+    11 : Blue 3
+
+MC13892 LED IDs:
+    0  : Main display
+    1  : AUX display
+    2  : Keypad
+    3  : Red
+    4  : Green
+    5  : Blue
+
+MC34708 LED IDs:
+    0  : Charger Red
+    1  : Charger Green
+
 MC13783 regulators:
     sw1a      : regulator SW1A      (register 24, bit 0)
     sw1b      : regulator SW1B      (register 25, bit 0)
@@ -89,6 +124,18 @@ ecspi@70010000 { /* ECSPI1 */
                interrupt-parent = <&gpio0>;
                interrupts = <8>;
 
+               leds {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       led-control = <0x000 0x000 0x0e0 0x000>;
+
+                       sysled {
+                               reg = <3>;
+                               label = "system:red:live";
+                               linux,default-trigger = "heartbeat";
+                       };
+               };
+
                regulators {
                        sw1_reg: mc13892__sw1 {
                                regulator-min-microvolt = <600000>;
index b93e9a91e30e2a5fc35e28dba0c5282d2e2c7a0e..3aa4a8f528f486b1b647e3720adbfef6068bbf6c 100644 (file)
@@ -20,15 +20,6 @@ Required properties:
                     have.
 - interrupt-parent: The phandle for the interrupt controller that
                     services interrupts for this device.
-- fsl,mode:         The operating mode for the SSI interface.
-                    "i2s-slave" - I2S mode, SSI is clock slave
-                    "i2s-master" - I2S mode, SSI is clock master
-                    "lj-slave" - left-justified mode, SSI is clock slave
-                    "lj-master" - l.j. mode, SSI is clock master
-                    "rj-slave" - right-justified mode, SSI is clock slave
-                    "rj-master" - r.j., SSI is clock master
-                    "ac97-slave" - AC97 mode, SSI is clock slave
-                    "ac97-master" - AC97 mode, SSI is clock master
 - fsl,playback-dma: Phandle to a node for the DMA channel to use for
                     playback of audio.  This is typically dictated by SOC
                     design.  See the notes below.
@@ -47,6 +38,9 @@ Required properties:
                     be connected together, and SRFS and STFS be connected
                     together.  This would still allow different sample sizes,
                     but not different sample rates.
+ - clocks:          "ipg" - Required clock for the SSI unit
+                    "baud" - Required clock for SSI master mode. Otherwise this
+                     clock is not used
 
 Required are also ac97 link bindings if ac97 is used. See
 Documentation/devicetree/bindings/sound/soc-ac97link.txt for the necessary
@@ -64,6 +58,15 @@ Optional properties:
                    Documentation/devicetree/bindings/dma/dma.txt.
 - dma-names:       Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
                    is not defined.
+- fsl,mode:         The operating mode for the SSI interface.
+                    "i2s-slave" - I2S mode, SSI is clock slave
+                    "i2s-master" - I2S mode, SSI is clock master
+                    "lj-slave" - left-justified mode, SSI is clock slave
+                    "lj-master" - l.j. mode, SSI is clock master
+                    "rj-slave" - right-justified mode, SSI is clock slave
+                    "rj-master" - r.j., SSI is clock master
+                    "ac97-slave" - AC97 mode, SSI is clock slave
+                    "ac97-master" - AC97 mode, SSI is clock master
 
 Child 'codec' node required properties:
 - compatible:       Compatible list, contains the name of the codec
index 8f081c96a4fa96845edba3d060583ddc974bb239..130cd17e3680d825777d52e464b8a28e3597b9b8 100644 (file)
@@ -8,7 +8,13 @@ Required properties:
 - interrupts: pair specifying rx and tx irq
 - clocks: phandle to the spi clock
 - cs-gpios: see spi-bus.txt
-- efm32,location: Value to write to the ROUTE register's LOCATION bitfield to configure the pinmux for the device, see datasheet for values.
+
+Recommended properties:
+- efm32,location: Value to write to the ROUTE register's LOCATION bitfield to
+                  configure the pinmux for the device, see datasheet for values.
+                  If "efm32,location" property is not provided, keeping what is
+                  already configured in the hardware, so its either the reset
+                  default 0 or whatever the bootloader did.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/video/backlight/gpio-backlight.txt b/Documentation/devicetree/bindings/video/backlight/gpio-backlight.txt
new file mode 100644 (file)
index 0000000..321be66
--- /dev/null
@@ -0,0 +1,16 @@
+gpio-backlight bindings
+
+Required properties:
+  - compatible: "gpio-backlight"
+  - gpios: describes the gpio that is used for enabling/disabling the backlight.
+    Refer to bindings/gpio/gpio.txt for more details.
+
+Optional properties:
+  - default-on: enable the backlight at boot.
+
+Example:
+       backlight {
+               compatible = "gpio-backlight";
+               gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+               default-on;
+       };
index efca5c1bbb1028245a6d75e846e96a6ecb4150bd..eba7901342531d2dc089c9a39d990aa924b86526 100644 (file)
@@ -202,7 +202,7 @@ prototypes:
                                unsigned long *);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        int (*launder_page)(struct page *);
-       int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
+       int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
        int (*error_remove_page)(struct address_space *, struct page *);
        int (*swap_activate)(struct file *);
        int (*swap_deactivate)(struct file *);
index 94eb86287bcb08f3ebc0fa826438fed1af8ded1a..617f6d70c0778ce37716d25fde6f0c158f492707 100644 (file)
@@ -596,7 +596,7 @@ struct address_space_operations {
        /* migrate the contents of a page to the specified target */
        int (*migratepage) (struct page *, struct page *);
        int (*launder_page) (struct page *);
-       int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+       int (*is_partially_uptodate) (struct page *, unsigned long,
                                        unsigned long);
        void (*is_dirty_writeback) (struct page *, bool *, bool *);
        int (*error_remove_page) (struct mapping *mapping, struct page *page);
index b6c67d592be5abee31b7bed72beca0cdc273b7e6..03e50b4883a8e982dbe1ebe919a42f85b4755025 100644 (file)
@@ -2563,6 +2563,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        pcmv=           [HW,PCMCIA] BadgePAD 4
 
+       pd_ignore_unused
+                       [PM]
+                       Keep all power-domains already enabled by the bootloader on,
+                       even if no driver has claimed them. This is useful
+                       for debug and development, but should not be
+                       needed on a platform with proper driver support.
+
        pd.             [PARIDE]
                        See Documentation/blockdev/paride.txt.
 
index beaa87a6e7fa9ad450ac120c3fd176785c2a4b34..6dc67b1fdb507016d0d0d77bf66f9875ace00f99 100644 (file)
@@ -6543,7 +6543,7 @@ F:        drivers/net/wireless/orinoco/
 
 OSD LIBRARY and FILESYSTEM
 M:     Boaz Harrosh <bharrosh@panasas.com>
-M:     Benny Halevy <bhalevy@tonian.com>
+M:     Benny Halevy <bhalevy@primarydata.com>
 L:     osd-dev@open-osd.org
 W:     http://open-osd.org
 T:     git git://git.open-osd.org/open-osd.git
index f6c6b345388c85a8172d3558d098ed925161a9a8..b7ff9a318c31c3f849c5a2223582132ebde83f72 100644 (file)
@@ -22,6 +22,7 @@ config ALPHA
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
+       select HAVE_ARCH_AUDITSYSCALL
        select HAVE_MOD_ARCH_SPECIFIC
        select MODULES_USE_ELF_RELA
        select ODD_RT_SIGACTION
index 5db05f6a041289676617cdf713385f3e30238055..ab438cb5af5570f5aae9b3215b9c73586ce80427 100644 (file)
@@ -24,6 +24,7 @@ config ARM
        select GENERIC_STRNCPY_FROM_USER
        select GENERIC_STRNLEN_USER
        select HARDIRQS_SW_RESEND
+       select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
index 55d3f79c2ef599f3d898905b86f16161cafc3e80..9d72674049d6ba7decd0678cd09380fa89634c08 100644 (file)
                                reg = <0xb00b0000 0x10000>;
                                interrupts = <12>;
                                clocks = <&clks 24>;
+                               #dma-cells = <1>;
                        };
 
                        dmac1: dma-controller@b0160000 {
                                reg = <0xb0160000 0x10000>;
                                interrupts = <13>;
                                clocks = <&clks 25>;
+                               #dma-cells = <1>;
                        };
 
                        vip@b00C0000 {
index 20145526cd7b23fe649751643b950ef03a6b7b82..1e82571d6823ef55169af9827efab952189e952c 100644 (file)
                                reg = <0xb00b0000 0x10000>;
                                interrupts = <12>;
                                clocks = <&clks 24>;
+                               #dma-cells = <1>;
                        };
 
                        dmac1: dma-controller@b0160000 {
                                reg = <0xb0160000 0x10000>;
                                interrupts = <13>;
                                clocks = <&clks 25>;
+                               #dma-cells = <1>;
                        };
 
                        vip@b00C0000 {
index 380ac4f20000c8c33d1744da3898d71e3f4b7c68..b974184f9941883339c0480df1290a6a6f66a4ae 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/ptrace.h>
 #include <asm/domain.h>
 #include <asm/opcodes-virt.h>
+#include <asm/asm-offsets.h>
 
 #define IOMEM(x)       (x)
 
        restore_irqs_notrace \oldcpsr
        .endm
 
+/*
+ * Get current thread_info.
+ */
+       .macro  get_thread_info, rd
+ ARM(  mov     \rd, sp, lsr #13        )
+ THUMB(        mov     \rd, sp                 )
+ THUMB(        lsr     \rd, \rd, #13           )
+       mov     \rd, \rd, lsl #13
+       .endm
+
+/*
+ * Increment/decrement the preempt count.
+ */
+#ifdef CONFIG_PREEMPT_COUNT
+       .macro  inc_preempt_count, ti, tmp
+       ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
+       add     \tmp, \tmp, #1                  @ increment it
+       str     \tmp, [\ti, #TI_PREEMPT]
+       .endm
+
+       .macro  dec_preempt_count, ti, tmp
+       ldr     \tmp, [\ti, #TI_PREEMPT]        @ get preempt count
+       sub     \tmp, \tmp, #1                  @ decrement it
+       str     \tmp, [\ti, #TI_PREEMPT]
+       .endm
+
+       .macro  dec_preempt_count_ti, ti, tmp
+       get_thread_info \ti
+       dec_preempt_count \ti, \tmp
+       .endm
+#else
+       .macro  inc_preempt_count, ti, tmp
+       .endm
+
+       .macro  dec_preempt_count, ti, tmp
+       .endm
+
+       .macro  dec_preempt_count_ti, ti, tmp
+       .endm
+#endif
+
 #define USER(x...)                             \
 9999:  x;                                      \
        .pushsection __ex_table,"a";            \
index 42f0889f058456be8e1c34705ae0e2aa12364baf..c651e3b26ec703b08cea0128eb76c23d9aa94d46 100644 (file)
@@ -221,4 +221,23 @@ static inline int cpu_is_xsc3(void)
 #define        cpu_is_xscale() 1
 #endif
 
+/*
+ * Marvell's PJ4 core is based on the ARMv7 architecture, but it has some
+ * modifications to its coprocessor handling. For this reason, we need a
+ * way to distinguish it.
+ */
+#ifndef CONFIG_CPU_PJ4
+#define cpu_is_pj4()   0
+#else
+static inline int cpu_is_pj4(void)
+{
+       unsigned int id;
+
+       id = read_cpuid_id();
+       if ((id & 0xfffffff0) == 0x562f5840)
+               return 1;
+
+       return 0;
+}
+#endif
 #endif
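
The cpu_is_pj4() helper added above lets PJ4-only coprocessor setup be skipped
on other ARMv7 cores.  A hypothetical init hook, not taken from this series,
might gate on it like this:

	/* Hypothetical example only: skip PJ4-specific setup elsewhere. */
	#include <linux/init.h>
	#include <asm/cputype.h>

	static int __init example_pj4_cp0_init(void)
	{
		if (!cpu_is_pj4())
			return 0;	/* not a PJ4 core: nothing to do */

		/* ...PJ4-specific coprocessor access setup would go here... */
		return 0;
	}
	late_initcall(example_pj4_cp0_init);
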
index 73ddd7239b33aa77d178ae1341c0c46c736a08e5..4651f6999b7de9351de6259ab5b184a1898b32c1 100644 (file)
@@ -7,7 +7,7 @@
 #ifndef _ASM_ARM_SYSCALL_H
 #define _ASM_ARM_SYSCALL_H
 
-#include <linux/audit.h> /* for AUDIT_ARCH_* */
+#include <uapi/linux/audit.h> /* for AUDIT_ARCH_* */
 #include <linux/elf.h> /* for ELF_EM */
 #include <linux/err.h>
 #include <linux/sched.h>
@@ -103,8 +103,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
        /* ARM tasks don't change audit architectures on the fly. */
        return AUDIT_ARCH_ARM;
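
With the argument-free form above, syscall_get_arch() always reports
AUDIT_ARCH_ARM and callers no longer need a task or register set.  A
hypothetical call site, not taken from this series, might look like:

	/* Hypothetical audit-entry helper: fetch the audit arch directly. */
	#include <asm/syscall.h>

	static void example_audit_entry(int major)
	{
		int arch = syscall_get_arch();	/* was syscall_get_arch(task, regs) */

		/* ...emit an audit record tagged with 'arch' for syscall 'major'... */
		(void)arch;
		(void)major;
	}
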
index 90c50d4b43f74089b8a7eae08753c41ec00b4461..5d1286d51154cdc09d269ee292a0541cde367a5b 100644 (file)
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
        if (!csize)
                return 0;
 
-       vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+       vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
        if (!vaddr)
                return -ENOMEM;
 
index 39f89fbd5111ee9f0a96d6712e1f0f2ba308147d..1420725142cab1817d26bc3ff7003ae4a4833732 100644 (file)
        movs    pc, lr                          @ return & move spsr_svc into cpsr
        .endm
 
-       .macro  get_thread_info, rd
-       mov     \rd, sp, lsr #13
-       mov     \rd, \rd, lsl #13
-       .endm
-
        @
        @ 32-bit wide "mov pc, reg"
        @
        .endm
 #endif /* ifdef CONFIG_CPU_V7M / else */
 
-       .macro  get_thread_info, rd
-       mov     \rd, sp
-       lsr     \rd, \rd, #13
-       mov     \rd, \rd, lsl #13
-       .endm
-
        @
        @ 32-bit wide "mov pc, reg"
        @
index c311ed94ff1cbfbfca554345e2603c687021bd0e..0bf5d64eba1d269f1f3c36ec1184e33320da6219 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <asm/opcodes.h>
 
 #include "kprobes.h"
 
@@ -153,7 +154,8 @@ kprobe_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
 
        if (handler) {
                /* We can emulate the instruction in (possibly) modified form */
-               asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+               asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+                                                  (rn << 16) | reglist);
                asi->insn_handler = handler;
                return INSN_GOOD;
        }
index 87839de77e5f6a0237745cdbc9dd4e57a36fac33..9db4b659d03ef7eb94ae962fd7af82ae5f4622c5 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <asm/system_info.h>
+#include <asm/opcodes.h>
 
 #include "kprobes-test.h"
 
@@ -159,9 +160,9 @@ void kprobe_arm_test_cases(void)
        TEST_SUPPORTED("cmp     sp, #0x1000");
 
        /* Data-processing with PC as shift*/
-       TEST_UNSUPPORTED(".word 0xe15c0f1e      @ cmp   r12, r14, asl pc")
-       TEST_UNSUPPORTED(".word 0xe1a0cf1e      @ mov   r12, r14, asl pc")
-       TEST_UNSUPPORTED(".word 0xe08caf1e      @ add   r10, r12, r14, asl pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) "       @ cmp   r12, r14, asl pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) "       @ mov   r12, r14, asl pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) "       @ add   r10, r12, r14, asl pc")
 
        /* Data-processing with PC as shift*/
        TEST_UNSUPPORTED("movs  pc, r1")
@@ -203,7 +204,7 @@ void kprobe_arm_test_cases(void)
        TEST("mrs       r0, cpsr")
        TEST("mrspl     r7, cpsr")
        TEST("mrs       r14, cpsr")
-       TEST_UNSUPPORTED(".word 0xe10ff000      @ mrs r15, cpsr")
+       TEST_UNSUPPORTED(__inst_arm(0xe10ff000) "       @ mrs r15, cpsr")
        TEST_UNSUPPORTED("mrs   r0, spsr")
        TEST_UNSUPPORTED("mrs   lr, spsr")
 
@@ -219,8 +220,8 @@ void kprobe_arm_test_cases(void)
        TEST_R("clzeq   r7, r",14,0x1,"")
        TEST_R("clz     lr, r",7, 0xffffffff,"")
        TEST(  "clz     r4, sp")
-       TEST_UNSUPPORTED(".word 0x016fff10      @ clz pc, r0")
-       TEST_UNSUPPORTED(".word 0x016f0f1f      @ clz r0, pc")
+       TEST_UNSUPPORTED(__inst_arm(0x016fff10) "       @ clz pc, r0")
+       TEST_UNSUPPORTED(__inst_arm(0x016f0f1f) "       @ clz r0, pc")
 
 #if __LINUX_ARM_ARCH__ >= 6
        TEST_UNSUPPORTED("bxj   r0")
@@ -229,7 +230,7 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R("blx  r",0,2f,"")
        TEST_BB_R("blx  r",7,2f,"")
        TEST_BF_R("blxeq        r",14,2f,"")
-       TEST_UNSUPPORTED(".word 0x0120003f      @ blx pc")
+       TEST_UNSUPPORTED(__inst_arm(0x0120003f) "       @ blx pc")
 
        TEST_RR(   "qadd        r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(   "qaddvs      lr, r",9, VAL2,", r",8, VAL1,"")
@@ -243,190 +244,190 @@ void kprobe_arm_test_cases(void)
        TEST_RR(   "qdsub       r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(   "qdsubvs     lr, r",9, VAL2,", r",8, VAL1,"")
        TEST_R(    "qdsub       lr, r",9, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe101f050      @ qadd pc, r0, r1")
-       TEST_UNSUPPORTED(".word 0xe121f050      @ qsub pc, r0, r1")
-       TEST_UNSUPPORTED(".word 0xe141f050      @ qdadd pc, r0, r1")
-       TEST_UNSUPPORTED(".word 0xe161f050      @ qdsub pc, r0, r1")
-       TEST_UNSUPPORTED(".word 0xe16f2050      @ qdsub r2, r0, pc")
-       TEST_UNSUPPORTED(".word 0xe161205f      @ qdsub r2, pc, r1")
+       TEST_UNSUPPORTED(__inst_arm(0xe101f050) "       @ qadd pc, r0, r1")
+       TEST_UNSUPPORTED(__inst_arm(0xe121f050) "       @ qsub pc, r0, r1")
+       TEST_UNSUPPORTED(__inst_arm(0xe141f050) "       @ qdadd pc, r0, r1")
+       TEST_UNSUPPORTED(__inst_arm(0xe161f050) "       @ qdsub pc, r0, r1")
+       TEST_UNSUPPORTED(__inst_arm(0xe16f2050) "       @ qdsub r2, r0, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe161205f) "       @ qdsub r2, pc, r1")
 
        TEST_UNSUPPORTED("bkpt  0xffff")
        TEST_UNSUPPORTED("bkpt  0x0000")
 
-       TEST_UNSUPPORTED(".word 0xe1600070 @ smc #0")
+       TEST_UNSUPPORTED(__inst_arm(0xe1600070) " @ smc #0")
 
        TEST_GROUP("Halfword multiply and multiply-accumulate")
 
        TEST_RRR(    "smlabb    r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "smlabbge  r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "smlabb    lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe10f3281 @ smlabb pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe10f3281) " @ smlabb pc, r1, r2, r3")
        TEST_RRR(    "smlatb    r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "smlatbge  r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "smlatb    lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe10f32a1 @ smlatb pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe10f32a1) " @ smlatb pc, r1, r2, r3")
        TEST_RRR(    "smlabt    r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "smlabtge  r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "smlabt    lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe10f32c1 @ smlabt pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe10f32c1) " @ smlabt pc, r1, r2, r3")
        TEST_RRR(    "smlatt    r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "smlattge  r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "smlatt    lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe10f32e1 @ smlatt pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe10f32e1) " @ smlatt pc, r1, r2, r3")
 
        TEST_RRR(    "smlawb    r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "smlawbge  r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "smlawb    lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe12f3281 @ smlawb pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe12f3281) " @ smlawb pc, r1, r2, r3")
        TEST_RRR(    "smlawt    r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "smlawtge  r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "smlawt    lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe12f32c1 @ smlawt pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe12032cf @ smlawt r0, pc, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe1203fc1 @ smlawt r0, r1, pc, r3")
-       TEST_UNSUPPORTED(".word 0xe120f2c1 @ smlawt r0, r1, r2, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe12f32c1) " @ smlawt pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe12032cf) " @ smlawt r0, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe1203fc1) " @ smlawt r0, r1, pc, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe120f2c1) " @ smlawt r0, r1, r2, pc")
 
        TEST_RR(    "smulwb     r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "smulwbge   r7, r",8, VAL3,", r",9, VAL1,"")
        TEST_R(     "smulwb     lr, r",1, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe12f02a1 @ smulwb pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe12f02a1) " @ smulwb pc, r1, r2")
        TEST_RR(    "smulwt     r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "smulwtge   r7, r",8, VAL3,", r",9, VAL1,"")
        TEST_R(     "smulwt     lr, r",1, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe12f02e1 @ smulwt pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe12f02e1) " @ smulwt pc, r1, r2")
 
        TEST_RRRR(  "smlalbb    r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "smlalbble  r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "smlalbb    r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe14f1382 @ smlalbb pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe141f382 @ smlalbb r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe14f1382) " @ smlalbb pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe141f382) " @ smlalbb r1, pc, r2, r3")
        TEST_RRRR(  "smlaltb    r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "smlaltble  r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "smlaltb    r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe14f13a2 @ smlaltb pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe141f3a2 @ smlaltb r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe14f13a2) " @ smlaltb pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe141f3a2) " @ smlaltb r1, pc, r2, r3")
        TEST_RRRR(  "smlalbt    r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "smlalbtle  r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "smlalbt    r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe14f13c2 @ smlalbt pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe141f3c2 @ smlalbt r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe14f13c2) " @ smlalbt pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe141f3c2) " @ smlalbt r1, pc, r2, r3")
        TEST_RRRR(  "smlaltt    r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "smlalttle  r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "smlaltt    r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe14f13e2 @ smlalbb pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe140f3e2 @ smlalbb r0, pc, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe14013ef @ smlalbb r0, r1, pc, r3")
-       TEST_UNSUPPORTED(".word 0xe1401fe2 @ smlalbb r0, r1, r2, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe14f13e2) " @ smlalbb pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe140f3e2) " @ smlalbb r0, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe14013ef) " @ smlalbb r0, r1, pc, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe1401fe2) " @ smlalbb r0, r1, r2, pc")
 
        TEST_RR(    "smulbb     r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "smulbbge   r7, r",8, VAL3,", r",9, VAL1,"")
        TEST_R(     "smulbb     lr, r",1, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe16f0281 @ smulbb pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe16f0281) " @ smulbb pc, r1, r2")
        TEST_RR(    "smultb     r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "smultbge   r7, r",8, VAL3,", r",9, VAL1,"")
        TEST_R(     "smultb     lr, r",1, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe16f02a1 @ smultb pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe16f02a1) " @ smultb pc, r1, r2")
        TEST_RR(    "smulbt     r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "smulbtge   r7, r",8, VAL3,", r",9, VAL1,"")
        TEST_R(     "smulbt     lr, r",1, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe16f02c1 @ smultb pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe16f02c1) " @ smultb pc, r1, r2")
        TEST_RR(    "smultt     r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "smulttge   r7, r",8, VAL3,", r",9, VAL1,"")
        TEST_R(     "smultt     lr, r",1, VAL2,", r13")
-       TEST_UNSUPPORTED(".word 0xe16f02e1 @ smultt pc, r1, r2")
-       TEST_UNSUPPORTED(".word 0xe16002ef @ smultt r0, pc, r2")
-       TEST_UNSUPPORTED(".word 0xe1600fe1 @ smultt r0, r1, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
 
        TEST_GROUP("Multiply and multiply-accumulate")
 
        TEST_RR(    "mul        r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "mulls      r7, r",8, VAL2,", r",9, VAL2,"")
        TEST_R(     "mul        lr, r",4, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe00f0291 @ mul pc, r1, r2")
-       TEST_UNSUPPORTED(".word 0xe000029f @ mul r0, pc, r2")
-       TEST_UNSUPPORTED(".word 0xe0000f91 @ mul r0, r1, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe00f0291) " @ mul pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc")
        TEST_RR(    "muls       r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "mullss     r7, r",8, VAL2,", r",9, VAL2,"")
        TEST_R(     "muls       lr, r",4, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe01f0291 @ muls pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2")
 
        TEST_RRR(    "mla       r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "mlahi     r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "mla       lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe02f3291 @ mla pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3")
        TEST_RRR(    "mlas      r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(    "mlahis    r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(     "mlas      lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe03f3291 @ mlas pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3")
 
 #if __LINUX_ARM_ARCH__ >= 6
        TEST_RR(  "umaal        r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(  "umaalls      r7, r8, r",9, VAL2,", r",10, VAL1,"")
        TEST_R(   "umaal        lr, r12, r",11,VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe041f392 @ umaal pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
-       TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
+       TEST_UNSUPPORTED(__inst_arm(0xe041f392) " @ umaal pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe04f0392) " @ umaal r0, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0500090) " @ undef")
+       TEST_UNSUPPORTED(__inst_arm(0xe05fff9f) " @ undef")
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
        TEST_RRR(  "mls         r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(  "mlshi       r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
        TEST_RR(   "mls         lr, r",1, VAL2,", r",2, VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe06f3291 @ mls pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe060329f @ mls r0, pc, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0603f91 @ mls r0, r1, pc, r3")
-       TEST_UNSUPPORTED(".word 0xe060f291 @ mls r0, r1, r2, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe06f3291) " @ mls pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe060329f) " @ mls r0, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0603f91) " @ mls r0, r1, pc, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe060f291) " @ mls r0, r1, r2, pc")
 #endif
 
-       TEST_UNSUPPORTED(".word 0xe0700090 @ undef")
-       TEST_UNSUPPORTED(".word 0xe07fff9f @ undef")
+       TEST_UNSUPPORTED(__inst_arm(0xe0700090) " @ undef")
+       TEST_UNSUPPORTED(__inst_arm(0xe07fff9f) " @ undef")
 
        TEST_RR(  "umull        r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(  "umullls      r7, r8, r",9, VAL2,", r",10, VAL1,"")
        TEST_R(   "umull        lr, r12, r",11,VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe081f392 @ umull pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe08f1392 @ umull r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3")
        TEST_RR(  "umulls       r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(  "umulllss     r7, r8, r",9, VAL2,", r",10, VAL1,"")
        TEST_R(   "umulls       lr, r12, r",11,VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe091f392 @ umulls pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe09f1392 @ umulls r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3")
 
        TEST_RRRR(  "umlal      r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "umlalle    r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "umlal      r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe0af1392 @ umlal pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0a1f392 @ umlal r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3")
        TEST_RRRR(  "umlals     r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "umlalles   r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "umlals     r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe0bf1392 @ umlals pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0b1f392 @ umlals r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3")
 
        TEST_RR(  "smull        r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(  "smullls      r7, r8, r",9, VAL2,", r",10, VAL1,"")
        TEST_R(   "smull        lr, r12, r",11,VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe0c1f392 @ smull pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0cf1392 @ smull r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3")
        TEST_RR(  "smulls       r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(  "smulllss     r7, r8, r",9, VAL2,", r",10, VAL1,"")
        TEST_R(   "smulls       lr, r12, r",11,VAL3,", r13")
-       TEST_UNSUPPORTED(".word 0xe0d1f392 @ smulls pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0df1392 @ smulls r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3")
 
        TEST_RRRR(  "smlal      r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "smlalle    r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "smlal      r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe0ef1392 @ smlal pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0e1f392 @ smlal r1, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3")
        TEST_RRRR(  "smlals     r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
        TEST_RRRR(  "smlalles   r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
        TEST_RRR(   "smlals     r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
-       TEST_UNSUPPORTED(".word 0xe0ff1392 @ smlals pc, r1, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0f0f392 @ smlals r0, pc, r2, r3")
-       TEST_UNSUPPORTED(".word 0xe0f0139f @ smlals r0, r1, pc, r3")
-       TEST_UNSUPPORTED(".word 0xe0f01f92 @ smlals r0, r1, r2, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0f0139f) " @ smlals r0, r1, pc, r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe0f01f92) " @ smlals r0, r1, r2, pc")
 
        TEST_GROUP("Synchronization primitives")
 
@@ -435,28 +436,28 @@ void kprobe_arm_test_cases(void)
        TEST_R( "swpvs  r0, r",1,VAL1,", [sp]")
        TEST_RP("swp    sp, r",14,VAL2,", [r",12,13*4,"]")
 #else
-       TEST_UNSUPPORTED(".word 0xe108e097 @ swp        lr, r7, [r8]")
-       TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs      r0, r1, [sp]")
-       TEST_UNSUPPORTED(".word 0xe10cd09e @ swp        sp, r14 [r12]")
+       TEST_UNSUPPORTED(__inst_arm(0xe108e097) " @ swp lr, r7, [r8]")
+       TEST_UNSUPPORTED(__inst_arm(0x610d0091) " @ swpvs       r0, r1, [sp]")
+       TEST_UNSUPPORTED(__inst_arm(0xe10cd09e) " @ swp sp, r14, [r12]")
 #endif
-       TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
-       TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
-       TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
+       TEST_UNSUPPORTED(__inst_arm(0xe102f091) " @ swp pc, r1, [r2]")
+       TEST_UNSUPPORTED(__inst_arm(0xe102009f) " @ swp r0, pc, [r2]")
+       TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]")
 #if __LINUX_ARM_ARCH__ < 6
        TEST_RP("swpb   lr, r",7,VAL2,", [r",8,0,"]")
        TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
 #else
-       TEST_UNSUPPORTED(".word 0xe148e097 @ swpb       lr, r7, [r8]")
-       TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb     r0, r1, [sp]")
+       TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb        lr, r7, [r8]")
+       TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb      r0, r1, [sp]")
 #endif
-       TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
-
-       TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe1200090") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe1300090") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe1500090") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe1600090") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe142f091) " @ swpb pc, r1, [r2]")
+
+       TEST_UNSUPPORTED(__inst_arm(0xe1100090)) /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe1200090)) /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe1300090)) /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe1500090)) /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe1600090)) /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe1700090)) /* Unallocated space */
 #if __LINUX_ARM_ARCH__ >= 6
        TEST_UNSUPPORTED("ldrex r2, [sp]")
 #endif
@@ -476,9 +477,9 @@ void kprobe_arm_test_cases(void)
        TEST_RPR(  "strneh      r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
        TEST_RPR(  "strh        r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
        TEST_RPR(  "strh        r",10,VAL2,", [r",9, 48,"], -r",11,24,"")
-       TEST_UNSUPPORTED(".word 0xe1afc0ba      @ strh r12, [pc, r10]!")
-       TEST_UNSUPPORTED(".word 0xe089f0bb      @ strh pc, [r9], r11")
-       TEST_UNSUPPORTED(".word 0xe089a0bf      @ strh r10, [r9], pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) "       @ strh r12, [pc, r10]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe089f0bb) "       @ strh pc, [r9], r11")
+       TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) "       @ strh r10, [r9], pc")
 
        TEST_PR(   "ldrh        r0, [r",0,  48,", -r",2, 24,"]")
        TEST_PR(   "ldrcsh      r14, [r",13,0, ", r",12, 48,"]")
@@ -486,9 +487,9 @@ void kprobe_arm_test_cases(void)
        TEST_PR(   "ldrcch      r12, [r",11,48,", -r",10,24,"]!")
        TEST_PR(   "ldrh        r2, [r",3,  24,"], r",4, 48,"")
        TEST_PR(   "ldrh        r10, [r",9, 48,"], -r",11,24,"")
-       TEST_UNSUPPORTED(".word 0xe1bfc0ba      @ ldrh r12, [pc, r10]!")
-       TEST_UNSUPPORTED(".word 0xe099f0bb      @ ldrh pc, [r9], r11")
-       TEST_UNSUPPORTED(".word 0xe099a0bf      @ ldrh r10, [r9], pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) "       @ ldrh r12, [pc, r10]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe099f0bb) "       @ ldrh pc, [r9], r11")
+       TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) "       @ ldrh r10, [r9], pc")
 
        TEST_RP(   "strh        r",0, VAL1,", [r",1, 24,", #-2]")
        TEST_RP(   "strmih      r",14,VAL2,", [r",13,0, ", #2]")
@@ -496,8 +497,8 @@ void kprobe_arm_test_cases(void)
        TEST_RP(   "strplh      r",12,VAL2,", [r",11,24,", #-4]!")
        TEST_RP(   "strh        r",2, VAL1,", [r",3, 24,"], #48")
        TEST_RP(   "strh        r",10,VAL2,", [r",9, 64,"], #-48")
-       TEST_UNSUPPORTED(".word 0xe1efc3b0      @ strh r12, [pc, #48]!")
-       TEST_UNSUPPORTED(".word 0xe0c9f3b0      @ strh pc, [r9], #48")
+       TEST_UNSUPPORTED(__inst_arm(0xe1efc3b0) "       @ strh r12, [pc, #48]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) "       @ strh pc, [r9], #48")
 
        TEST_P(    "ldrh        r0, [r",0,  24,", #-2]")
        TEST_P(    "ldrvsh      r14, [r",13,0, ", #2]")
@@ -506,8 +507,8 @@ void kprobe_arm_test_cases(void)
        TEST_P(    "ldrh        r2, [r",3,  24,"], #48")
        TEST_P(    "ldrh        r10, [r",9, 64,"], #-48")
        TEST(      "ldrh        r0, [pc, #0]")
-       TEST_UNSUPPORTED(".word 0xe1ffc3b0      @ ldrh r12, [pc, #48]!")
-       TEST_UNSUPPORTED(".word 0xe0d9f3b0      @ ldrh pc, [r9], #48")
+       TEST_UNSUPPORTED(__inst_arm(0xe1ffc3b0) "       @ ldrh r12, [pc, #48]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) "       @ ldrh pc, [r9], #48")
 
        TEST_PR(   "ldrsb       r0, [r",0,  48,", -r",2, 24,"]")
        TEST_PR(   "ldrhisb     r14, [r",13,0,", r",12,  48,"]")
@@ -515,8 +516,8 @@ void kprobe_arm_test_cases(void)
        TEST_PR(   "ldrlssb     r12, [r",11,48,", -r",10,24,"]!")
        TEST_PR(   "ldrsb       r2, [r",3,  24,"], r",4, 48,"")
        TEST_PR(   "ldrsb       r10, [r",9, 48,"], -r",11,24,"")
-       TEST_UNSUPPORTED(".word 0xe1bfc0da      @ ldrsb r12, [pc, r10]!")
-       TEST_UNSUPPORTED(".word 0xe099f0db      @ ldrsb pc, [r9], r11")
+       TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) "       @ ldrsb r12, [pc, r10]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe099f0db) "       @ ldrsb pc, [r9], r11")
 
        TEST_P(    "ldrsb       r0, [r",0,  24,", #-1]")
        TEST_P(    "ldrgesb     r14, [r",13,0, ", #1]")
@@ -525,8 +526,8 @@ void kprobe_arm_test_cases(void)
        TEST_P(    "ldrsb       r2, [r",3,  24,"], #48")
        TEST_P(    "ldrsb       r10, [r",9, 64,"], #-48")
        TEST(      "ldrsb       r0, [pc, #0]")
-       TEST_UNSUPPORTED(".word 0xe1ffc3d0      @ ldrsb r12, [pc, #48]!")
-       TEST_UNSUPPORTED(".word 0xe0d9f3d0      @ ldrsb pc, [r9], #48")
+       TEST_UNSUPPORTED(__inst_arm(0xe1ffc3d0) "       @ ldrsb r12, [pc, #48]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) "       @ ldrsb pc, [r9], #48")
 
        TEST_PR(   "ldrsh       r0, [r",0,  48,", -r",2, 24,"]")
        TEST_PR(   "ldrgtsh     r14, [r",13,0, ", r",12, 48,"]")
@@ -534,8 +535,8 @@ void kprobe_arm_test_cases(void)
        TEST_PR(   "ldrlesh     r12, [r",11,48,", -r",10,24,"]!")
        TEST_PR(   "ldrsh       r2, [r",3,  24,"], r",4, 48,"")
        TEST_PR(   "ldrsh       r10, [r",9, 48,"], -r",11,24,"")
-       TEST_UNSUPPORTED(".word 0xe1bfc0fa      @ ldrsh r12, [pc, r10]!")
-       TEST_UNSUPPORTED(".word 0xe099f0fb      @ ldrsh pc, [r9], r11")
+       TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) "       @ ldrsh r12, [pc, r10]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) "       @ ldrsh pc, [r9], r11")
 
        TEST_P(    "ldrsh       r0, [r",0,  24,", #-1]")
        TEST_P(    "ldreqsh     r14, [r",13,0 ,", #1]")
@@ -544,8 +545,8 @@ void kprobe_arm_test_cases(void)
        TEST_P(    "ldrsh       r2, [r",3,  24,"], #48")
        TEST_P(    "ldrsh       r10, [r",9, 64,"], #-48")
        TEST(      "ldrsh       r0, [pc, #0]")
-       TEST_UNSUPPORTED(".word 0xe1ffc3f0      @ ldrsh r12, [pc, #48]!")
-       TEST_UNSUPPORTED(".word 0xe0d9f3f0      @ ldrsh pc, [r9], #48")
+       TEST_UNSUPPORTED(__inst_arm(0xe1ffc3f0) "       @ ldrsh r12, [pc, #48]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe0d9f3f0) "       @ ldrsh pc, [r9], #48")
 
 #if __LINUX_ARM_ARCH__ >= 7
        TEST_UNSUPPORTED("strht r1, [r2], r3")
@@ -564,7 +565,7 @@ void kprobe_arm_test_cases(void)
        TEST_RPR(  "strcsd      r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
        TEST_RPR(  "strd        r",2, VAL1,", [r",5, 24,"], r",4,48,"")
        TEST_RPR(  "strd        r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
-       TEST_UNSUPPORTED(".word 0xe1afc0fa      @ strd r12, [pc, r10]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) "       @ strd r12, [pc, r10]!")
 
        TEST_PR(   "ldrd        r0, [r",0, 48,", -r",2,24,"]")
        TEST_PR(   "ldrmid      r8, [r",13,0, ", r",12,48,"]")
@@ -572,10 +573,10 @@ void kprobe_arm_test_cases(void)
        TEST_PR(   "ldrpld      r6, [r",11,48,", -r",10,24,"]!")
        TEST_PR(   "ldrd        r2, [r",5, 24,"], r",4,48,"")
        TEST_PR(   "ldrd        r10, [r",9,48,"], -r",7,24,"")
-       TEST_UNSUPPORTED(".word 0xe1afc0da      @ ldrd r12, [pc, r10]!")
-       TEST_UNSUPPORTED(".word 0xe089f0db      @ ldrd pc, [r9], r11")
-       TEST_UNSUPPORTED(".word 0xe089e0db      @ ldrd lr, [r9], r11")
-       TEST_UNSUPPORTED(".word 0xe089c0df      @ ldrd r12, [r9], pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) "       @ ldrd r12, [pc, r10]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe089f0db) "       @ ldrd pc, [r9], r11")
+       TEST_UNSUPPORTED(__inst_arm(0xe089e0db) "       @ ldrd lr, [r9], r11")
+       TEST_UNSUPPORTED(__inst_arm(0xe089c0df) "       @ ldrd r12, [r9], pc")
 
        TEST_RP(   "strd        r",0, VAL1,", [r",1, 24,", #-8]")
        TEST_RP(   "strvsd      r",8, VAL2,", [r",13,0, ", #8]")
@@ -583,7 +584,7 @@ void kprobe_arm_test_cases(void)
        TEST_RP(   "strvcd      r",12,VAL2,", [r",11,24,", #-16]!")
        TEST_RP(   "strd        r",2, VAL1,", [r",4, 24,"], #48")
        TEST_RP(   "strd        r",10,VAL2,", [r",9, 64,"], #-48")
-       TEST_UNSUPPORTED(".word 0xe1efc3f0      @ strd r12, [pc, #48]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) "       @ strd r12, [pc, #48]!")
 
        TEST_P(    "ldrd        r0, [r",0, 24,", #-8]")
        TEST_P(    "ldrhid      r8, [r",13,0, ", #8]")
@@ -591,9 +592,9 @@ void kprobe_arm_test_cases(void)
        TEST_P(    "ldrlsd      r6, [r",11,24,", #-16]!")
        TEST_P(    "ldrd        r2, [r",5, 24,"], #48")
        TEST_P(    "ldrd        r10, [r",9,6,"], #-48")
-       TEST_UNSUPPORTED(".word 0xe1efc3d0      @ ldrd r12, [pc, #48]!")
-       TEST_UNSUPPORTED(".word 0xe0c9f3d0      @ ldrd pc, [r9], #48")
-       TEST_UNSUPPORTED(".word 0xe0c9e3d0      @ ldrd lr, [r9], #48")
+       TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) "       @ ldrd r12, [pc, #48]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) "       @ ldrd pc, [r9], #48")
+       TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) "       @ ldrd lr, [r9], #48")
 
        TEST_GROUP("Miscellaneous")
 
@@ -601,11 +602,11 @@ void kprobe_arm_test_cases(void)
        TEST("movw      r0, #0")
        TEST("movw      r0, #0xffff")
        TEST("movw      lr, #0xffff")
-       TEST_UNSUPPORTED(".word 0xe300f000      @ movw pc, #0")
+       TEST_UNSUPPORTED(__inst_arm(0xe300f000) "       @ movw pc, #0")
        TEST_R("movt    r",0, VAL1,", #0")
        TEST_R("movt    r",0, VAL2,", #0xffff")
        TEST_R("movt    r",14,VAL1,", #0xffff")
-       TEST_UNSUPPORTED(".word 0xe340f000      @ movt pc, #0")
+       TEST_UNSUPPORTED(__inst_arm(0xe340f000) "       @ movt pc, #0")
 #endif
 
        TEST_UNSUPPORTED("msr   cpsr, 0x13")
@@ -673,20 +674,20 @@ void kprobe_arm_test_cases(void)
 #ifdef CONFIG_THUMB2_KERNEL
        TEST_ARM_TO_THUMB_INTERWORK_P("ldr      pc, [r",0,0,", #15*4]")
 #endif
-       TEST_UNSUPPORTED(".word 0xe5af6008      @ str r6, [pc, #8]!")
-       TEST_UNSUPPORTED(".word 0xe7af6008      @ str r6, [pc, r8]!")
-       TEST_UNSUPPORTED(".word 0xe5bf6008      @ ldr r6, [pc, #8]!")
-       TEST_UNSUPPORTED(".word 0xe7bf6008      @ ldr r6, [pc, r8]!")
-       TEST_UNSUPPORTED(".word 0xe788600f      @ str r6, [r8, pc]")
-       TEST_UNSUPPORTED(".word 0xe798600f      @ ldr r6, [r8, pc]")
+       TEST_UNSUPPORTED(__inst_arm(0xe5af6008) "       @ str r6, [pc, #8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe7af6008) "       @ str r6, [pc, r8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe5bf6008) "       @ ldr r6, [pc, #8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe7bf6008) "       @ ldr r6, [pc, r8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe788600f) "       @ str r6, [r8, pc]")
+       TEST_UNSUPPORTED(__inst_arm(0xe798600f) "       @ ldr r6, [r8, pc]")
 
        LOAD_STORE("b")
-       TEST_UNSUPPORTED(".word 0xe5f7f008      @ ldrb pc, [r7, #8]!")
-       TEST_UNSUPPORTED(".word 0xe7f7f008      @ ldrb pc, [r7, r8]!")
-       TEST_UNSUPPORTED(".word 0xe5ef6008      @ strb r6, [pc, #8]!")
-       TEST_UNSUPPORTED(".word 0xe7ef6008      @ strb r6, [pc, r3]!")
-       TEST_UNSUPPORTED(".word 0xe5ff6008      @ ldrb r6, [pc, #8]!")
-       TEST_UNSUPPORTED(".word 0xe7ff6008      @ ldrb r6, [pc, r3]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe5f7f008) "       @ ldrb pc, [r7, #8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe7f7f008) "       @ ldrb pc, [r7, r8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe5ef6008) "       @ strb r6, [pc, #8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe7ef6008) "       @ strb r6, [pc, r3]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe5ff6008) "       @ ldrb r6, [pc, #8]!")
+       TEST_UNSUPPORTED(__inst_arm(0xe7ff6008) "       @ ldrb r6, [pc, r3]!")
 
        TEST_UNSUPPORTED("ldrt  r0, [r1], #4")
        TEST_UNSUPPORTED("ldrt  r1, [r2], r3")
@@ -700,153 +701,153 @@ void kprobe_arm_test_cases(void)
 #if __LINUX_ARM_ARCH__ >= 7
        TEST_GROUP("Parallel addition and subtraction, signed")
 
-       TEST_UNSUPPORTED(".word 0xe6000010") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe60fffff") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe6000010) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe60fffff) "") /* Unallocated space */
 
        TEST_RR(    "sadd16     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sadd16     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe61cff1a      @ sadd16        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe61cff1a) "       @ sadd16        pc, r12, r10")
        TEST_RR(    "sasx       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sasx       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe61cff3a      @ sasx  pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe61cff3a) "       @ sasx  pc, r12, r10")
        TEST_RR(    "ssax       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "ssax       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe61cff5a      @ ssax  pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe61cff5a) "       @ ssax  pc, r12, r10")
        TEST_RR(    "ssub16     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "ssub16     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe61cff7a      @ ssub16        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe61cff7a) "       @ ssub16        pc, r12, r10")
        TEST_RR(    "sadd8      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sadd8      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe61cff9a      @ sadd8 pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe61000b0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe61fffbf") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe61000d0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe61fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe61cff9a) "       @ sadd8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe61000b0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe61fffbf) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe61000d0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe61fffdf) "") /* Unallocated space */
        TEST_RR(    "ssub8      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "ssub8      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe61cfffa      @ ssub8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe61cfffa) "       @ ssub8 pc, r12, r10")
 
        TEST_RR(    "qadd16     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "qadd16     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe62cff1a      @ qadd16        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe62cff1a) "       @ qadd16        pc, r12, r10")
        TEST_RR(    "qasx       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "qasx       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe62cff3a      @ qasx  pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe62cff3a) "       @ qasx  pc, r12, r10")
        TEST_RR(    "qsax       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "qsax       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe62cff5a      @ qsax  pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe62cff5a) "       @ qsax  pc, r12, r10")
        TEST_RR(    "qsub16     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "qsub16     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe62cff7a      @ qsub16        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe62cff7a) "       @ qsub16        pc, r12, r10")
        TEST_RR(    "qadd8      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "qadd8      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe62cff9a      @ qadd8 pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe62000b0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe62fffbf") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe62000d0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe62fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe62cff9a) "       @ qadd8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe62000b0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe62fffbf) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe62000d0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe62fffdf) "") /* Unallocated space */
        TEST_RR(    "qsub8      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "qsub8      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe62cfffa      @ qsub8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe62cfffa) "       @ qsub8 pc, r12, r10")
 
        TEST_RR(    "shadd16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "shadd16    r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe63cff1a      @ shadd16       pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe63cff1a) "       @ shadd16       pc, r12, r10")
        TEST_RR(    "shasx      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "shasx      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe63cff3a      @ shasx pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe63cff3a) "       @ shasx pc, r12, r10")
        TEST_RR(    "shsax      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "shsax      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe63cff5a      @ shsax pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe63cff5a) "       @ shsax pc, r12, r10")
        TEST_RR(    "shsub16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "shsub16    r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe63cff7a      @ shsub16       pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe63cff7a) "       @ shsub16       pc, r12, r10")
        TEST_RR(    "shadd8     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "shadd8     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe63cff9a      @ shadd8        pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe63000b0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe63fffbf") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe63000d0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe63fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe63cff9a) "       @ shadd8        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe63000b0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe63fffbf) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe63000d0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe63fffdf) "") /* Unallocated space */
        TEST_RR(    "shsub8     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "shsub8     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe63cfffa      @ shsub8        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe63cfffa) "       @ shsub8        pc, r12, r10")
 
        TEST_GROUP("Parallel addition and subtraction, unsigned")
 
-       TEST_UNSUPPORTED(".word 0xe6400010") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe64fffff") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe6400010) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe64fffff) "") /* Unallocated space */
 
        TEST_RR(    "uadd16     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uadd16     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe65cff1a      @ uadd16        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe65cff1a) "       @ uadd16        pc, r12, r10")
        TEST_RR(    "uasx       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uasx       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe65cff3a      @ uasx  pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe65cff3a) "       @ uasx  pc, r12, r10")
        TEST_RR(    "usax       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "usax       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe65cff5a      @ usax  pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe65cff5a) "       @ usax  pc, r12, r10")
        TEST_RR(    "usub16     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "usub16     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe65cff7a      @ usub16        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe65cff7a) "       @ usub16        pc, r12, r10")
        TEST_RR(    "uadd8      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uadd8      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe65cff9a      @ uadd8 pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe65000b0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe65fffbf") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe65000d0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe65fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe65cff9a) "       @ uadd8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe65000b0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe65fffbf) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe65000d0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe65fffdf) "") /* Unallocated space */
        TEST_RR(    "usub8      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "usub8      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe65cfffa      @ usub8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe65cfffa) "       @ usub8 pc, r12, r10")
 
        TEST_RR(    "uqadd16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uqadd16    r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe66cff1a      @ uqadd16       pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe66cff1a) "       @ uqadd16       pc, r12, r10")
        TEST_RR(    "uqasx      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uqasx      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe66cff3a      @ uqasx pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe66cff3a) "       @ uqasx pc, r12, r10")
        TEST_RR(    "uqsax      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uqsax      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe66cff5a      @ uqsax pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe66cff5a) "       @ uqsax pc, r12, r10")
        TEST_RR(    "uqsub16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uqsub16    r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe66cff7a      @ uqsub16       pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe66cff7a) "       @ uqsub16       pc, r12, r10")
        TEST_RR(    "uqadd8     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uqadd8     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe66cff9a      @ uqadd8        pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe66000b0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe66fffbf") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe66000d0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe66fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe66cff9a) "       @ uqadd8        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe66000b0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe66fffbf) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe66000d0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe66fffdf) "") /* Unallocated space */
        TEST_RR(    "uqsub8     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uqsub8     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe66cfffa      @ uqsub8        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe66cfffa) "       @ uqsub8        pc, r12, r10")
 
        TEST_RR(    "uhadd16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uhadd16    r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe67cff1a      @ uhadd16       pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67cff1a) "       @ uhadd16       pc, r12, r10")
        TEST_RR(    "uhasx      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uhasx      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe67cff3a      @ uhasx pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67cff3a) "       @ uhasx pc, r12, r10")
        TEST_RR(    "uhsax      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uhsax      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe67cff5a      @ uhsax pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67cff5a) "       @ uhsax pc, r12, r10")
        TEST_RR(    "uhsub16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uhsub16    r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe67cff7a      @ uhsub16       pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67cff7a) "       @ uhsub16       pc, r12, r10")
        TEST_RR(    "uhadd8     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uhadd8     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe67cff9a      @ uhadd8        pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe67000b0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe67fffbf") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe67000d0") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe67fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe67cff9a) "       @ uhadd8        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67000b0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe67fffbf) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe67000d0) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe67fffdf) "") /* Unallocated space */
        TEST_RR(    "uhsub8     r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uhsub8     r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe67cfffa      @ uhsub8        pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe67feffa      @ uhsub8        r14, pc, r10")
-       TEST_UNSUPPORTED(".word 0xe67cefff      @ uhsub8        r14, r12, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe67cfffa) "       @ uhsub8        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67feffa) "       @ uhsub8        r14, pc, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe67cefff) "       @ uhsub8        r14, r12, pc")
 #endif /* __LINUX_ARM_ARCH__ >= 7 */
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -854,99 +855,99 @@ void kprobe_arm_test_cases(void)
 
        TEST_RR(    "pkhbt      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "pkhbt      r14,r",12, HH1,", r",10,HH2,", lsl #2")
-       TEST_UNSUPPORTED(".word 0xe68cf11a      @ pkhbt pc, r12, r10, lsl #2")
+       TEST_UNSUPPORTED(__inst_arm(0xe68cf11a) "       @ pkhbt pc, r12, r10, lsl #2")
        TEST_RR(    "pkhtb      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "pkhtb      r14,r",12, HH1,", r",10,HH2,", asr #2")
-       TEST_UNSUPPORTED(".word 0xe68cf15a      @ pkhtb pc, r12, r10, asr #2")
-       TEST_UNSUPPORTED(".word 0xe68fe15a      @ pkhtb r14, pc, r10, asr #2")
-       TEST_UNSUPPORTED(".word 0xe68ce15f      @ pkhtb r14, r12, pc, asr #2")
-       TEST_UNSUPPORTED(".word 0xe6900010") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe69fffdf") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe68cf15a) "       @ pkhtb pc, r12, r10, asr #2")
+       TEST_UNSUPPORTED(__inst_arm(0xe68fe15a) "       @ pkhtb r14, pc, r10, asr #2")
+       TEST_UNSUPPORTED(__inst_arm(0xe68ce15f) "       @ pkhtb r14, r12, pc, asr #2")
+       TEST_UNSUPPORTED(__inst_arm(0xe6900010) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe69fffdf) "") /* Unallocated space */
 
        TEST_R(     "ssat       r0, #24, r",0,   VAL1,"")
        TEST_R(     "ssat       r14, #24, r",12, VAL2,"")
        TEST_R(     "ssat       r0, #24, r",0,   VAL1,", lsl #8")
        TEST_R(     "ssat       r14, #24, r",12, VAL2,", asr #8")
-       TEST_UNSUPPORTED(".word 0xe6b7f01c      @ ssat  pc, #24, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6b7f01c) "       @ ssat  pc, #24, r12")
 
        TEST_R(     "usat       r0, #24, r",0,   VAL1,"")
        TEST_R(     "usat       r14, #24, r",12, VAL2,"")
        TEST_R(     "usat       r0, #24, r",0,   VAL1,", lsl #8")
        TEST_R(     "usat       r14, #24, r",12, VAL2,", asr #8")
-       TEST_UNSUPPORTED(".word 0xe6f7f01c      @ usat  pc, #24, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6f7f01c) "       @ usat  pc, #24, r12")
 
        TEST_RR(    "sxtab16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sxtab16    r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "sxtb16     r8, r",7,  HH1,"")
-       TEST_UNSUPPORTED(".word 0xe68cf47a      @ sxtab16       pc,r12, r10, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe68cf47a) "       @ sxtab16       pc,r12, r10, ror #8")
 
        TEST_RR(    "sel        r0, r",0,  VAL1,", r",1, VAL2,"")
        TEST_RR(    "sel        r14, r",12,VAL1,", r",10, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe68cffba      @ sel   pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe68fefba      @ sel   r14, pc, r10")
-       TEST_UNSUPPORTED(".word 0xe68cefbf      @ sel   r14, r12, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe68cffba) "       @ sel   pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe68fefba) "       @ sel   r14, pc, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe68cefbf) "       @ sel   r14, r12, pc")
 
        TEST_R(     "ssat16     r0, #12, r",0,   HH1,"")
        TEST_R(     "ssat16     r14, #12, r",12, HH2,"")
-       TEST_UNSUPPORTED(".word 0xe6abff3c      @ ssat16        pc, #12, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6abff3c) "       @ ssat16        pc, #12, r12")
 
        TEST_RR(    "sxtab      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sxtab      r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "sxtb       r8, r",7,  HH1,"")
-       TEST_UNSUPPORTED(".word 0xe6acf47a      @ sxtab pc,r12, r10, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe6acf47a) "       @ sxtab pc,r12, r10, ror #8")
 
        TEST_R(     "rev        r0, r",0,   VAL1,"")
        TEST_R(     "rev        r14, r",12, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe6bfff3c      @ rev   pc, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6bfff3c) "       @ rev   pc, r12")
 
        TEST_RR(    "sxtah      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sxtah      r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "sxth       r8, r",7,  HH1,"")
-       TEST_UNSUPPORTED(".word 0xe6bcf47a      @ sxtah pc,r12, r10, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe6bcf47a) "       @ sxtah pc,r12, r10, ror #8")
 
        TEST_R(     "rev16      r0, r",0,   VAL1,"")
        TEST_R(     "rev16      r14, r",12, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe6bfffbc      @ rev16 pc, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6bfffbc) "       @ rev16 pc, r12")
 
        TEST_RR(    "uxtab16    r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uxtab16    r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "uxtb16     r8, r",7,  HH1,"")
-       TEST_UNSUPPORTED(".word 0xe6ccf47a      @ uxtab16       pc,r12, r10, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ccf47a) "       @ uxtab16       pc,r12, r10, ror #8")
 
        TEST_R(     "usat16     r0, #12, r",0,   HH1,"")
        TEST_R(     "usat16     r14, #12, r",12, HH2,"")
-       TEST_UNSUPPORTED(".word 0xe6ecff3c      @ usat16        pc, #12, r12")
-       TEST_UNSUPPORTED(".word 0xe6ecef3f      @ usat16        r14, #12, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ecff3c) "       @ usat16        pc, #12, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ecef3f) "       @ usat16        r14, #12, pc")
 
        TEST_RR(    "uxtab      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uxtab      r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "uxtb       r8, r",7,  HH1,"")
-       TEST_UNSUPPORTED(".word 0xe6ecf47a      @ uxtab pc,r12, r10, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ecf47a) "       @ uxtab pc,r12, r10, ror #8")
 
 #if __LINUX_ARM_ARCH__ >= 7
        TEST_R(     "rbit       r0, r",0,   VAL1,"")
        TEST_R(     "rbit       r14, r",12, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe6ffff3c      @ rbit  pc, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) "       @ rbit  pc, r12")
 #endif
 
        TEST_RR(    "uxtah      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uxtah      r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "uxth       r8, r",7,  HH1,"")
-       TEST_UNSUPPORTED(".word 0xe6fff077      @ uxth  pc, r7")
-       TEST_UNSUPPORTED(".word 0xe6ff807f      @ uxth  r8, pc")
-       TEST_UNSUPPORTED(".word 0xe6fcf47a      @ uxtah pc, r12, r10, ror #8")
-       TEST_UNSUPPORTED(".word 0xe6fce47f      @ uxtah r14, r12, pc, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe6fff077) "       @ uxth  pc, r7")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ff807f) "       @ uxth  r8, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe6fcf47a) "       @ uxtah pc, r12, r10, ror #8")
+       TEST_UNSUPPORTED(__inst_arm(0xe6fce47f) "       @ uxtah r14, r12, pc, ror #8")
 
        TEST_R(     "revsh      r0, r",0,   VAL1,"")
        TEST_R(     "revsh      r14, r",12, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe6ffff3c      @ revsh pc, r12")
-       TEST_UNSUPPORTED(".word 0xe6ffef3f      @ revsh r14, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) "       @ revsh pc, r12")
+       TEST_UNSUPPORTED(__inst_arm(0xe6ffef3f) "       @ revsh r14, pc")
 
-       TEST_UNSUPPORTED(".word 0xe6900070") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe69fff7f") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe6900070) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe69fff7f) "") /* Unallocated space */
 
-       TEST_UNSUPPORTED(".word 0xe6d00070") /* Unallocated space */
-       TEST_UNSUPPORTED(".word 0xe6dfff7f") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe6d00070) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_arm(0xe6dfff7f) "") /* Unallocated space */
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -954,79 +955,79 @@ void kprobe_arm_test_cases(void)
 
        TEST_RRR(   "smlad      r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
        TEST_RRR(   "smlad      r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe70f8a1c      @ smlad pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe70f8a1c) "       @ smlad pc, r12, r10, r8")
        TEST_RRR(   "smladx     r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
        TEST_RRR(   "smladx     r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe70f8a3c      @ smladx        pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe70f8a3c) "       @ smladx        pc, r12, r10, r8")
 
        TEST_RR(   "smuad       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(   "smuad       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe70ffa1c      @ smuad pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe70ffa1c) "       @ smuad pc, r12, r10")
        TEST_RR(   "smuadx      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(   "smuadx      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe70ffa3c      @ smuadx        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe70ffa3c) "       @ smuadx        pc, r12, r10")
 
        TEST_RRR(   "smlsd      r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
        TEST_RRR(   "smlsd      r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe70f8a5c      @ smlsd pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe70f8a5c) "       @ smlsd pc, r12, r10, r8")
        TEST_RRR(   "smlsdx     r0, r",0,  HH1,", r",1, HH2,", r",2, VAL1,"")
        TEST_RRR(   "smlsdx     r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe70f8a7c      @ smlsdx        pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe70f8a7c) "       @ smlsdx        pc, r12, r10, r8")
 
        TEST_RR(   "smusd       r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(   "smusd       r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe70ffa5c      @ smusd pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe70ffa5c) "       @ smusd pc, r12, r10")
        TEST_RR(   "smusdx      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(   "smusdx      r14, r",12,HH2,", r",10,HH1,"")
-       TEST_UNSUPPORTED(".word 0xe70ffa7c      @ smusdx        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe70ffa7c) "       @ smusdx        pc, r12, r10")
 
        TEST_RRRR( "smlald      r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
        TEST_RRRR( "smlald      r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
-       TEST_UNSUPPORTED(".word 0xe74af819      @ smlald        pc, r10, r9, r8")
-       TEST_UNSUPPORTED(".word 0xe74fb819      @ smlald        r11, pc, r9, r8")
-       TEST_UNSUPPORTED(".word 0xe74ab81f      @ smlald        r11, r10, pc, r8")
-       TEST_UNSUPPORTED(".word 0xe74abf19      @ smlald        r11, r10, r9, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe74af819) "       @ smlald        pc, r10, r9, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe74fb819) "       @ smlald        r11, pc, r9, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe74ab81f) "       @ smlald        r11, r10, pc, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe74abf19) "       @ smlald        r11, r10, r9, pc")
 
        TEST_RRRR( "smlaldx     r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
        TEST_RRRR( "smlaldx     r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
-       TEST_UNSUPPORTED(".word 0xe74af839      @ smlaldx       pc, r10, r9, r8")
-       TEST_UNSUPPORTED(".word 0xe74fb839      @ smlaldx       r11, pc, r9, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe74af839) "       @ smlaldx       pc, r10, r9, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe74fb839) "       @ smlaldx       r11, pc, r9, r8")
 
        TEST_RRR(  "smmla       r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
        TEST_RRR(  "smmla       r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe75f8a1c      @ smmla pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe75f8a1c) "       @ smmla pc, r12, r10, r8")
        TEST_RRR(  "smmlar      r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
        TEST_RRR(  "smmlar      r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe75f8a3c      @ smmlar        pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe75f8a3c) "       @ smmlar        pc, r12, r10, r8")
 
        TEST_RR(   "smmul       r0, r",0,  VAL1,", r",1, VAL2,"")
        TEST_RR(   "smmul       r14, r",12,VAL2,", r",10,VAL1,"")
-       TEST_UNSUPPORTED(".word 0xe75ffa1c      @ smmul pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) "       @ smmul pc, r12, r10")
        TEST_RR(   "smmulr      r0, r",0,  VAL1,", r",1, VAL2,"")
        TEST_RR(   "smmulr      r14, r",12,VAL2,", r",10,VAL1,"")
-       TEST_UNSUPPORTED(".word 0xe75ffa3c      @ smmulr        pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe75ffa3c) "       @ smmulr        pc, r12, r10")
 
        TEST_RRR(  "smmls       r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
        TEST_RRR(  "smmls       r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe75f8adc      @ smmls pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe75f8adc) "       @ smmls pc, r12, r10, r8")
        TEST_RRR(  "smmlsr      r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL1,"")
        TEST_RRR(  "smmlsr      r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
-       TEST_UNSUPPORTED(".word 0xe75f8afc      @ smmlsr        pc, r12, r10, r8")
-       TEST_UNSUPPORTED(".word 0xe75e8aff      @ smmlsr        r14, pc, r10, r8")
-       TEST_UNSUPPORTED(".word 0xe75e8ffc      @ smmlsr        r14, r12, pc, r8")
-       TEST_UNSUPPORTED(".word 0xe75efafc      @ smmlsr        r14, r12, r10, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe75f8afc) "       @ smmlsr        pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe75e8aff) "       @ smmlsr        r14, pc, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe75e8ffc) "       @ smmlsr        r14, r12, pc, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe75efafc) "       @ smmlsr        r14, r12, r10, pc")
 
        TEST_RR(   "usad8       r0, r",0,  VAL1,", r",1, VAL2,"")
        TEST_RR(   "usad8       r14, r",12,VAL2,", r",10,VAL1,"")
-       TEST_UNSUPPORTED(".word 0xe75ffa1c      @ usad8 pc, r12, r10")
-       TEST_UNSUPPORTED(".word 0xe75efa1f      @ usad8 r14, pc, r10")
-       TEST_UNSUPPORTED(".word 0xe75eff1c      @ usad8 r14, r12, pc")
+       TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) "       @ usad8 pc, r12, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe75efa1f) "       @ usad8 r14, pc, r10")
+       TEST_UNSUPPORTED(__inst_arm(0xe75eff1c) "       @ usad8 r14, r12, pc")
 
        TEST_RRR(  "usada8      r0, r",0,  VAL1,", r",1, VAL2,", r",2, VAL3,"")
        TEST_RRR(  "usada8      r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
-       TEST_UNSUPPORTED(".word 0xe78f8a1c      @ usada8        pc, r12, r10, r8")
-       TEST_UNSUPPORTED(".word 0xe78e8a1f      @ usada8        r14, pc, r10, r8")
-       TEST_UNSUPPORTED(".word 0xe78e8f1c      @ usada8        r14, r12, pc, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe78f8a1c) "       @ usada8        pc, r12, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe78e8a1f) "       @ usada8        r14, pc, r10, r8")
+       TEST_UNSUPPORTED(__inst_arm(0xe78e8f1c) "       @ usada8        r14, r12, pc, r8")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #if __LINUX_ARM_ARCH__ >= 7
@@ -1035,26 +1036,26 @@ void kprobe_arm_test_cases(void)
        TEST_R(     "sbfx       r0, r",0  , VAL1,", #0, #31")
        TEST_R(     "sbfxeq     r14, r",12, VAL2,", #8, #16")
        TEST_R(     "sbfx       r4, r",10,  VAL1,", #16, #15")
-       TEST_UNSUPPORTED(".word 0xe7aff45c      @ sbfx  pc, r12, #8, #16")
+       TEST_UNSUPPORTED(__inst_arm(0xe7aff45c) "       @ sbfx  pc, r12, #8, #16")
 
        TEST_R(     "ubfx       r0, r",0  , VAL1,", #0, #31")
        TEST_R(     "ubfxcs     r14, r",12, VAL2,", #8, #16")
        TEST_R(     "ubfx       r4, r",10,  VAL1,", #16, #15")
-       TEST_UNSUPPORTED(".word 0xe7eff45c      @ ubfx  pc, r12, #8, #16")
-       TEST_UNSUPPORTED(".word 0xe7efc45f      @ ubfx  r12, pc, #8, #16")
+       TEST_UNSUPPORTED(__inst_arm(0xe7eff45c) "       @ ubfx  pc, r12, #8, #16")
+       TEST_UNSUPPORTED(__inst_arm(0xe7efc45f) "       @ ubfx  r12, pc, #8, #16")
 
        TEST_R(     "bfc        r",0, VAL1,", #4, #20")
        TEST_R(     "bfcvs      r",14,VAL2,", #4, #20")
        TEST_R(     "bfc        r",7, VAL1,", #0, #31")
        TEST_R(     "bfc        r",8, VAL2,", #0, #31")
-       TEST_UNSUPPORTED(".word 0xe7def01f      @ bfc   pc, #0, #31");
+       TEST_UNSUPPORTED(__inst_arm(0xe7def01f) "       @ bfc   pc, #0, #31");
 
        TEST_RR(    "bfi        r",0, VAL1,", r",0  , VAL2,", #0, #31")
        TEST_RR(    "bfipl      r",12,VAL1,", r",14 , VAL2,", #4, #20")
-       TEST_UNSUPPORTED(".word 0xe7d7f21e      @ bfi   pc, r14, #4, #20")
+       TEST_UNSUPPORTED(__inst_arm(0xe7d7f21e) "       @ bfi   pc, r14, #4, #20")
 
-       TEST_UNSUPPORTED(".word 0x07f000f0")  /* Permanently UNDEFINED */
-       TEST_UNSUPPORTED(".word 0x07ffffff")  /* Permanently UNDEFINED */
+       TEST_UNSUPPORTED(__inst_arm(0x07f000f0) "")  /* Permanently UNDEFINED */
+       TEST_UNSUPPORTED(__inst_arm(0x07ffffff) "")  /* Permanently UNDEFINED */
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
        TEST_GROUP("Branch, branch with link, and block data transfer")
@@ -1181,43 +1182,43 @@ void kprobe_arm_test_cases(void)
                                                                                \
        TEST_COPROCESSOR( "stc"two"     0, cr0, [r15, #4]")                     \
        TEST_COPROCESSOR( "stc"two"     0, cr0, [r15, #-4]")                    \
-       TEST_UNSUPPORTED(".word 0x"cc"daf0001   @ stc"two"      0, cr0, [r15, #4]!")    \
-       TEST_UNSUPPORTED(".word 0x"cc"d2f0001   @ stc"two"      0, cr0, [r15, #-4]!")   \
-       TEST_UNSUPPORTED(".word 0x"cc"caf0001   @ stc"two"      0, cr0, [r15], #4")     \
-       TEST_UNSUPPORTED(".word 0x"cc"c2f0001   @ stc"two"      0, cr0, [r15], #-4")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) "  @ stc"two"      0, cr0, [r15, #4]!")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) "  @ stc"two"      0, cr0, [r15, #-4]!")   \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) "  @ stc"two"      0, cr0, [r15], #4")     \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) "  @ stc"two"      0, cr0, [r15], #-4")    \
        TEST_COPROCESSOR( "stc"two"     0, cr0, [r15], {1}")                    \
        TEST_COPROCESSOR( "stc"two"l    0, cr0, [r15, #4]")                     \
        TEST_COPROCESSOR( "stc"two"l    0, cr0, [r15, #-4]")                    \
-       TEST_UNSUPPORTED(".word 0x"cc"def0001   @ stc"two"l     0, cr0, [r15, #4]!")    \
-       TEST_UNSUPPORTED(".word 0x"cc"d6f0001   @ stc"two"l     0, cr0, [r15, #-4]!")   \
-       TEST_UNSUPPORTED(".word 0x"cc"cef0001   @ stc"two"l     0, cr0, [r15], #4")     \
-       TEST_UNSUPPORTED(".word 0x"cc"c6f0001   @ stc"two"l     0, cr0, [r15], #-4")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) "  @ stc"two"l     0, cr0, [r15, #4]!")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) "  @ stc"two"l     0, cr0, [r15, #-4]!")   \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) "  @ stc"two"l     0, cr0, [r15], #4")     \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) "  @ stc"two"l     0, cr0, [r15], #-4")    \
        TEST_COPROCESSOR( "stc"two"l    0, cr0, [r15], {1}")                    \
        TEST_COPROCESSOR( "ldc"two"     0, cr0, [r15, #4]")                     \
        TEST_COPROCESSOR( "ldc"two"     0, cr0, [r15, #-4]")                    \
-       TEST_UNSUPPORTED(".word 0x"cc"dbf0001   @ ldc"two"      0, cr0, [r15, #4]!")    \
-       TEST_UNSUPPORTED(".word 0x"cc"d3f0001   @ ldc"two"      0, cr0, [r15, #-4]!")   \
-       TEST_UNSUPPORTED(".word 0x"cc"cbf0001   @ ldc"two"      0, cr0, [r15], #4")     \
-       TEST_UNSUPPORTED(".word 0x"cc"c3f0001   @ ldc"two"      0, cr0, [r15], #-4")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) "  @ ldc"two"      0, cr0, [r15, #4]!")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) "  @ ldc"two"      0, cr0, [r15, #-4]!")   \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) "  @ ldc"two"      0, cr0, [r15], #4")     \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) "  @ ldc"two"      0, cr0, [r15], #-4")    \
        TEST_COPROCESSOR( "ldc"two"     0, cr0, [r15], {1}")                    \
        TEST_COPROCESSOR( "ldc"two"l    0, cr0, [r15, #4]")                     \
        TEST_COPROCESSOR( "ldc"two"l    0, cr0, [r15, #-4]")                    \
-       TEST_UNSUPPORTED(".word 0x"cc"dff0001   @ ldc"two"l     0, cr0, [r15, #4]!")    \
-       TEST_UNSUPPORTED(".word 0x"cc"d7f0001   @ ldc"two"l     0, cr0, [r15, #-4]!")   \
-       TEST_UNSUPPORTED(".word 0x"cc"cff0001   @ ldc"two"l     0, cr0, [r15], #4")     \
-       TEST_UNSUPPORTED(".word 0x"cc"c7f0001   @ ldc"two"l     0, cr0, [r15], #-4")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) "  @ ldc"two"l     0, cr0, [r15, #4]!")    \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) "  @ ldc"two"l     0, cr0, [r15, #-4]!")   \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) "  @ ldc"two"l     0, cr0, [r15], #4")     \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) "  @ ldc"two"l     0, cr0, [r15], #-4")    \
        TEST_COPROCESSOR( "ldc"two"l    0, cr0, [r15], {1}")
 
 #define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc)                                 \
                                                                                \
        TEST_COPROCESSOR( "mcrr"two"    0, 15, r0, r14, cr0")                   \
        TEST_COPROCESSOR( "mcrr"two"    15, 0, r14, r0, cr15")                  \
-       TEST_UNSUPPORTED(".word 0x"cc"c4f00f0   @ mcrr"two"     0, 15, r0, r15, cr0")   \
-       TEST_UNSUPPORTED(".word 0x"cc"c40ff0f   @ mcrr"two"     15, 0, r15, r0, cr15")  \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) "  @ mcrr"two"     0, 15, r0, r15, cr0")   \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) "  @ mcrr"two"     15, 0, r15, r0, cr15")  \
        TEST_COPROCESSOR( "mrrc"two"    0, 15, r0, r14, cr0")                   \
        TEST_COPROCESSOR( "mrrc"two"    15, 0, r14, r0, cr15")                  \
-       TEST_UNSUPPORTED(".word 0x"cc"c5f00f0   @ mrrc"two"     0, 15, r0, r15, cr0")   \
-       TEST_UNSUPPORTED(".word 0x"cc"c50ff0f   @ mrrc"two"     15, 0, r15, r0, cr15")  \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) "  @ mrrc"two"     0, 15, r0, r15, cr0")   \
+       TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) "  @ mrrc"two"     15, 0, r15, r0, cr15")  \
        TEST_COPROCESSOR( "cdp"two"     15, 15, cr15, cr15, cr15, 7")           \
        TEST_COPROCESSOR( "cdp"two"     0, 0, cr0, cr0, cr0, 0")                \
        TEST_COPROCESSOR( "mcr"two"     15, 7, r15, cr15, cr15, 7")             \
@@ -1225,8 +1226,8 @@ void kprobe_arm_test_cases(void)
        TEST_COPROCESSOR( "mrc"two"     15, 7, r15, cr15, cr15, 7")             \
        TEST_COPROCESSOR( "mrc"two"     0, 0, r0, cr0, cr0, 0")
 
-       COPROCESSOR_INSTRUCTIONS_ST_LD("","e")
-       COPROCESSOR_INSTRUCTIONS_MC_MR("","e")
+       COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
+       COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
        TEST_UNSUPPORTED("svc   0")
        TEST_UNSUPPORTED("svc   0xffffff")
 
@@ -1252,14 +1253,14 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED("rfedb sp!")
        TEST_UNSUPPORTED("rfeia sp!")
        TEST_UNSUPPORTED("rfeib sp!")
-       TEST_UNSUPPORTED(".word 0xf81d0a00      @ rfeda pc")
-       TEST_UNSUPPORTED(".word 0xf91d0a00      @ rfedb pc")
-       TEST_UNSUPPORTED(".word 0xf89d0a00      @ rfeia pc")
-       TEST_UNSUPPORTED(".word 0xf99d0a00      @ rfeib pc")
-       TEST_UNSUPPORTED(".word 0xf83d0a00      @ rfeda pc!")
-       TEST_UNSUPPORTED(".word 0xf93d0a00      @ rfedb pc!")
-       TEST_UNSUPPORTED(".word 0xf8bd0a00      @ rfeia pc!")
-       TEST_UNSUPPORTED(".word 0xf9bd0a00      @ rfeib pc!")
+       TEST_UNSUPPORTED(__inst_arm(0xf81d0a00) "       @ rfeda pc")
+       TEST_UNSUPPORTED(__inst_arm(0xf91d0a00) "       @ rfedb pc")
+       TEST_UNSUPPORTED(__inst_arm(0xf89d0a00) "       @ rfeia pc")
+       TEST_UNSUPPORTED(__inst_arm(0xf99d0a00) "       @ rfeib pc")
+       TEST_UNSUPPORTED(__inst_arm(0xf83d0a00) "       @ rfeda pc!")
+       TEST_UNSUPPORTED(__inst_arm(0xf93d0a00) "       @ rfedb pc!")
+       TEST_UNSUPPORTED(__inst_arm(0xf8bd0a00) "       @ rfeia pc!")
+       TEST_UNSUPPORTED(__inst_arm(0xf9bd0a00) "       @ rfeib pc!")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
 #if __LINUX_ARM_ARCH__ >= 6
@@ -1286,9 +1287,9 @@ void kprobe_arm_test_cases(void)
        TEST(   "blx    __dummy_thumb_subroutine_odd")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
-       COPROCESSOR_INSTRUCTIONS_ST_LD("2","f")
+       COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
 #if __LINUX_ARM_ARCH__ >= 6
-       COPROCESSOR_INSTRUCTIONS_MC_MR("2","f")
+       COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
 #endif
 
        TEST_GROUP("Miscellaneous instructions, memory hints, and Advanced SIMD instructions")
@@ -1318,9 +1319,9 @@ void kprobe_arm_test_cases(void)
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
-       TEST_SUPPORTED(  ".word 0xf590f000      @ pldw [r0, #0]")
-       TEST_SUPPORTED(  ".word 0xf797f000      @ pldw  [r7, r0]")
-       TEST_SUPPORTED(  ".word 0xf798f18c      @ pldw  [r8, r12, lsl #3]");
+       TEST_SUPPORTED(  __inst_arm(0xf590f000) "       @ pldw [r0, #0]")
+       TEST_SUPPORTED(  __inst_arm(0xf797f000) "       @ pldw  [r7, r0]")
+       TEST_SUPPORTED(  __inst_arm(0xf798f18c) "       @ pldw  [r8, r12, lsl #3]");
 #endif
 
 #if __LINUX_ARM_ARCH__ >= 7
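The hunks above replace hand-assembled ".word 0x..." directives with the __inst_arm() helper from <asm/opcodes.h>, so each literal test opcode is emitted in the byte order the kernel image actually uses (on BE8 big-endian builds instructions stay little-endian in memory, unlike data). As a rough sketch of the same idiom outside the test harness — emit_arm_nop() is a made-up name and 0xe1a00000 ("mov r0, r0") is chosen only for illustration:

    /* Sketch: emit a fixed ARM opcode via __inst_arm() instead of a raw
     * .word, so any endianness fixup is applied in one place. */
    #include <asm/opcodes.h>

    static inline void emit_arm_nop(void)
    {
            asm volatile(__inst_arm(0xe1a00000));   /* mov r0, r0 */
    }
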
index 5d8b857922220b4be4e9babf6e050c94625a56eb..844dd10d85939617963d09f47237d2151db58578 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <asm/opcodes.h>
 
 #include "kprobes-test.h"
 
@@ -119,7 +120,7 @@ void kprobe_thumb16_test_cases(void)
        TEST_R(   "add  sp"        ", r",8,-8,  "")
        TEST_R(   "add  r",14,VAL1,", pc")
        TEST_BF_R("add  pc"        ", r",0,2f-1f-8,"")
-       TEST_UNSUPPORTED(".short 0x44ff @ add pc, pc")
+       TEST_UNSUPPORTED(__inst_thumb16(0x44ff) "       @ add pc, pc")
 
        TEST_RR(  "cmp  r",3,VAL1,", r",8,VAL2,"")
        TEST_RR(  "cmp  r",8,VAL2,", r",0,VAL1,"")
@@ -150,7 +151,7 @@ void kprobe_thumb16_test_cases(void)
 
        TEST_BF_R("blx  r",0, 2f+1,"")
        TEST_BB_R("blx  r",14,2f+1,"")
-       TEST_UNSUPPORTED(".short 0x47f8 @ blx pc")
+       TEST_UNSUPPORTED(__inst_thumb16(0x47f8) "       @ blx pc")
 
        TEST_GROUP("Load from Literal Pool")
 
@@ -237,8 +238,8 @@ DONT_TEST_IN_ITBLOCK(
        TEST_R("rev     r7, r",0, VAL2,"")
        TEST_R("rev16   r0, r",7, VAL1,"")
        TEST_R("rev16   r7, r",0, VAL2,"")
-       TEST_UNSUPPORTED(".short 0xba80")
-       TEST_UNSUPPORTED(".short 0xbabf")
+       TEST_UNSUPPORTED(__inst_thumb16(0xba80) "")
+       TEST_UNSUPPORTED(__inst_thumb16(0xbabf) "")
        TEST_R("revsh   r0, r",7, VAL1,"")
        TEST_R("revsh   r7, r",0, VAL2,"")
 
@@ -272,8 +273,8 @@ DONT_TEST_IN_ITBLOCK(
        TEST("nop")
        TEST("wfi")
        TEST_SUPPORTED("wfe")
-       TEST_UNSUPPORTED(".short 0xbf50") /* Unassigned hints */
-       TEST_UNSUPPORTED(".short 0xbff0") /* Unassigned hints */
+       TEST_UNSUPPORTED(__inst_thumb16(0xbf50) "") /* Unassigned hints */
+       TEST_UNSUPPORTED(__inst_thumb16(0xbff0) "") /* Unassigned hints */
 
 #define TEST_IT(code, code2)                   \
        TESTCASE_START(code)                    \
@@ -310,8 +311,8 @@ CONDITION_INSTRUCTIONS(8,
        TEST_BF("bgt    2f")
        TEST_BB("blt    2b")
 )
-       TEST_UNSUPPORTED(".short 0xde00")
-       TEST_UNSUPPORTED(".short 0xdeff")
+       TEST_UNSUPPORTED(__inst_thumb16(0xde00) "")
+       TEST_UNSUPPORTED(__inst_thumb16(0xdeff) "")
        TEST_UNSUPPORTED("svc   #0x00")
        TEST_UNSUPPORTED("svc   #0xff")
 
@@ -380,13 +381,13 @@ void kprobe_thumb32_test_cases(void)
        TEST_THUMB_TO_ARM_INTERWORK_P("ldmia    r",0,14*4,", {r12,pc}")
        TEST_THUMB_TO_ARM_INTERWORK_P("ldmia    r",13,2*4,", {r0-r12,pc}")
 
-       TEST_UNSUPPORTED(".short 0xe88f,0x0101  @ stmia pc, {r0,r8}")
-       TEST_UNSUPPORTED(".short 0xe92f,0x5f00  @ stmdb pc!, {r8-r12,r14}")
-       TEST_UNSUPPORTED(".short 0xe8bd,0xc000  @ ldmia r13!, {r14,pc}")
-       TEST_UNSUPPORTED(".short 0xe93e,0xc000  @ ldmdb r14!, {r14,pc}")
-       TEST_UNSUPPORTED(".short 0xe8a7,0x3f00  @ stmia r7!, {r8-r12,sp}")
-       TEST_UNSUPPORTED(".short 0xe8a7,0x9f00  @ stmia r7!, {r8-r12,pc}")
-       TEST_UNSUPPORTED(".short 0xe93e,0x2010  @ ldmdb r14!, {r4,sp}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe88f0101) "   @ stmia pc, {r0,r8}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe92f5f00) "   @ stmdb pc!, {r8-r12,r14}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8bdc000) "   @ ldmia r13!, {r14,pc}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe93ec000) "   @ ldmdb r14!, {r14,pc}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8a73f00) "   @ stmia r7!, {r8-r12,sp}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8a79f00) "   @ stmia r7!, {r8-r12,pc}")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe93e2010) "   @ ldmdb r14!, {r4,sp}")
 
        TEST_GROUP("Load/store double or exclusive, table branch")
 
@@ -402,12 +403,12 @@ void kprobe_thumb32_test_cases(void)
                "3:     .word   "__stringify(VAL1)"     \n\t"
                "       .word   "__stringify(VAL2))
 
-       TEST_UNSUPPORTED(".short 0xe9ff,0xec04  @ ldrd  r14, r12, [pc, #16]!")
-       TEST_UNSUPPORTED(".short 0xe8ff,0xec04  @ ldrd  r14, r12, [pc], #16")
-       TEST_UNSUPPORTED(".short 0xe9d4,0xd800  @ ldrd  sp, r8, [r4]")
-       TEST_UNSUPPORTED(".short 0xe9d4,0xf800  @ ldrd  pc, r8, [r4]")
-       TEST_UNSUPPORTED(".short 0xe9d4,0x7d00  @ ldrd  r7, sp, [r4]")
-       TEST_UNSUPPORTED(".short 0xe9d4,0x7f00  @ ldrd  r7, pc, [r4]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe9ffec04) "   @ ldrd  r14, r12, [pc, #16]!")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8ffec04) "   @ ldrd  r14, r12, [pc], #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe9d4d800) "   @ ldrd  sp, r8, [r4]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe9d4f800) "   @ ldrd  pc, r8, [r4]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe9d47d00) "   @ ldrd  r7, sp, [r4]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe9d47f00) "   @ ldrd  r7, pc, [r4]")
 
        TEST_RRP("strd  r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
        TEST_RR( "strd  r",12,VAL2,", r",14,VAL1,", [sp, #16]")
@@ -415,8 +416,8 @@ void kprobe_thumb32_test_cases(void)
        TEST_RR( "strd  r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
        TEST_RRP("strd  r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
        TEST_RR( "strd  r",7, VAL2,", r",8, VAL1,", [sp], #-16")
-       TEST_UNSUPPORTED(".short 0xe9ef,0xec04  @ strd  r14, r12, [pc, #16]!")
-       TEST_UNSUPPORTED(".short 0xe8ef,0xec04  @ strd  r14, r12, [pc], #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe9efec04) "   @ strd  r14, r12, [pc, #16]!")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8efec04) "   @ strd  r14, r12, [pc], #16")
 
        TEST_RX("tbb    [pc, r",0, (9f-(1f+4)),"]",
                "9:                     \n\t"
@@ -460,9 +461,9 @@ void kprobe_thumb32_test_cases(void)
                "3:     mvn     r0, r0  \n\t"
                "2:     nop             \n\t")
 
-       TEST_UNSUPPORTED(".short 0xe8d1,0xf01f  @ tbh [r1, pc]")
-       TEST_UNSUPPORTED(".short 0xe8d1,0xf01d  @ tbh [r1, sp]")
-       TEST_UNSUPPORTED(".short 0xe8dd,0xf012  @ tbh [sp, r2]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01f) "   @ tbh [r1, pc]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01d) "   @ tbh [r1, sp]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xe8ddf012) "   @ tbh [sp, r2]")
 
        TEST_UNSUPPORTED("strexb        r0, r1, [r2]")
        TEST_UNSUPPORTED("strexh        r0, r1, [r2]")
@@ -540,40 +541,40 @@ void kprobe_thumb32_test_cases(void)
        TEST_RR("pkhtb  r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR("pkhtb  r14,r",12, HH1,", r",10,HH2,", asr #2")
 
-       TEST_UNSUPPORTED(".short 0xea17,0x0f0d  @ tst.w r7, sp")
-       TEST_UNSUPPORTED(".short 0xea17,0x0f0f  @ tst.w r7, pc")
-       TEST_UNSUPPORTED(".short 0xea1d,0x0f07  @ tst.w sp, r7")
-       TEST_UNSUPPORTED(".short 0xea1f,0x0f07  @ tst.w pc, r7")
-       TEST_UNSUPPORTED(".short 0xf01d,0x1f08  @ tst sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf01f,0x1f08  @ tst pc, #0x00080008")
-
-       TEST_UNSUPPORTED(".short 0xea97,0x0f0d  @ teq.w r7, sp")
-       TEST_UNSUPPORTED(".short 0xea97,0x0f0f  @ teq.w r7, pc")
-       TEST_UNSUPPORTED(".short 0xea9d,0x0f07  @ teq.w sp, r7")
-       TEST_UNSUPPORTED(".short 0xea9f,0x0f07  @ teq.w pc, r7")
-       TEST_UNSUPPORTED(".short 0xf09d,0x1f08  @ tst sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf09f,0x1f08  @ tst pc, #0x00080008")
-
-       TEST_UNSUPPORTED(".short 0xeb17,0x0f0d  @ cmn.w r7, sp")
-       TEST_UNSUPPORTED(".short 0xeb17,0x0f0f  @ cmn.w r7, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea170f0d) "   @ tst.w r7, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea170f0f) "   @ tst.w r7, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea1d0f07) "   @ tst.w sp, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea1f0f07) "   @ tst.w pc, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf01d1f08) "   @ tst sp, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf01f1f08) "   @ tst pc, #0x00080008")
+
+       TEST_UNSUPPORTED(__inst_thumb32(0xea970f0d) "   @ teq.w r7, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea970f0f) "   @ teq.w r7, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea9d0f07) "   @ teq.w sp, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea9f0f07) "   @ teq.w pc, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf09d1f08) "   @ tst sp, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf09f1f08) "   @ tst pc, #0x00080008")
+
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0d) "   @ cmn.w r7, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0f) "   @ cmn.w r7, pc")
        TEST_P("cmn.w   sp, r",7,0,"")
-       TEST_UNSUPPORTED(".short 0xeb1f,0x0f07  @ cmn.w pc, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb1f0f07) "   @ cmn.w pc, r7")
        TEST(  "cmn     sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf11f,0x1f08  @ cmn pc, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf11f1f08) "   @ cmn pc, #0x00080008")
 
-       TEST_UNSUPPORTED(".short 0xebb7,0x0f0d  @ cmp.w r7, sp")
-       TEST_UNSUPPORTED(".short 0xebb7,0x0f0f  @ cmp.w r7, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0d) "   @ cmp.w r7, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0f) "   @ cmp.w r7, pc")
        TEST_P("cmp.w   sp, r",7,0,"")
-       TEST_UNSUPPORTED(".short 0xebbf,0x0f07  @ cmp.w pc, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebbf0f07) "   @ cmp.w pc, r7")
        TEST(  "cmp     sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf1bf,0x1f08  @ cmp pc, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf1bf1f08) "   @ cmp pc, #0x00080008")
 
-       TEST_UNSUPPORTED(".short 0xea5f,0x070d  @ movs.w r7, sp")
-       TEST_UNSUPPORTED(".short 0xea5f,0x070f  @ movs.w r7, pc")
-       TEST_UNSUPPORTED(".short 0xea5f,0x0d07  @ movs.w sp, r7")
-       TEST_UNSUPPORTED(".short 0xea4f,0x0f07  @ mov.w  pc, r7")
-       TEST_UNSUPPORTED(".short 0xf04f,0x1d08  @ mov sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf04f,0x1f08  @ mov pc, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea5f070d) "   @ movs.w r7, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea5f070f) "   @ movs.w r7, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea5f0d07) "   @ movs.w sp, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea4f0f07) "   @ mov.w  pc, r7")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf04f1d08) "   @ mov sp, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf04f1f08) "   @ mov pc, #0x00080008")
 
        TEST_R("add.w   r0, sp, r",1, 4,"")
        TEST_R("adds    r0, sp, r",1, 4,", asl #3")
@@ -581,15 +582,15 @@ void kprobe_thumb32_test_cases(void)
        TEST_R("add     r0, sp, r",1, 16,", ror #1")
        TEST_R("add.w   sp, sp, r",1, 4,"")
        TEST_R("add     sp, sp, r",1, 4,", asl #3")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x1d01  @ add sp, sp, r1, asl #4")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x0d71  @ add sp, sp, r1, ror #1")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d1d01) "   @ add sp, sp, r1, asl #4")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d71) "   @ add sp, sp, r1, ror #1")
        TEST(  "add.w   r0, sp, #24")
        TEST(  "add.w   sp, sp, #24")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x0f01  @ add pc, sp, r1")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x000f  @ add r0, sp, pc")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x000d  @ add r0, sp, sp")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x0d0f  @ add sp, sp, pc")
-       TEST_UNSUPPORTED(".short 0xeb0d,0x0d0d  @ add sp, sp, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0f01) "   @ add pc, sp, r1")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000f) "   @ add r0, sp, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000d) "   @ add r0, sp, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0f) "   @ add sp, sp, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0d) "   @ add sp, sp, sp")
 
        TEST_R("sub.w   r0, sp, r",1, 4,"")
        TEST_R("subs    r0, sp, r",1, 4,", asl #3")
@@ -597,54 +598,54 @@ void kprobe_thumb32_test_cases(void)
        TEST_R("sub     r0, sp, r",1, 16,", ror #1")
        TEST_R("sub.w   sp, sp, r",1, 4,"")
        TEST_R("sub     sp, sp, r",1, 4,", asl #3")
-       TEST_UNSUPPORTED(".short 0xebad,0x1d01  @ sub sp, sp, r1, asl #4")
-       TEST_UNSUPPORTED(".short 0xebad,0x0d71  @ sub sp, sp, r1, ror #1")
-       TEST_UNSUPPORTED(".short 0xebad,0x0f01  @ sub pc, sp, r1")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebad1d01) "   @ sub sp, sp, r1, asl #4")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebad0d71) "   @ sub sp, sp, r1, ror #1")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebad0f01) "   @ sub pc, sp, r1")
        TEST(  "sub.w   r0, sp, #24")
        TEST(  "sub.w   sp, sp, #24")
 
-       TEST_UNSUPPORTED(".short 0xea02,0x010f  @ and r1, r2, pc")
-       TEST_UNSUPPORTED(".short 0xea0f,0x0103  @ and r1, pc, r3")
-       TEST_UNSUPPORTED(".short 0xea02,0x0f03  @ and pc, r2, r3")
-       TEST_UNSUPPORTED(".short 0xea02,0x010d  @ and r1, r2, sp")
-       TEST_UNSUPPORTED(".short 0xea0d,0x0103  @ and r1, sp, r3")
-       TEST_UNSUPPORTED(".short 0xea02,0x0d03  @ and sp, r2, r3")
-       TEST_UNSUPPORTED(".short 0xf00d,0x1108  @ and r1, sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf00f,0x1108  @ and r1, pc, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf002,0x1d08  @ and sp, r8, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf002,0x1f08  @ and pc, r8, #0x00080008")
-
-       TEST_UNSUPPORTED(".short 0xeb02,0x010f  @ add r1, r2, pc")
-       TEST_UNSUPPORTED(".short 0xeb0f,0x0103  @ add r1, pc, r3")
-       TEST_UNSUPPORTED(".short 0xeb02,0x0f03  @ add pc, r2, r3")
-       TEST_UNSUPPORTED(".short 0xeb02,0x010d  @ add r1, r2, sp")
-       TEST_SUPPORTED(  ".short 0xeb0d,0x0103  @ add r1, sp, r3")
-       TEST_UNSUPPORTED(".short 0xeb02,0x0d03  @ add sp, r2, r3")
-       TEST_SUPPORTED(  ".short 0xf10d,0x1108  @ add r1, sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf10d,0x1f08  @ add pc, sp, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf10f,0x1108  @ add r1, pc, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf102,0x1d08  @ add sp, r8, #0x00080008")
-       TEST_UNSUPPORTED(".short 0xf102,0x1f08  @ add pc, r8, #0x00080008")
-
-       TEST_UNSUPPORTED(".short 0xeaa0,0x0000")
-       TEST_UNSUPPORTED(".short 0xeaf0,0x0000")
-       TEST_UNSUPPORTED(".short 0xeb20,0x0000")
-       TEST_UNSUPPORTED(".short 0xeb80,0x0000")
-       TEST_UNSUPPORTED(".short 0xebe0,0x0000")
-
-       TEST_UNSUPPORTED(".short 0xf0a0,0x0000")
-       TEST_UNSUPPORTED(".short 0xf0c0,0x0000")
-       TEST_UNSUPPORTED(".short 0xf0f0,0x0000")
-       TEST_UNSUPPORTED(".short 0xf120,0x0000")
-       TEST_UNSUPPORTED(".short 0xf180,0x0000")
-       TEST_UNSUPPORTED(".short 0xf1e0,0x0000")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea02010f) "   @ and r1, r2, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea0f0103) "   @ and r1, pc, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea020f03) "   @ and pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea02010d) "   @ and r1, r2, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea0d0103) "   @ and r1, sp, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xea020d03) "   @ and sp, r2, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf00d1108) "   @ and r1, sp, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf00f1108) "   @ and r1, pc, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf0021d08) "   @ and sp, r8, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf0021f08) "   @ and pc, r8, #0x00080008")
+
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb02010f) "   @ add r1, r2, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb0f0103) "   @ add r1, pc, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb020f03) "   @ add pc, r2, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb02010d) "   @ add r1, r2, sp")
+       TEST_SUPPORTED(  __inst_thumb32(0xeb0d0103) "   @ add r1, sp, r3")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb020d03) "   @ add sp, r2, r3")
+       TEST_SUPPORTED(  __inst_thumb32(0xf10d1108) "   @ add r1, sp, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf10d1f08) "   @ add pc, sp, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf10f1108) "   @ add r1, pc, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf1021d08) "   @ add sp, r8, #0x00080008")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf1021f08) "   @ add pc, r8, #0x00080008")
+
+       TEST_UNSUPPORTED(__inst_thumb32(0xeaa00000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeaf00000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb200000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeb800000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xebe00000) "")
+
+       TEST_UNSUPPORTED(__inst_thumb32(0xf0a00000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf0c00000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf0f00000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf1200000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf1800000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf1e00000) "")
 
        TEST_GROUP("Coprocessor instructions")
 
-       TEST_UNSUPPORTED(".short 0xec00,0x0000")
-       TEST_UNSUPPORTED(".short 0xeff0,0x0000")
-       TEST_UNSUPPORTED(".short 0xfc00,0x0000")
-       TEST_UNSUPPORTED(".short 0xfff0,0x0000")
+       TEST_UNSUPPORTED(__inst_thumb32(0xec000000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xeff00000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfff00000) "")
 
        TEST_GROUP("Data-processing (plain binary immediate)")
 
@@ -652,92 +653,92 @@ void kprobe_thumb32_test_cases(void)
        TEST(  "addw    r14, sp, #0xf5a")
        TEST(  "addw    sp, sp, #0x20")
        TEST(  "addw    r7,  pc, #0x888")
-       TEST_UNSUPPORTED(".short 0xf20f,0x1f20  @ addw pc, pc, #0x120")
-       TEST_UNSUPPORTED(".short 0xf20d,0x1f20  @ addw pc, sp, #0x120")
-       TEST_UNSUPPORTED(".short 0xf20f,0x1d20  @ addw sp, pc, #0x120")
-       TEST_UNSUPPORTED(".short 0xf200,0x1d20  @ addw sp, r0, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf20f1f20) "   @ addw pc, pc, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf20d1f20) "   @ addw pc, sp, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf20f1d20) "   @ addw sp, pc, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2001d20) "   @ addw sp, r0, #0x120")
 
        TEST_R("subw    r0,  r",1, VAL1,", #0x123")
        TEST(  "subw    r14, sp, #0xf5a")
        TEST(  "subw    sp, sp, #0x20")
        TEST(  "subw    r7,  pc, #0x888")
-       TEST_UNSUPPORTED(".short 0xf2af,0x1f20  @ subw pc, pc, #0x120")
-       TEST_UNSUPPORTED(".short 0xf2ad,0x1f20  @ subw pc, sp, #0x120")
-       TEST_UNSUPPORTED(".short 0xf2af,0x1d20  @ subw sp, pc, #0x120")
-       TEST_UNSUPPORTED(".short 0xf2a0,0x1d20  @ subw sp, r0, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2af1f20) "   @ subw pc, pc, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2ad1f20) "   @ subw pc, sp, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2af1d20) "   @ subw sp, pc, #0x120")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2a01d20) "   @ subw sp, r0, #0x120")
 
        TEST("movw      r0, #0")
        TEST("movw      r0, #0xffff")
        TEST("movw      lr, #0xffff")
-       TEST_UNSUPPORTED(".short 0xf240,0x0d00  @ movw sp, #0")
-       TEST_UNSUPPORTED(".short 0xf240,0x0f00  @ movw pc, #0")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2400d00) "   @ movw sp, #0")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2400f00) "   @ movw pc, #0")
 
        TEST_R("movt    r",0, VAL1,", #0")
        TEST_R("movt    r",0, VAL2,", #0xffff")
        TEST_R("movt    r",14,VAL1,", #0xffff")
-       TEST_UNSUPPORTED(".short 0xf2c0,0x0d00  @ movt sp, #0")
-       TEST_UNSUPPORTED(".short 0xf2c0,0x0f00  @ movt pc, #0")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2c00d00) "   @ movt sp, #0")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf2c00f00) "   @ movt pc, #0")
 
        TEST_R(     "ssat       r0, #24, r",0,   VAL1,"")
        TEST_R(     "ssat       r14, #24, r",12, VAL2,"")
        TEST_R(     "ssat       r0, #24, r",0,   VAL1,", lsl #8")
        TEST_R(     "ssat       r14, #24, r",12, VAL2,", asr #8")
-       TEST_UNSUPPORTED(".short 0xf30c,0x0d17  @ ssat  sp, #24, r12")
-       TEST_UNSUPPORTED(".short 0xf30c,0x0f17  @ ssat  pc, #24, r12")
-       TEST_UNSUPPORTED(".short 0xf30d,0x0c17  @ ssat  r12, #24, sp")
-       TEST_UNSUPPORTED(".short 0xf30f,0x0c17  @ ssat  r12, #24, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf30c0d17) "   @ ssat  sp, #24, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf30c0f17) "   @ ssat  pc, #24, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf30d0c17) "   @ ssat  r12, #24, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf30f0c17) "   @ ssat  r12, #24, pc")
 
        TEST_R(     "usat       r0, #24, r",0,   VAL1,"")
        TEST_R(     "usat       r14, #24, r",12, VAL2,"")
        TEST_R(     "usat       r0, #24, r",0,   VAL1,", lsl #8")
        TEST_R(     "usat       r14, #24, r",12, VAL2,", asr #8")
-       TEST_UNSUPPORTED(".short 0xf38c,0x0d17  @ usat  sp, #24, r12")
-       TEST_UNSUPPORTED(".short 0xf38c,0x0f17  @ usat  pc, #24, r12")
-       TEST_UNSUPPORTED(".short 0xf38d,0x0c17  @ usat  r12, #24, sp")
-       TEST_UNSUPPORTED(".short 0xf38f,0x0c17  @ usat  r12, #24, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf38c0d17) "   @ usat  sp, #24, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf38c0f17) "   @ usat  pc, #24, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf38d0c17) "   @ usat  r12, #24, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf38f0c17) "   @ usat  r12, #24, pc")
 
        TEST_R(     "ssat16     r0, #12, r",0,   HH1,"")
        TEST_R(     "ssat16     r14, #12, r",12, HH2,"")
-       TEST_UNSUPPORTED(".short 0xf32c,0x0d0b  @ ssat16        sp, #12, r12")
-       TEST_UNSUPPORTED(".short 0xf32c,0x0f0b  @ ssat16        pc, #12, r12")
-       TEST_UNSUPPORTED(".short 0xf32d,0x0c0b  @ ssat16        r12, #12, sp")
-       TEST_UNSUPPORTED(".short 0xf32f,0x0c0b  @ ssat16        r12, #12, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf32c0d0b) "   @ ssat16        sp, #12, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf32c0f0b) "   @ ssat16        pc, #12, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf32d0c0b) "   @ ssat16        r12, #12, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf32f0c0b) "   @ ssat16        r12, #12, pc")
 
        TEST_R(     "usat16     r0, #12, r",0,   HH1,"")
        TEST_R(     "usat16     r14, #12, r",12, HH2,"")
-       TEST_UNSUPPORTED(".short 0xf3ac,0x0d0b  @ usat16        sp, #12, r12")
-       TEST_UNSUPPORTED(".short 0xf3ac,0x0f0b  @ usat16        pc, #12, r12")
-       TEST_UNSUPPORTED(".short 0xf3ad,0x0c0b  @ usat16        r12, #12, sp")
-       TEST_UNSUPPORTED(".short 0xf3af,0x0c0b  @ usat16        r12, #12, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0d0b) "   @ usat16        sp, #12, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0f0b) "   @ usat16        pc, #12, r12")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3ad0c0b) "   @ usat16        r12, #12, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3af0c0b) "   @ usat16        r12, #12, pc")
 
        TEST_R(     "sbfx       r0, r",0  , VAL1,", #0, #31")
        TEST_R(     "sbfx       r14, r",12, VAL2,", #8, #16")
        TEST_R(     "sbfx       r4, r",10,  VAL1,", #16, #15")
-       TEST_UNSUPPORTED(".short 0xf34c,0x2d0f  @ sbfx  sp, r12, #8, #16")
-       TEST_UNSUPPORTED(".short 0xf34c,0x2f0f  @ sbfx  pc, r12, #8, #16")
-       TEST_UNSUPPORTED(".short 0xf34d,0x2c0f  @ sbfx  r12, sp, #8, #16")
-       TEST_UNSUPPORTED(".short 0xf34f,0x2c0f  @ sbfx  r12, pc, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf34c2d0f) "   @ sbfx  sp, r12, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf34c2f0f) "   @ sbfx  pc, r12, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf34d2c0f) "   @ sbfx  r12, sp, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf34f2c0f) "   @ sbfx  r12, pc, #8, #16")
 
        TEST_R(     "ubfx       r0, r",0  , VAL1,", #0, #31")
        TEST_R(     "ubfx       r14, r",12, VAL2,", #8, #16")
        TEST_R(     "ubfx       r4, r",10,  VAL1,", #16, #15")
-       TEST_UNSUPPORTED(".short 0xf3cc,0x2d0f  @ ubfx  sp, r12, #8, #16")
-       TEST_UNSUPPORTED(".short 0xf3cc,0x2f0f  @ ubfx  pc, r12, #8, #16")
-       TEST_UNSUPPORTED(".short 0xf3cd,0x2c0f  @ ubfx  r12, sp, #8, #16")
-       TEST_UNSUPPORTED(".short 0xf3cf,0x2c0f  @ ubfx  r12, pc, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2d0f) "   @ ubfx  sp, r12, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2f0f) "   @ ubfx  pc, r12, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3cd2c0f) "   @ ubfx  r12, sp, #8, #16")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3cf2c0f) "   @ ubfx  r12, pc, #8, #16")
 
        TEST_R(     "bfc        r",0, VAL1,", #4, #20")
        TEST_R(     "bfc        r",14,VAL2,", #4, #20")
        TEST_R(     "bfc        r",7, VAL1,", #0, #31")
        TEST_R(     "bfc        r",8, VAL2,", #0, #31")
-       TEST_UNSUPPORTED(".short 0xf36f,0x0d1e  @ bfc   sp, #0, #31")
-       TEST_UNSUPPORTED(".short 0xf36f,0x0f1e  @ bfc   pc, #0, #31")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf36f0d1e) "   @ bfc   sp, #0, #31")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf36f0f1e) "   @ bfc   pc, #0, #31")
 
        TEST_RR(    "bfi        r",0, VAL1,", r",0  , VAL2,", #0, #31")
        TEST_RR(    "bfi        r",12,VAL1,", r",14 , VAL2,", #4, #20")
-       TEST_UNSUPPORTED(".short 0xf36e,0x1d17  @ bfi   sp, r14, #4, #20")
-       TEST_UNSUPPORTED(".short 0xf36e,0x1f17  @ bfi   pc, r14, #4, #20")
-       TEST_UNSUPPORTED(".short 0xf36d,0x1e17  @ bfi   r14, sp, #4, #20")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf36e1d17) "   @ bfi   sp, r14, #4, #20")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf36e1f17) "   @ bfi   pc, r14, #4, #20")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf36d1e17) "   @ bfi   r14, sp, #4, #20")
 
        TEST_GROUP("Branches and miscellaneous control")
 
@@ -775,14 +776,14 @@ CONDITION_INSTRUCTIONS(22,
 
        TEST("mrs       r0, cpsr")
        TEST("mrs       r14, cpsr")
-       TEST_UNSUPPORTED(".short 0xf3ef,0x8d00  @ mrs   sp, spsr")
-       TEST_UNSUPPORTED(".short 0xf3ef,0x8f00  @ mrs   pc, spsr")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8d00) "   @ mrs   sp, spsr")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8f00) "   @ mrs   pc, spsr")
        TEST_UNSUPPORTED("mrs   r0, spsr")
        TEST_UNSUPPORTED("mrs   lr, spsr")
 
-       TEST_UNSUPPORTED(".short 0xf7f0,0x8000 @ smc #0")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf7f08000) " @ smc #0")
 
-       TEST_UNSUPPORTED(".short 0xf7f0,0xa000 @ undefeined")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf7f0a000) " @ undefeined")
 
        TEST_BF(  "b.w  2f")
        TEST_BB(  "b.w  2b")
@@ -829,15 +830,15 @@ CONDITION_INSTRUCTIONS(22,
        SINGLE_STORE("")
 
        TEST("str       sp, [sp]")
-       TEST_UNSUPPORTED(".short 0xf8cf,0xe000  @ str   r14, [pc]")
-       TEST_UNSUPPORTED(".short 0xf8ce,0xf000  @ str   pc, [r14]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf8cfe000) "   @ str   r14, [pc]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf8cef000) "   @ str   pc, [r14]")
 
        TEST_GROUP("Advanced SIMD element or structure load/store instructions")
 
-       TEST_UNSUPPORTED(".short 0xf900,0x0000")
-       TEST_UNSUPPORTED(".short 0xf92f,0xffff")
-       TEST_UNSUPPORTED(".short 0xf980,0x0000")
-       TEST_UNSUPPORTED(".short 0xf9ef,0xffff")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf9000000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf92fffff) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf9800000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf9efffff) "")
 
        TEST_GROUP("Load single data item and memory hints")
 
@@ -881,20 +882,20 @@ CONDITION_INSTRUCTIONS(22,
        TEST_SUPPORTED("ldr     sp, 99f")
        TEST_SUPPORTED("ldr     pc, 99f")
 
-       TEST_UNSUPPORTED(".short 0xf854,0x700d  @ ldr   r7, [r4, sp]")
-       TEST_UNSUPPORTED(".short 0xf854,0x700f  @ ldr   r7, [r4, pc]")
-       TEST_UNSUPPORTED(".short 0xf814,0x700d  @ ldrb  r7, [r4, sp]")
-       TEST_UNSUPPORTED(".short 0xf814,0x700f  @ ldrb  r7, [r4, pc]")
-       TEST_UNSUPPORTED(".short 0xf89f,0xd004  @ ldrb  sp, 99f")
-       TEST_UNSUPPORTED(".short 0xf814,0xd008  @ ldrb  sp, [r4, r8]")
-       TEST_UNSUPPORTED(".short 0xf894,0xd000  @ ldrb  sp, [r4]")
-
-       TEST_UNSUPPORTED(".short 0xf860,0x0000") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xf9ff,0xffff") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xf950,0x0000") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xf95f,0xffff") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xf800,0x0800") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xf97f,0xfaff") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xf854700d) "   @ ldr   r7, [r4, sp]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf854700f) "   @ ldr   r7, [r4, pc]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf814700d) "   @ ldrb  r7, [r4, sp]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf814700f) "   @ ldrb  r7, [r4, pc]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf89fd004) "   @ ldrb  sp, 99f")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf814d008) "   @ ldrb  sp, [r4, r8]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf894d000) "   @ ldrb  sp, [r4]")
+
+       TEST_UNSUPPORTED(__inst_thumb32(0xf8600000) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xf9ffffff) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xf9500000) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xf95fffff) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xf8000800) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xf97ffaff) "") /* Unallocated space */
 
        TEST(   "pli    [pc, #4]")
        TEST(   "pli    [pc, #-4]")
@@ -902,22 +903,22 @@ CONDITION_INSTRUCTIONS(22,
        TEST(   "pld    [pc, #-4]")
 
        TEST_P( "pld    [r",0,-1024,", #1024]")
-       TEST(   ".short 0xf8b0,0xf400   @ pldw  [r0, #1024]")
+       TEST(   __inst_thumb32(0xf8b0f400) "    @ pldw  [r0, #1024]")
        TEST_P( "pli    [r",4, 0b,", #1024]")
        TEST_P( "pld    [r",7, 120,", #-120]")
-       TEST(   ".short 0xf837,0xfc78   @ pldw  [r7, #-120]")
+       TEST(   __inst_thumb32(0xf837fc78) "    @ pldw  [r7, #-120]")
        TEST_P( "pli    [r",11,120,", #-120]")
        TEST(   "pld    [sp, #0]")
 
        TEST_PR("pld    [r",7, 24, ", r",0, 16,"]")
        TEST_PR("pld    [r",8, 24, ", r",12,16,", lsl #3]")
-       TEST_SUPPORTED(".short 0xf837,0xf000    @ pldw  [r7, r0]")
-       TEST_SUPPORTED(".short 0xf838,0xf03c    @ pldw  [r8, r12, lsl #3]");
+       TEST_SUPPORTED(__inst_thumb32(0xf837f000) "     @ pldw  [r7, r0]")
+       TEST_SUPPORTED(__inst_thumb32(0xf838f03c) "     @ pldw  [r8, r12, lsl #3]");
        TEST_RR("pli    [r",12,0b,", r",0, 16,"]")
        TEST_RR("pli    [r",0, 0b,", r",12,16,", lsl #3]")
        TEST_R( "pld    [sp, r",1, 16,"]")
-       TEST_UNSUPPORTED(".short 0xf817,0xf00d  @pld    [r7, sp]")
-       TEST_UNSUPPORTED(".short 0xf817,0xf00f  @pld    [r7, pc]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf817f00d) "  @pld     [r7, sp]")
+       TEST_UNSUPPORTED(__inst_thumb32(0xf817f00f) "  @pld     [r7, pc]")
 
        TEST_GROUP("Data-processing (register)")
 
@@ -934,21 +935,21 @@ CONDITION_INSTRUCTIONS(22,
        SHIFTS32("ror")
        SHIFTS32("rors")
 
-       TEST_UNSUPPORTED(".short 0xfa01,0xff02  @ lsl   pc, r1, r2")
-       TEST_UNSUPPORTED(".short 0xfa01,0xfd02  @ lsl   sp, r1, r2")
-       TEST_UNSUPPORTED(".short 0xfa0f,0xf002  @ lsl   r0, pc, r2")
-       TEST_UNSUPPORTED(".short 0xfa0d,0xf002  @ lsl   r0, sp, r2")
-       TEST_UNSUPPORTED(".short 0xfa01,0xf00f  @ lsl   r0, r1, pc")
-       TEST_UNSUPPORTED(".short 0xfa01,0xf00d  @ lsl   r0, r1, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa01ff02) "   @ lsl   pc, r1, r2")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa01fd02) "   @ lsl   sp, r1, r2")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff002) "   @ lsl   r0, pc, r2")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa0df002) "   @ lsl   r0, sp, r2")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00f) "   @ lsl   r0, r1, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00d) "   @ lsl   r0, r1, sp")
 
        TEST_RR(    "sxtah      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "sxtah      r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "sxth       r8, r",7,  HH1,"")
 
-       TEST_UNSUPPORTED(".short 0xfa0f,0xff87  @ sxth  pc, r7");
-       TEST_UNSUPPORTED(".short 0xfa0f,0xfd87  @ sxth  sp, r7");
-       TEST_UNSUPPORTED(".short 0xfa0f,0xf88f  @ sxth  r8, pc");
-       TEST_UNSUPPORTED(".short 0xfa0f,0xf88d  @ sxth  r8, sp");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa0fff87) "   @ sxth  pc, r7");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa0ffd87) "   @ sxth  sp, r7");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88f) "   @ sxth  r8, pc");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88d) "   @ sxth  r8, sp");
 
        TEST_RR(    "uxtah      r0, r",0,  HH1,", r",1, HH2,"")
        TEST_RR(    "uxtah      r14,r",12, HH2,", r",10,HH1,", ror #8")
@@ -970,8 +971,8 @@ CONDITION_INSTRUCTIONS(22,
        TEST_RR(    "uxtab      r14,r",12, HH2,", r",10,HH1,", ror #8")
        TEST_R(     "uxtb       r8, r",7,  HH1,"")
 
-       TEST_UNSUPPORTED(".short 0xfa60,0x00f0")
-       TEST_UNSUPPORTED(".short 0xfa7f,0xffff")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa6000f0) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa7fffff) "")
 
 #define PARALLEL_ADD_SUB(op)                                   \
        TEST_RR(  op"add16      r0, r",0,  HH1,", r",1, HH2,"") \
@@ -1019,10 +1020,10 @@ CONDITION_INSTRUCTIONS(22,
        TEST_R("revsh.w r0, r",0,   VAL1,"")
        TEST_R("revsh   r14, r",12, VAL2,"")
 
-       TEST_UNSUPPORTED(".short 0xfa9c,0xff8c  @ rev   pc, r12");
-       TEST_UNSUPPORTED(".short 0xfa9c,0xfd8c  @ rev   sp, r12");
-       TEST_UNSUPPORTED(".short 0xfa9f,0xfe8f  @ rev   r14, pc");
-       TEST_UNSUPPORTED(".short 0xfa9d,0xfe8d  @ rev   r14, sp");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa9cff8c) "   @ rev   pc, r12");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa9cfd8c) "   @ rev   sp, r12");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa9ffe8f) "   @ rev   r14, pc");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa9dfe8d) "   @ rev   r14, sp");
 
        TEST_RR("sel    r0, r",0,  VAL1,", r",1, VAL2,"")
        TEST_RR("sel    r14, r",12,VAL1,", r",10, VAL2,"")
@@ -1031,31 +1032,31 @@ CONDITION_INSTRUCTIONS(22,
        TEST_R("clz     r7, r",14,0x1,"")
        TEST_R("clz     lr, r",7, 0xffffffff,"")
 
-       TEST_UNSUPPORTED(".short 0xfa80,0xf030") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfab0,0xf000") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfa80f030) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfab0f000) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
 
        TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")
 
        TEST_RR(    "mul        r0, r",1, VAL1,", r",2, VAL2,"")
        TEST_RR(    "mul        r7, r",8, VAL2,", r",9, VAL2,"")
-       TEST_UNSUPPORTED(".short 0xfb08,0xff09  @ mul   pc, r8, r9")
-       TEST_UNSUPPORTED(".short 0xfb08,0xfd09  @ mul   sp, r8, r9")
-       TEST_UNSUPPORTED(".short 0xfb0f,0xf709  @ mul   r7, pc, r9")
-       TEST_UNSUPPORTED(".short 0xfb0d,0xf709  @ mul   r7, sp, r9")
-       TEST_UNSUPPORTED(".short 0xfb08,0xf70f  @ mul   r7, r8, pc")
-       TEST_UNSUPPORTED(".short 0xfb08,0xf70d  @ mul   r7, r8, sp")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08ff09) "   @ mul   pc, r8, r9")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08fd09) "   @ mul   sp, r8, r9")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb0ff709) "   @ mul   r7, pc, r9")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb0df709) "   @ mul   r7, sp, r9")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70f) "   @ mul   r7, r8, pc")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70d) "   @ mul   r7, r8, sp")
 
        TEST_RRR(   "mla        r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(   "mla        r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
-       TEST_UNSUPPORTED(".short 0xfb08,0xaf09  @ mla   pc, r8, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb08,0xad09  @ mla   sp, r8, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb0f,0xa709  @ mla   r7, pc, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb0d,0xa709  @ mla   r7, sp, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb08,0xa70f  @ mla   r7, r8, pc, r10");
-       TEST_UNSUPPORTED(".short 0xfb08,0xa70d  @ mla   r7, r8, sp, r10");
-       TEST_UNSUPPORTED(".short 0xfb08,0xd709  @ mla   r7, r8, r9, sp");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08af09) "   @ mla   pc, r8, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08ad09) "   @ mla   sp, r8, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb0fa709) "   @ mla   r7, pc, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb0da709) "   @ mla   r7, sp, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70f) "   @ mla   r7, r8, pc, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70d) "   @ mla   r7, r8, sp, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb08d709) "   @ mla   r7, r8, r9, sp");
 
        TEST_RRR(   "mls        r0, r",1, VAL1,", r",2, VAL2,", r",3,  VAL3,"")
        TEST_RRR(   "mls        r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
@@ -1123,25 +1124,25 @@ CONDITION_INSTRUCTIONS(22,
        TEST_RR(    "usad8      r0, r",0,  VAL1,", r",1, VAL2,"")
        TEST_RR(    "usad8      r14, r",12,VAL2,", r",10,VAL1,"")
 
-       TEST_UNSUPPORTED(".short 0xfb00,0xf010") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfb0f,0xff1f") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfb70,0xf010") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfb70,0x0010") /* Unallocated space */
-       TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb00f010) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb0fff1f) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb70f010) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb700010) "") /* Unallocated space */
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
 
        TEST_GROUP("Long multiply, long multiply accumulate, and divide")
 
        TEST_RR(   "smull       r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(   "smull       r7, r8, r",9, VAL2,", r",10, VAL1,"")
-       TEST_UNSUPPORTED(".short 0xfb89,0xf80a  @ smull pc, r8, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb89,0xd80a  @ smull sp, r8, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb89,0x7f0a  @ smull r7, pc, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb89,0x7d0a  @ smull r7, sp, r9, r10");
-       TEST_UNSUPPORTED(".short 0xfb8f,0x780a  @ smull r7, r8, pc, r10");
-       TEST_UNSUPPORTED(".short 0xfb8d,0x780a  @ smull r7, r8, sp, r10");
-       TEST_UNSUPPORTED(".short 0xfb89,0x780f  @ smull r7, r8, r9, pc");
-       TEST_UNSUPPORTED(".short 0xfb89,0x780d  @ smull r7, r8, r9, sp");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb89f80a) "   @ smull pc, r8, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb89d80a) "   @ smull sp, r8, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb897f0a) "   @ smull r7, pc, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb897d0a) "   @ smull r7, sp, r9, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb8f780a) "   @ smull r7, r8, pc, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb8d780a) "   @ smull r7, r8, sp, r10");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb89780f) "   @ smull r7, r8, r9, pc");
+       TEST_UNSUPPORTED(__inst_thumb32(0xfb89780d) "   @ smull r7, r8, r9, sp");
 
        TEST_RR(   "umull       r0, r1, r",2, VAL1,", r",3, VAL2,"")
        TEST_RR(   "umull       r7, r8, r",9, VAL2,", r",10, VAL1,"")
@@ -1175,8 +1176,8 @@ CONDITION_INSTRUCTIONS(22,
 
        TEST_GROUP("Coprocessor instructions")
 
-       TEST_UNSUPPORTED(".short 0xfc00,0x0000")
-       TEST_UNSUPPORTED(".short 0xffff,0xffff")
+       TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
+       TEST_UNSUPPORTED(__inst_thumb32(0xffffffff) "")
 
        TEST_GROUP("Testing instructions in IT blocks")
 
index c2fd06b4c3894bd5e5a901e70abd5411d00691ae..379639998d5a804266986b2c5f0141a33ebc8ffa 100644 (file)
  *     @ start of inline data...
  *     .ascii "mov r0, r7"     @ text title for test case
  *     .byte   0
- *     .align  2
+ *     .align  2, 0
  *
  *     @ TEST_ARG_REG
  *     .byte   ARG_TYPE_REG
@@ -1333,7 +1333,8 @@ static void test_case_failed(const char *message)
 static unsigned long next_instruction(unsigned long pc)
 {
 #ifdef CONFIG_THUMB2_KERNEL
-       if ((pc & 1) && !is_wide_instruction(*(u16 *)(pc - 1)))
+       if ((pc & 1) &&
+           !is_wide_instruction(__mem_to_opcode_thumb16(*(u16 *)(pc - 1))))
                return pc + 2;
        else
 #endif
@@ -1378,13 +1379,13 @@ static uintptr_t __used kprobes_test_case_start(const char *title, void *stack)
 
        if (test_case_is_thumb) {
                u16 *p = (u16 *)(test_code & ~1);
-               current_instruction = p[0];
+               current_instruction = __mem_to_opcode_thumb16(p[0]);
                if (is_wide_instruction(current_instruction)) {
-                       current_instruction <<= 16;
-                       current_instruction |= p[1];
+                       u16 instr2 = __mem_to_opcode_thumb16(p[1]);
+                       current_instruction = __opcode_thumb32_compose(current_instruction, instr2);
                }
        } else {
-               current_instruction = *(u32 *)test_code;
+               current_instruction = __mem_to_opcode_arm(*(u32 *)test_code);
        }
 
        if (current_title[0] == '.')
index e28a869b1ae4b7be5abfb7d088c3214bfe2caab7..eecc90a0fd912e7a0de0a50c7a707df8864d59a3 100644 (file)
@@ -115,7 +115,7 @@ struct test_arg_end {
        /* multiple strings to be concatenated.  */             \
        ".ascii "#title"                                \n\t"   \
        ".byte  0                                       \n\t"   \
-       ".align 2                                       \n\t"
+       ".align 2, 0                                    \n\t"
 
 #define        TEST_ARG_REG(reg, val)                                  \
        ".byte  "__stringify(ARG_TYPE_REG)"             \n\t"   \
index 6619188619ae1409ef4c75c4909d7c58675c3f08..9495d7f3516fca54012691b642ea22380fc64ffd 100644 (file)
@@ -149,9 +149,9 @@ t32_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
        enum probes_insn ret = kprobe_decode_ldmstm(insn, asi, d);
 
        /* Fixup modified instruction to have halfwords in correct order...*/
-       insn = asi->insn[0];
-       ((u16 *)asi->insn)[0] = insn >> 16;
-       ((u16 *)asi->insn)[1] = insn & 0xffff;
+       insn = __mem_to_opcode_arm(asi->insn[0]);
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
 
        return ret;
 }
@@ -516,7 +516,7 @@ t16_decode_hiregs(probes_opcode_t insn, struct arch_probes_insn *asi,
 {
        insn &= ~0x00ff;
        insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
-       ((u16 *)asi->insn)[0] = insn;
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
        asi->insn_handler = t16_emulate_hiregs;
        return INSN_GOOD;
 }
@@ -547,8 +547,10 @@ t16_decode_push(probes_opcode_t insn, struct arch_probes_insn *asi,
         * and call it with R9=SP and LR in the register list represented
         * by R8.
         */
-       ((u16 *)asi->insn)[0] = 0xe929;         /* 1st half STMDB R9!,{} */
-       ((u16 *)asi->insn)[1] = insn & 0x1ff;   /* 2nd half (register list) */
+       /* 1st half STMDB R9!,{} */
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+       /* 2nd half (register list) */
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
        asi->insn_handler = t16_emulate_push;
        return INSN_GOOD;
 }
@@ -600,8 +602,10 @@ t16_decode_pop(probes_opcode_t insn, struct arch_probes_insn *asi,
         * and call it with R9=SP and PC in the register list represented
         * by R8.
         */
-       ((u16 *)asi->insn)[0] = 0xe8b9;         /* 1st half LDMIA R9!,{} */
-       ((u16 *)asi->insn)[1] = insn & 0x1ff;   /* 2nd half (register list) */
+       /* 1st half LDMIA R9!,{} */
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+       /* 2nd half (register list) */
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
        asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
                                         : t16_emulate_pop_nopc;
        return INSN_GOOD;
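The kprobes-thumb decoder hunks above all follow one write-side pattern: an opcode is assembled in canonical form and converted with __opcode_to_mem_thumb16() only at the point where it is stored into the instruction slot. A condensed sketch of that pattern, reusing the STMDB template and register-list mask visible above; write_push_pair() is a hypothetical name, not a function from the patch:

    /* Sketch: canonical opcode in, memory byte order out, applied only
     * where the halfwords are written into the instruction slot. */
    #include <linux/types.h>
    #include <asm/opcodes.h>

    static void write_push_pair(u16 *slot, u16 reglist)
    {
            /* 1st halfword: STMDB R9!,{} template; 2nd: register list */
            slot[0] = __opcode_to_mem_thumb16(0xe929);
            slot[1] = __opcode_to_mem_thumb16(reglist & 0x1ff);
    }
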
index 8795f9f819d5820285bd5e737456ddb7a3f802bb..6d644202c8dcb164248f6fedb934eb5b3c0d58e4 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
+#include <asm/opcodes.h>
 #include <asm/cacheflush.h>
 #include <linux/percpu.h>
 #include <linux/bug.h>
@@ -67,10 +68,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 #ifdef CONFIG_THUMB2_KERNEL
        thumb = true;
        addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
-       insn = ((u16 *)addr)[0];
+       insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
        if (is_wide_instruction(insn)) {
-               insn <<= 16;
-               insn |= ((u16 *)addr)[1];
+               u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+               insn = __opcode_thumb32_compose(insn, inst2);
                decode_insn = thumb32_probes_decode_insn;
                actions = kprobes_t32_actions;
        } else {
@@ -81,7 +82,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        thumb = false;
        if (addr & 0x3)
                return -EINVAL;
-       insn = *p->addr;
+       insn = __mem_to_opcode_arm(*p->addr);
        decode_insn = arm_probes_decode_insn;
        actions = kprobes_arm_actions;
 #endif
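arch_prepare_kprobe() above shows the matching read-side pattern: halfwords are fetched from memory, converted with __mem_to_opcode_thumb16(), and combined with __opcode_thumb32_compose() when the first halfword passes the same is_wide_instruction() test the file already uses. A condensed sketch of that path; read_probed_thumb() is a hypothetical name:

    /* Sketch: memory order in, canonical opcode order out, composing the
     * two halfwords of a wide (32-bit) Thumb instruction. */
    #include <linux/types.h>
    #include <asm/opcodes.h>

    static u32 read_probed_thumb(const u16 *addr)
    {
            u32 insn = __mem_to_opcode_thumb16(addr[0]);

            if (is_wide_instruction(insn))
                    insn = __opcode_thumb32_compose(insn,
                                __mem_to_opcode_thumb16(addr[1]));
            return insn;
    }
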
index 679cf4d18c08bfa99fec75cadd24027c192a1706..fc72086362842436381d0595c1afea648eb7b830 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <asm/thread_notify.h>
+#include <asm/cputype.h>
 
 static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
 {
@@ -80,6 +81,9 @@ static int __init pj4_cp0_init(void)
 {
        u32 cp_access;
 
+       if (!cpu_is_pj4())
+               return 0;
+
        cp_access = pj4_cp_access_read() & ~0xf;
        pj4_cp_access_write(cp_access);
 
index b41873f33e699b780362eabc5793db34de0dc397..a8ab540d7e73a0b34533e8b22979fffa221501a8 100644 (file)
@@ -202,13 +202,14 @@ prepare_emulated_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
 #ifdef CONFIG_THUMB2_KERNEL
        if (thumb) {
                u16 *thumb_insn = (u16 *)asi->insn;
-               thumb_insn[1] = 0x4770; /* Thumb bx lr */
-               thumb_insn[2] = 0x4770; /* Thumb bx lr */
+               /* Thumb bx lr */
+               thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
+               thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
                return insn;
        }
-       asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+       asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
 #else
-       asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+       asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
 #endif
        /* Make an ARM instruction unconditional */
        if (insn < 0xe0000000)
@@ -228,12 +229,12 @@ set_emulated_insn(probes_opcode_t insn, struct arch_probes_insn *asi,
        if (thumb) {
                u16 *ip = (u16 *)asi->insn;
                if (is_wide_instruction(insn))
-                       *ip++ = insn >> 16;
-               *ip++ = insn;
+                       *ip++ = __opcode_to_mem_thumb16(insn >> 16);
+               *ip++ = __opcode_to_mem_thumb16(insn);
                return;
        }
 #endif
-       asi->insn[0] = insn;
+       asi->insn[0] = __opcode_to_mem_arm(insn);
 }
 
 /*
index 806d287e3e5383972bf08b9d389fbf8e5e8573d3..81ef686a91ca18dccfbb85cd75b5a2aa6c303ecc 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/processor.h>
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
+#include <asm/system_misc.h>
 #include <asm/mach/time.h>
 #include <asm/tls.h>
 
@@ -99,7 +100,7 @@ void soft_restart(unsigned long addr)
        u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
 
        /* Disable interrupts first */
-       local_irq_disable();
+       raw_local_irq_disable();
        local_fiq_disable();
 
        /* Disable the L2 if we're the last man standing. */
index 172ee18ff1247b3159c355d899bf09bfc06579c8..abd2fc0677364a529d4c12479890f6b97770b41a 100644 (file)
@@ -445,6 +445,7 @@ die_sig:
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
+               __show_regs(regs);
                dump_instr(KERN_INFO, regs);
        }
 #endif
index 14d499688736b3c816c082f3216aa12503005b64..788495d35cf9ea6d920a69a8fd6cc2a7b46fd7c0 100644 (file)
@@ -137,11 +137,16 @@ static void dcscb_power_down(void)
                v7_exit_coherency_flush(all);
 
                /*
-                * This is a harmless no-op.  On platforms with a real
-                * outer cache this might either be needed or not,
-                * depending on where the outer cache sits.
+                * A full outer cache flush could be needed at this point
+                * on platforms with such a cache, depending on where the
+                * outer cache sits. In some cases the notion of a "last
+                * cluster standing" would need to be implemented if the
+                * outer cache is shared across clusters. In any case, when
+                * the outer cache needs flushing, there is no concurrent
+                * access to the cache controller to worry about and no
+                * special locking besides what is already provided by the
+                * MCPM state machinery is needed.
                 */
-               outer_flush_all();
 
                /*
                 * Disable cluster-level coherency by masking
index ef69152f9b52e473796829545bf5cf5d1e83926a..c508f41a43bcb9f2f97c9d4afaf131ef77b4dc44 100644 (file)
@@ -120,34 +120,51 @@ static const struct prot_bits pte_bits[] = {
 };
 
 static const struct prot_bits section_bits[] = {
-#ifndef CONFIG_ARM_LPAE
-       /* These are approximate */
+#ifdef CONFIG_ARM_LPAE
+       {
+               .mask   = PMD_SECT_USER,
+               .val    = PMD_SECT_USER,
+               .set    = "USR",
+       }, {
+               .mask   = PMD_SECT_RDONLY,
+               .val    = PMD_SECT_RDONLY,
+               .set    = "ro",
+               .clear  = "RW",
+#elif __LINUX_ARM_ARCH__ >= 6
        {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
-               .val    = 0,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .set    = "    ro",
        }, {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val    = PMD_SECT_AP_WRITE,
                .set    = "    RW",
        }, {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val    = PMD_SECT_AP_READ,
                .set    = "USR ro",
        }, {
-               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .mask   = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
                .set    = "USR RW",
-#else
+#else /* ARMv4/ARMv5  */
+       /* These are approximate */
        {
-               .mask   = PMD_SECT_USER,
-               .val    = PMD_SECT_USER,
-               .set    = "USR",
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = 0,
+               .set    = "    ro",
        }, {
-               .mask   = PMD_SECT_RDONLY,
-               .val    = PMD_SECT_RDONLY,
-               .set    = "ro",
-               .clear  = "RW",
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_AP_WRITE,
+               .set    = "    RW",
+       }, {
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_AP_READ,
+               .set    = "USR ro",
+       }, {
+               .mask   = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .val    = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
+               .set    = "USR RW",
 #endif
        }, {
                .mask   = PMD_SECT_XN,
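
The reworked section_bits table above drives the ARM page-table dump code: each descriptor pairs a mask over the section-level permission bits with the value that selects a human-readable label, now split into LPAE, ARMv6+ (APX-aware) and ARMv4/ARMv5 variants. As a rough illustration of how such a table is consumed (the loop below is a sketch, not the code from arch/arm/mm/dump.c, and prints with plain printf rather than the kernel's seq interface), the dumper effectively does:

        #include <stdio.h>
        #include <stddef.h>

        struct prot_bits {
                unsigned long mask;
                unsigned long val;
                const char *set;        /* printed when (entry & mask) == val */
                const char *clear;      /* printed otherwise, may be NULL     */
        };

        static void dump_prot(unsigned long entry,
                              const struct prot_bits *bits, size_t num)
        {
                size_t i;

                for (i = 0; i < num; i++, bits++) {
                        const char *s = ((entry & bits->mask) == bits->val)
                                        ? bits->set : bits->clear;
                        if (s)
                                printf("%s ", s);
                }
        }
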
index 46e17492fd1f3ecccda92c660474c805707b788b..f0759e70fb865b4d24370a46b58b37564e49f871 100644 (file)
@@ -8,9 +8,12 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 
 @ VFP entry point.
 @
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT_COUNT
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       add     r11, r4, #1             @ increment it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       inc_preempt_count r10, r4
        enable_irq
        ldr     r4, .LCvfp
        ldr     r11, [r10, #TI_CPU]     @ CPU number
@@ -35,12 +34,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        mov     pc, lr
 ENDPROC(vfp_null_entry)
 
@@ -53,12 +47,7 @@ ENDPROC(vfp_null_entry)
 
        __INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        ldr     r0, VFP_arch_address
        str     r0, [r0]                @ set to non-zero value
        mov     pc, r9                  @ we have handled the fault
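
These entry.S hunks, together with the vfphw.S hunks that follow, replace the open-coded CONFIG_PREEMPT_COUNT sequences with the inc_preempt_count and dec_preempt_count_ti assembler macros, presumably provided through the newly included <asm/assembler.h>. Rendered loosely in C (an illustration of the intent only; the real macros are assembler macros operating on TI_PREEMPT, dec_preempt_count_ti also fetches the current thread_info first, and both should compile away without CONFIG_PREEMPT_COUNT), they amount to:

        struct thread_info {
                int preempt_count;
                /* ... */
        };

        static inline void inc_preempt_count(struct thread_info *ti)
        {
        #ifdef CONFIG_PREEMPT_COUNT
                ti->preempt_count++;            /* ldr/add/str on TI_PREEMPT */
        #endif
        }

        static inline void dec_preempt_count_ti(struct thread_info *ti)
        {
        #ifdef CONFIG_PREEMPT_COUNT
                ti->preempt_count--;            /* ldr/sub/str on TI_PREEMPT */
        #endif
        }
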
index 3e5d3115a2a6847ee41fc3d6d23ffde8fa0a7917..be807625ed8c23dead72282fc9ff5b9babbfe1f6 100644 (file)
  * r10 points at the start of the private FP workspace in the thread structure
  * sp points to a struct pt_regs (as defined in include/asm/proc/ptrace.h)
  */
+#include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/vfpmacros.h>
 #include <linux/kern_levels.h>
-#include "../kernel/entry-header.S"
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
 
        .macro  DBGSTR, str
 #ifdef DEBUG
@@ -179,12 +182,7 @@ vfp_hw_state_valid:
                                        @ else it's one 32-bit instruction, so
                                        @ always subtract 4 from the following
                                        @ instruction address.
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        mov     pc, r9                  @ we think we have handled things
 
 
@@ -203,12 +201,7 @@ look_for_VFP_exceptions:
        @ not recognised by VFP
 
        DBGSTR  "not VFP"
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-       ldr     r4, [r10, #TI_PREEMPT]  @ get preempt count
-       sub     r11, r4, #1             @ decrement it
-       str     r11, [r10, #TI_PREEMPT]
-#endif
+       dec_preempt_count_ti r10, r4
        mov     pc, lr
 
 process_exception:
index 1325c3bc58e11934222dd7c493f1632535ab242d..12c3afee0f6fabcd664c12e7ddca1bf367bc401e 100644 (file)
@@ -45,6 +45,7 @@ config IA64
        select HAVE_MOD_ARCH_SPECIFIC
        select MODULES_USE_ELF_RELA
        select ARCH_USE_CMPXCHG_LOCKREF
+       select HAVE_ARCH_AUDITSYSCALL
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
index 79b9bcdfe4982ab433a89a5851b1551d47d7be7e..9ae08541e30d0c5ec9ed2d94badd95c6afbf289a 100644 (file)
@@ -1,38 +1,38 @@
 config MICROBLAZE
        def_bool y
        select ARCH_MIGHT_HAVE_PC_PARPORT
-       select HAVE_MEMBLOCK
-       select HAVE_MEMBLOCK_NODE_MAP
-       select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_DYNAMIC_FTRACE
-       select HAVE_FTRACE_MCOUNT_RECORD
-       select ARCH_WANT_OPTIONAL_GPIOLIB
-       select HAVE_OPROFILE
-       select HAVE_ARCH_KGDB
-       select HAVE_DMA_ATTRS
-       select HAVE_DMA_API_DEBUG
-       select TRACING_SUPPORT
-       select OF
-       select OF_EARLY_FLATTREE
        select ARCH_WANT_IPC_PARSE_VERSION
-       select HAVE_DEBUG_KMEMLEAK
-       select IRQ_DOMAIN
-       select VIRT_TO_BUS
+       select ARCH_WANT_OPTIONAL_GPIOLIB
+       select BUILDTIME_EXTABLE_SORT
+       select CLKSRC_OF
+       select CLONE_BACKWARDS3
+       select COMMON_CLK
+       select GENERIC_ATOMIC64
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CPU_DEVICES
+       select GENERIC_IDLE_POLL_SETUP
        select GENERIC_IRQ_PROBE
        select GENERIC_IRQ_SHOW
        select GENERIC_PCI_IOMAP
-       select GENERIC_CPU_DEVICES
-       select GENERIC_ATOMIC64
-       select GENERIC_CLOCKEVENTS
-       select COMMON_CLK
        select GENERIC_SCHED_CLOCK
-       select GENERIC_IDLE_POLL_SETUP
+       select HAVE_ARCH_KGDB
+       select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DMA_API_DEBUG
+       select HAVE_DMA_ATTRS
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+       select HAVE_FUNCTION_TRACER
+       select HAVE_MEMBLOCK
+       select HAVE_MEMBLOCK_NODE_MAP
+       select HAVE_OPROFILE
+       select IRQ_DOMAIN
        select MODULES_USE_ELF_RELA
-       select CLONE_BACKWARDS3
-       select CLKSRC_OF
-       select BUILDTIME_EXTABLE_SORT
+       select OF
+       select OF_EARLY_FLATTREE
+       select TRACING_SUPPORT
+       select VIRT_TO_BUS
 
 config SWAP
        def_bool n
@@ -74,7 +74,7 @@ source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
-source "arch/microblaze/platform/Kconfig.platform"
+source "arch/microblaze/Kconfig.platform"
 
 menu "Processor type and features"
 
diff --git a/arch/microblaze/Kconfig.platform b/arch/microblaze/Kconfig.platform
new file mode 100644 (file)
index 0000000..1b3d8c8
--- /dev/null
@@ -0,0 +1,69 @@
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.txt.
+#
+# Platform selection Kconfig menu for MicroBlaze targets
+#
+
+menu "Platform options"
+
+config OPT_LIB_FUNCTION
+       bool "Optimized lib functions"
+       default y
+       help
+         Allows turning on optimized library functions (memcpy and memmove).
+         They are optimized by using word alignment. This will work
+         fine if both source and destination are aligned on the same
+         boundary. However, if they are aligned on different boundaries,
+         shifts will be necessary. This might result in bad performance
+         on MicroBlaze systems without a barrel shifter.
+
+config OPT_LIB_ASM
+       bool "Optimized lib functions in ASM"
+       depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
+       default n
+       help
+         Allows turning on optimized library functions (memcpy and memmove)
+         written in assembly code.
+
+# Definitions for MICROBLAZE0
+comment "Definitions for MICROBLAZE0"
+
+config KERNEL_BASE_ADDR
+       hex "Physical address where the Linux kernel is loaded"
+       default "0x90000000"
+       help
+         Base address for the kernel
+
+config XILINX_MICROBLAZE0_FAMILY
+       string "Targeted FPGA family"
+       default "virtex5"
+
+config XILINX_MICROBLAZE0_USE_MSR_INSTR
+       int "USE_MSR_INSTR range (0:1)"
+       default 0
+
+config XILINX_MICROBLAZE0_USE_PCMP_INSTR
+       int "USE_PCMP_INSTR range (0:1)"
+       default 0
+
+config XILINX_MICROBLAZE0_USE_BARREL
+       int "USE_BARREL range (0:1)"
+       default 0
+
+config XILINX_MICROBLAZE0_USE_DIV
+       int "USE_DIV range (0:1)"
+       default 0
+
+config XILINX_MICROBLAZE0_USE_HW_MUL
+       int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
+       default 0
+
+config XILINX_MICROBLAZE0_USE_FPU
+       int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
+       default 0
+
+config XILINX_MICROBLAZE0_HW_VER
+       string "Core version number"
+       default 7.10.d
+
+endmenu
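
The OPT_LIB_FUNCTION help text above describes copying a word at a time when source and destination share the same alignment; when they do not, data has to be shifted into place, which is costly on cores configured without a barrel shifter. A minimal generic sketch of that idea (plain C for illustration, not the MicroBlaze lib/ implementation):

        #include <stddef.h>
        #include <stdint.h>

        static void *word_aligned_memcpy(void *dst, const void *src, size_t n)
        {
                unsigned char *d = dst;
                const unsigned char *s = src;

                /* Fast path only when both pointers share the same alignment. */
                if (((uintptr_t)d & 3) == ((uintptr_t)s & 3)) {
                        while (n && ((uintptr_t)d & 3)) {       /* reach a word boundary */
                                *d++ = *s++;
                                n--;
                        }
                        for (; n >= 4; n -= 4, d += 4, s += 4)  /* 32 bits at a time */
                                *(uint32_t *)d = *(const uint32_t *)s;
                }
                /* Misaligned case (or the tail): byte copy; the optimized kernel
                 * version instead shifts and merges words, hence the barrel-shifter
                 * dependency mentioned in the help text. */
                while (n) {
                        *d++ = *s++;
                        n--;
                }
                return dst;
        }
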
index a69eaf2ab1301466c662e27f37837c86d8193d61..740f2b82a182a9761b79028d38bde746ba7131c3 100644 (file)
@@ -48,7 +48,6 @@ head-y := arch/microblaze/kernel/head.o
 libs-y += arch/microblaze/lib/
 core-y += arch/microblaze/kernel/
 core-y += arch/microblaze/mm/
-core-y += arch/microblaze/platform/
 core-$(CONFIG_PCI) += arch/microblaze/pci/
 
 drivers-$(CONFIG_OPROFILE) += arch/microblaze/oprofile/
deleted file mode 120000 (symlink)
index 7cb657892f21229d0068d2e7416e91e551264f77..0000000000000000000000000000000000000000
+++ /dev/null
@@ -1 +0,0 @@
-../../platform/generic/system.dts
\ No newline at end of file
new file mode 100644 (file)
index 0000000000000000000000000000000000000000..b620da23febbfda27eb5eaddd89937c6618b321f
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Device Tree Generator version: 1.1
+ *
+ * (C) Copyright 2007-2008 Xilinx, Inc.
+ * (C) Copyright 2007-2009 Michal Simek
+ *
+ * Michal SIMEK <monstr@monstr.eu>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ *
+ * CAUTION: This file is automatically generated by libgen.
+ * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
+ *
+ * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101
+ */
+
+/dts-v1/;
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       compatible = "xlnx,microblaze";
+       hard-reset-gpios = <&LEDs_8Bit 2 1>;
+       model = "testing";
+       DDR2_SDRAM: memory@90000000 {
+               device_type = "memory";
+               reg = < 0x90000000 0x10000000 >;
+       } ;
+       aliases {
+               ethernet0 = &Hard_Ethernet_MAC;
+               serial0 = &RS232_Uart_1;
+       } ;
+       chosen {
+               bootargs = "console=ttyUL0,115200 highres=on";
+               linux,stdout-path = "/plb@0/serial@84000000";
+       } ;
+       cpus {
+               #address-cells = <1>;
+               #cpus = <0x1>;
+               #size-cells = <0>;
+               microblaze_0: cpu@0 {
+                       clock-frequency = <125000000>;
+                       compatible = "xlnx,microblaze-7.10.d";
+                       d-cache-baseaddr = <0x90000000>;
+                       d-cache-highaddr = <0x9fffffff>;
+                       d-cache-line-size = <0x10>;
+                       d-cache-size = <0x2000>;
+                       device_type = "cpu";
+                       i-cache-baseaddr = <0x90000000>;
+                       i-cache-highaddr = <0x9fffffff>;
+                       i-cache-line-size = <0x10>;
+                       i-cache-size = <0x2000>;
+                       model = "microblaze,7.10.d";
+                       reg = <0>;
+                       timebase-frequency = <125000000>;
+                       xlnx,addr-tag-bits = <0xf>;
+                       xlnx,allow-dcache-wr = <0x1>;
+                       xlnx,allow-icache-wr = <0x1>;
+                       xlnx,area-optimized = <0x0>;
+                       xlnx,cache-byte-size = <0x2000>;
+                       xlnx,d-lmb = <0x1>;
+                       xlnx,d-opb = <0x0>;
+                       xlnx,d-plb = <0x1>;
+                       xlnx,data-size = <0x20>;
+                       xlnx,dcache-addr-tag = <0xf>;
+                       xlnx,dcache-always-used = <0x1>;
+                       xlnx,dcache-byte-size = <0x2000>;
+                       xlnx,dcache-line-len = <0x4>;
+                       xlnx,dcache-use-fsl = <0x1>;
+                       xlnx,debug-enabled = <0x1>;
+                       xlnx,div-zero-exception = <0x1>;
+                       xlnx,dopb-bus-exception = <0x0>;
+                       xlnx,dynamic-bus-sizing = <0x1>;
+                       xlnx,edge-is-positive = <0x1>;
+                       xlnx,family = "virtex5";
+                       xlnx,endianness = <0x1>;
+                       xlnx,fpu-exception = <0x1>;
+                       xlnx,fsl-data-size = <0x20>;
+                       xlnx,fsl-exception = <0x0>;
+                       xlnx,fsl-links = <0x0>;
+                       xlnx,i-lmb = <0x1>;
+                       xlnx,i-opb = <0x0>;
+                       xlnx,i-plb = <0x1>;
+                       xlnx,icache-always-used = <0x1>;
+                       xlnx,icache-line-len = <0x4>;
+                       xlnx,icache-use-fsl = <0x1>;
+                       xlnx,ill-opcode-exception = <0x1>;
+                       xlnx,instance = "microblaze_0";
+                       xlnx,interconnect = <0x1>;
+                       xlnx,interrupt-is-edge = <0x0>;
+                       xlnx,iopb-bus-exception = <0x0>;
+                       xlnx,mmu-dtlb-size = <0x4>;
+                       xlnx,mmu-itlb-size = <0x2>;
+                       xlnx,mmu-tlb-access = <0x3>;
+                       xlnx,mmu-zones = <0x10>;
+                       xlnx,number-of-pc-brk = <0x1>;
+                       xlnx,number-of-rd-addr-brk = <0x0>;
+                       xlnx,number-of-wr-addr-brk = <0x0>;
+                       xlnx,opcode-0x0-illegal = <0x1>;
+                       xlnx,pvr = <0x2>;
+                       xlnx,pvr-user1 = <0x0>;
+                       xlnx,pvr-user2 = <0x0>;
+                       xlnx,reset-msr = <0x0>;
+                       xlnx,sco = <0x0>;
+                       xlnx,unaligned-exceptions = <0x1>;
+                       xlnx,use-barrel = <0x1>;
+                       xlnx,use-dcache = <0x1>;
+                       xlnx,use-div = <0x1>;
+                       xlnx,use-ext-brk = <0x1>;
+                       xlnx,use-ext-nm-brk = <0x1>;
+                       xlnx,use-extended-fsl-instr = <0x0>;
+                       xlnx,use-fpu = <0x2>;
+                       xlnx,use-hw-mul = <0x2>;
+                       xlnx,use-icache = <0x1>;
+                       xlnx,use-interrupt = <0x1>;
+                       xlnx,use-mmu = <0x3>;
+                       xlnx,use-msr-instr = <0x1>;
+                       xlnx,use-pcmp-instr = <0x1>;
+               } ;
+       } ;
+       mb_plb: plb@0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
+               ranges ;
+               FLASH: flash@a0000000 {
+                       bank-width = <2>;
+                       compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
+                       reg = < 0xa0000000 0x2000000 >;
+                       xlnx,family = "virtex5";
+                       xlnx,include-datawidth-matching-0 = <0x1>;
+                       xlnx,include-datawidth-matching-1 = <0x0>;
+                       xlnx,include-datawidth-matching-2 = <0x0>;
+                       xlnx,include-datawidth-matching-3 = <0x0>;
+                       xlnx,include-negedge-ioregs = <0x0>;
+                       xlnx,include-plb-ipif = <0x1>;
+                       xlnx,include-wrbuf = <0x1>;
+                       xlnx,max-mem-width = <0x10>;
+                       xlnx,mch-native-dwidth = <0x20>;
+                       xlnx,mch-plb-clk-period-ps = <0x1f40>;
+                       xlnx,mch-splb-awidth = <0x20>;
+                       xlnx,mch0-accessbuf-depth = <0x10>;
+                       xlnx,mch0-protocol = <0x0>;
+                       xlnx,mch0-rddatabuf-depth = <0x10>;
+                       xlnx,mch1-accessbuf-depth = <0x10>;
+                       xlnx,mch1-protocol = <0x0>;
+                       xlnx,mch1-rddatabuf-depth = <0x10>;
+                       xlnx,mch2-accessbuf-depth = <0x10>;
+                       xlnx,mch2-protocol = <0x0>;
+                       xlnx,mch2-rddatabuf-depth = <0x10>;
+                       xlnx,mch3-accessbuf-depth = <0x10>;
+                       xlnx,mch3-protocol = <0x0>;
+                       xlnx,mch3-rddatabuf-depth = <0x10>;
+                       xlnx,mem0-width = <0x10>;
+                       xlnx,mem1-width = <0x20>;
+                       xlnx,mem2-width = <0x20>;
+                       xlnx,mem3-width = <0x20>;
+                       xlnx,num-banks-mem = <0x1>;
+                       xlnx,num-channels = <0x0>;
+                       xlnx,priority-mode = <0x0>;
+                       xlnx,synch-mem-0 = <0x0>;
+                       xlnx,synch-mem-1 = <0x0>;
+                       xlnx,synch-mem-2 = <0x0>;
+                       xlnx,synch-mem-3 = <0x0>;
+                       xlnx,synch-pipedelay-0 = <0x2>;
+                       xlnx,synch-pipedelay-1 = <0x2>;
+                       xlnx,synch-pipedelay-2 = <0x2>;
+                       xlnx,synch-pipedelay-3 = <0x2>;
+                       xlnx,tavdv-ps-mem-0 = <0x1adb0>;
+                       xlnx,tavdv-ps-mem-1 = <0x3a98>;
+                       xlnx,tavdv-ps-mem-2 = <0x3a98>;
+                       xlnx,tavdv-ps-mem-3 = <0x3a98>;
+                       xlnx,tcedv-ps-mem-0 = <0x1adb0>;
+                       xlnx,tcedv-ps-mem-1 = <0x3a98>;
+                       xlnx,tcedv-ps-mem-2 = <0x3a98>;
+                       xlnx,tcedv-ps-mem-3 = <0x3a98>;
+                       xlnx,thzce-ps-mem-0 = <0x88b8>;
+                       xlnx,thzce-ps-mem-1 = <0x1b58>;
+                       xlnx,thzce-ps-mem-2 = <0x1b58>;
+                       xlnx,thzce-ps-mem-3 = <0x1b58>;
+                       xlnx,thzoe-ps-mem-0 = <0x1b58>;
+                       xlnx,thzoe-ps-mem-1 = <0x1b58>;
+                       xlnx,thzoe-ps-mem-2 = <0x1b58>;
+                       xlnx,thzoe-ps-mem-3 = <0x1b58>;
+                       xlnx,tlzwe-ps-mem-0 = <0x88b8>;
+                       xlnx,tlzwe-ps-mem-1 = <0x0>;
+                       xlnx,tlzwe-ps-mem-2 = <0x0>;
+                       xlnx,tlzwe-ps-mem-3 = <0x0>;
+                       xlnx,twc-ps-mem-0 = <0x2af8>;
+                       xlnx,twc-ps-mem-1 = <0x3a98>;
+                       xlnx,twc-ps-mem-2 = <0x3a98>;
+                       xlnx,twc-ps-mem-3 = <0x3a98>;
+                       xlnx,twp-ps-mem-0 = <0x11170>;
+                       xlnx,twp-ps-mem-1 = <0x2ee0>;
+                       xlnx,twp-ps-mem-2 = <0x2ee0>;
+                       xlnx,twp-ps-mem-3 = <0x2ee0>;
+                       xlnx,xcl0-linesize = <0x4>;
+                       xlnx,xcl0-writexfer = <0x1>;
+                       xlnx,xcl1-linesize = <0x4>;
+                       xlnx,xcl1-writexfer = <0x1>;
+                       xlnx,xcl2-linesize = <0x4>;
+                       xlnx,xcl2-writexfer = <0x1>;
+                       xlnx,xcl3-linesize = <0x4>;
+                       xlnx,xcl3-writexfer = <0x1>;
+               } ;
+               Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "xlnx,compound";
+                       ranges ;
+                       ethernet@81c00000 {
+                               compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
+                               interrupt-parent = <&xps_intc_0>;
+                               interrupts = < 5 2 >;
+                               llink-connected = <&PIM3>;
+                               local-mac-address = [ 00 0a 35 00 00 00 ];
+                               reg = < 0x81c00000 0x40 >;
+                               xlnx,bus2core-clk-ratio = <0x1>;
+                               xlnx,phy-type = <0x1>;
+                               xlnx,phyaddr = <0x1>;
+                               xlnx,rxcsum = <0x0>;
+                               xlnx,rxfifo = <0x1000>;
+                               xlnx,temac-type = <0x0>;
+                               xlnx,txcsum = <0x0>;
+                               xlnx,txfifo = <0x1000>;
+                       } ;
+               } ;
+               IIC_EEPROM: i2c@81600000 {
+                       compatible = "xlnx,xps-iic-2.00.a";
+                       interrupt-parent = <&xps_intc_0>;
+                       interrupts = < 6 2 >;
+                       reg = < 0x81600000 0x10000 >;
+                       xlnx,clk-freq = <0x7735940>;
+                       xlnx,family = "virtex5";
+                       xlnx,gpo-width = <0x1>;
+                       xlnx,iic-freq = <0x186a0>;
+                       xlnx,scl-inertial-delay = <0x0>;
+                       xlnx,sda-inertial-delay = <0x0>;
+                       xlnx,ten-bit-adr = <0x0>;
+               } ;
+               LEDs_8Bit: gpio@81400000 {
+                       compatible = "xlnx,xps-gpio-1.00.a";
+                       interrupt-parent = <&xps_intc_0>;
+                       interrupts = < 7 2 >;
+                       reg = < 0x81400000 0x10000 >;
+                       xlnx,all-inputs = <0x0>;
+                       xlnx,all-inputs-2 = <0x0>;
+                       xlnx,dout-default = <0x0>;
+                       xlnx,dout-default-2 = <0x0>;
+                       xlnx,family = "virtex5";
+                       xlnx,gpio-width = <0x8>;
+                       xlnx,interrupt-present = <0x1>;
+                       xlnx,is-bidir = <0x1>;
+                       xlnx,is-bidir-2 = <0x1>;
+                       xlnx,is-dual = <0x0>;
+                       xlnx,tri-default = <0xffffffff>;
+                       xlnx,tri-default-2 = <0xffffffff>;
+                       #gpio-cells = <2>;
+                       gpio-controller;
+               } ;
+
+               gpio-leds {
+                       compatible = "gpio-leds";
+
+                       heartbeat {
+                               label = "Heartbeat";
+                               gpios = <&LEDs_8Bit 4 1>;
+                               linux,default-trigger = "heartbeat";
+                       };
+
+                       yellow {
+                               label = "Yellow";
+                               gpios = <&LEDs_8Bit 5 1>;
+                       };
+
+                       red {
+                               label = "Red";
+                               gpios = <&LEDs_8Bit 6 1>;
+                       };
+
+                       green {
+                               label = "Green";
+                               gpios = <&LEDs_8Bit 7 1>;
+                       };
+               } ;
+               RS232_Uart_1: serial@84000000 {
+                       clock-frequency = <125000000>;
+                       compatible = "xlnx,xps-uartlite-1.00.a";
+                       current-speed = <115200>;
+                       device_type = "serial";
+                       interrupt-parent = <&xps_intc_0>;
+                       interrupts = < 8 0 >;
+                       port-number = <0>;
+                       reg = < 0x84000000 0x10000 >;
+                       xlnx,baudrate = <0x1c200>;
+                       xlnx,data-bits = <0x8>;
+                       xlnx,family = "virtex5";
+                       xlnx,odd-parity = <0x0>;
+                       xlnx,use-parity = <0x0>;
+               } ;
+               SysACE_CompactFlash: sysace@83600000 {
+                       compatible = "xlnx,xps-sysace-1.00.a";
+                       interrupt-parent = <&xps_intc_0>;
+                       interrupts = < 4 2 >;
+                       reg = < 0x83600000 0x10000 >;
+                       xlnx,family = "virtex5";
+                       xlnx,mem-width = <0x10>;
+               } ;
+               debug_module: debug@84400000 {
+                       compatible = "xlnx,mdm-1.00.d";
+                       reg = < 0x84400000 0x10000 >;
+                       xlnx,family = "virtex5";
+                       xlnx,interconnect = <0x1>;
+                       xlnx,jtag-chain = <0x2>;
+                       xlnx,mb-dbg-ports = <0x1>;
+                       xlnx,uart-width = <0x8>;
+                       xlnx,use-uart = <0x1>;
+                       xlnx,write-fsl-ports = <0x0>;
+               } ;
+               mpmc@90000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "xlnx,mpmc-4.02.a";
+                       ranges ;
+                       PIM3: sdma@84600180 {
+                               compatible = "xlnx,ll-dma-1.00.a";
+                               interrupt-parent = <&xps_intc_0>;
+                               interrupts = < 2 2 1 2 >;
+                               reg = < 0x84600180 0x80 >;
+                       } ;
+               } ;
+               xps_intc_0: interrupt-controller@81800000 {
+                       #interrupt-cells = <0x2>;
+                       compatible = "xlnx,xps-intc-1.00.a";
+                       interrupt-controller ;
+                       reg = < 0x81800000 0x10000 >;
+                       xlnx,kind-of-intr = <0x100>;
+                       xlnx,num-intr-inputs = <0x9>;
+               } ;
+               xps_timer_1: timer@83c00000 {
+                       compatible = "xlnx,xps-timer-1.00.a";
+                       interrupt-parent = <&xps_intc_0>;
+                       interrupts = < 3 2 >;
+                       reg = < 0x83c00000 0x10000 >;
+                       xlnx,count-width = <0x20>;
+                       xlnx,family = "virtex5";
+                       xlnx,gen0-assert = <0x1>;
+                       xlnx,gen1-assert = <0x1>;
+                       xlnx,one-timer-only = <0x0>;
+                       xlnx,trig0-assert = <0x1>;
+                       xlnx,trig1-assert = <0x1>;
+               } ;
+       } ;
+}  ;
index 3fbb7f1db3bcdcfbe867c9e6ab7ce3189d0a74c0..1e4c3329f62e5b7be40c1cfe79a565a8d66b77fb 100644 (file)
@@ -15,7 +15,6 @@
 #include <asm/page.h>
 #include <linux/types.h>
 #include <linux/mm.h>          /* Get struct page {...} */
-#include <asm-generic/iomap.h>
 
 #ifndef CONFIG_PCI
 #define _IO_BASE       0
 #define _IO_BASE       isa_io_base
 #define _ISA_MEM_BASE  isa_mem_base
 #define PCI_DRAM_OFFSET        pci_dram_offset
-#endif
+struct pci_dev;
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+#define pci_iounmap pci_iounmap
 
 extern unsigned long isa_io_base;
-extern unsigned long pci_io_base;
 extern unsigned long pci_dram_offset;
-
 extern resource_size_t isa_mem_base;
+#endif
 
+#define PCI_IOBASE     ((void __iomem *)_IO_BASE)
 #define IO_SPACE_LIMIT (0xFFFFFFFF)
 
-/* the following is needed to support PCI with some drivers */
-
-#define mmiowb()
-
-static inline unsigned char __raw_readb(const volatile void __iomem *addr)
-{
-       return *(volatile unsigned char __force *)addr;
-}
-static inline unsigned short __raw_readw(const volatile void __iomem *addr)
-{
-       return *(volatile unsigned short __force *)addr;
-}
-static inline unsigned int __raw_readl(const volatile void __iomem *addr)
-{
-       return *(volatile unsigned int __force *)addr;
-}
-static inline unsigned long __raw_readq(const volatile void __iomem *addr)
-{
-       return *(volatile unsigned long __force *)addr;
-}
-static inline void __raw_writeb(unsigned char v, volatile void __iomem *addr)
-{
-       *(volatile unsigned char __force *)addr = v;
-}
-static inline void __raw_writew(unsigned short v, volatile void __iomem *addr)
-{
-       *(volatile unsigned short __force *)addr = v;
-}
-static inline void __raw_writel(unsigned int v, volatile void __iomem *addr)
-{
-       *(volatile unsigned int __force *)addr = v;
-}
-static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
-{
-       *(volatile unsigned long __force *)addr = v;
-}
-
-/*
- * read (readb, readw, readl, readq) and write (writeb, writew,
- * writel, writeq) accessors are for PCI and thus little endian.
- * Linux 2.4 for Microblaze had this wrong.
- */
-static inline unsigned char readb(const volatile void __iomem *addr)
-{
-       return *(volatile unsigned char __force *)addr;
-}
-static inline unsigned short readw(const volatile void __iomem *addr)
-{
-       return le16_to_cpu(*(volatile unsigned short __force *)addr);
-}
-static inline unsigned int readl(const volatile void __iomem *addr)
-{
-       return le32_to_cpu(*(volatile unsigned int __force *)addr);
-}
-#define readq readq
-static inline u64 readq(const volatile void __iomem *addr)
-{
-       return le64_to_cpu(__raw_readq(addr));
-}
-static inline void writeb(unsigned char v, volatile void __iomem *addr)
-{
-       *(volatile unsigned char __force *)addr = v;
-}
-static inline void writew(unsigned short v, volatile void __iomem *addr)
-{
-       *(volatile unsigned short __force *)addr = cpu_to_le16(v);
-}
-static inline void writel(unsigned int v, volatile void __iomem *addr)
-{
-       *(volatile unsigned int __force *)addr = cpu_to_le32(v);
-}
-#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
-
-/* ioread and iowrite variants. thease are for now same as __raw_
- * variants of accessors. we might check for endianess in the feature
- */
-#define ioread8(addr)          __raw_readb((u8 *)(addr))
-#define ioread16(addr)         __raw_readw((u16 *)(addr))
-#define ioread32(addr)         __raw_readl((u32 *)(addr))
-#define iowrite8(v, addr)      __raw_writeb((u8)(v), (u8 *)(addr))
-#define iowrite16(v, addr)     __raw_writew((u16)(v), (u16 *)(addr))
-#define iowrite32(v, addr)     __raw_writel((u32)(v), (u32 *)(addr))
-
-#define ioread16be(addr)       __raw_readw((u16 *)(addr))
-#define ioread32be(addr)       __raw_readl((u32 *)(addr))
-#define iowrite16be(v, addr)   __raw_writew((u16)(v), (u16 *)(addr))
-#define iowrite32be(v, addr)   __raw_writel((u32)(v), (u32 *)(addr))
-
-/* These are the definitions for the x86 IO instructions
- * inb/inw/inl/outb/outw/outl, the "string" versions
- * insb/insw/insl/outsb/outsw/outsl, and the "pausing" versions
- * inb_p/inw_p/...
- * The macros don't do byte-swapping.
- */
-#define inb(port)              readb((u8 *)((unsigned long)(port)))
-#define outb(val, port)                writeb((val), (u8 *)((unsigned long)(port)))
-#define inw(port)              readw((u16 *)((unsigned long)(port)))
-#define outw(val, port)                writew((val), (u16 *)((unsigned long)(port)))
-#define inl(port)              readl((u32 *)((unsigned long)(port)))
-#define outl(val, port)                writel((val), (u32 *)((unsigned long)(port)))
-
-#define inb_p(port)            inb((port))
-#define outb_p(val, port)      outb((val), (port))
-#define inw_p(port)            inw((port))
-#define outw_p(val, port)      outw((val), (port))
-#define inl_p(port)            inl((port))
-#define outl_p(val, port)      outl((val), (port))
-
-#define memset_io(a, b, c)     memset((void *)(a), (b), (c))
-#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
-#define memcpy_toio(a, b, c)   memcpy((void *)(a), (b), (c))
-
 #ifdef CONFIG_MMU
-
-#define phys_to_virt(addr)     ((void *)__phys_to_virt(addr))
-#define virt_to_phys(addr)     ((unsigned long)__virt_to_phys(addr))
-#define virt_to_bus(addr)      ((unsigned long)__virt_to_phys(addr))
-
 #define page_to_bus(page)      (page_to_phys(page))
-#define bus_to_virt(addr)      (phys_to_virt(addr))
 
 extern void iounmap(void __iomem *addr);
-/*extern void *__ioremap(phys_addr_t address, unsigned long size,
-               unsigned long flags);*/
-extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-#define ioremap_writethrough(addr, size) ioremap((addr), (size))
-#define ioremap_nocache(addr, size)      ioremap((addr), (size))
-#define ioremap_fullcache(addr, size)    ioremap((addr), (size))
-
-#else /* CONFIG_MMU */
-
-/**
- *     virt_to_phys - map virtual addresses to physical
- *     @address: address to remap
- *
- *     The returned physical address is the physical (CPU) mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses directly mapped or allocated via kmalloc.
- *
- *     This function does not give bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-static inline unsigned long __iomem virt_to_phys(volatile void *address)
-{
-       return __pa((unsigned long)address);
-}
-
-#define virt_to_bus virt_to_phys
-
-/**
- *     phys_to_virt - map physical address to virtual
- *     @address: address to remap
- *
- *     The returned virtual address is a current CPU mapping for
- *     the memory address given. It is only valid to use this function on
- *     addresses that have a kernel mapping
- *
- *     This function does not handle bus mappings for DMA transfers. In
- *     almost all conceivable cases a device driver should not be using
- *     this function
- */
-static inline void *phys_to_virt(unsigned long address)
-{
-       return (void *)__va(address);
-}
 
-#define bus_to_virt(a) phys_to_virt(a)
-
-static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
-                       unsigned long flags)
-{
-       return (void *)address;
-}
-
-#define ioremap(physaddr, size)        ((void __iomem *)(unsigned long)(physaddr))
-#define iounmap(addr)          ((void)0)
-#define ioremap_nocache(physaddr, size)        ioremap(physaddr, size)
+extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
+#define ioremap_writethrough(addr, size)       ioremap((addr), (size))
+#define ioremap_nocache(addr, size)            ioremap((addr), (size))
+#define ioremap_fullcache(addr, size)          ioremap((addr), (size))
+#define ioremap_wc(addr, size)                 ioremap((addr), (size))
 
 #endif /* CONFIG_MMU */
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)  p
-
-/*
- * Big Endian
- */
+/* Big Endian */
 #define out_be32(a, v) __raw_writel((v), (void __iomem __force *)(a))
 #define out_be16(a, v) __raw_writew((v), (a))
 
@@ -239,10 +59,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define writel_be(v, a)        out_be32((__force unsigned *)a, v)
 #define readl_be(a)    in_be32((__force unsigned *)a)
 
-/*
- * Little endian
- */
-
+/* Little endian */
 #define out_le32(a, v) __raw_writel(__cpu_to_le32(v), (a))
 #define out_le16(a, v) __raw_writew(__cpu_to_le16(v), (a))
 
@@ -253,100 +70,7 @@ static inline void __iomem *__ioremap(phys_addr_t address, unsigned long size,
 #define out_8(a, v) __raw_writeb((v), (a))
 #define in_8(a) __raw_readb(a)
 
-#define mmiowb()
-
-#define ioport_map(port, nr)   ((void __iomem *)(port))
-#define ioport_unmap(addr)
-
-/* from asm-generic/io.h */
-#ifndef insb
-static inline void insb(unsigned long addr, void *buffer, int count)
-{
-       if (count) {
-               u8 *buf = buffer;
-               do {
-                       u8 x = inb(addr);
-                       *buf++ = x;
-               } while (--count);
-       }
-}
-#endif
-
-#ifndef insw
-static inline void insw(unsigned long addr, void *buffer, int count)
-{
-       if (count) {
-               u16 *buf = buffer;
-               do {
-                       u16 x = inw(addr);
-                       *buf++ = x;
-               } while (--count);
-       }
-}
-#endif
-
-#ifndef insl
-static inline void insl(unsigned long addr, void *buffer, int count)
-{
-       if (count) {
-               u32 *buf = buffer;
-               do {
-                       u32 x = inl(addr);
-                       *buf++ = x;
-               } while (--count);
-       }
-}
-#endif
-
-#ifndef outsb
-static inline void outsb(unsigned long addr, const void *buffer, int count)
-{
-       if (count) {
-               const u8 *buf = buffer;
-               do {
-                       outb(*buf++, addr);
-               } while (--count);
-       }
-}
-#endif
-
-#ifndef outsw
-static inline void outsw(unsigned long addr, const void *buffer, int count)
-{
-       if (count) {
-               const u16 *buf = buffer;
-               do {
-                       outw(*buf++, addr);
-               } while (--count);
-       }
-}
-#endif
-
-#ifndef outsl
-static inline void outsl(unsigned long addr, const void *buffer, int count)
-{
-       if (count) {
-               const u32 *buf = buffer;
-               do {
-                       outl(*buf++, addr);
-               } while (--count);
-       }
-}
-#endif
-
-#define ioread8_rep(p, dst, count) \
-       insb((unsigned long) (p), (dst), (count))
-#define ioread16_rep(p, dst, count) \
-       insw((unsigned long) (p), (dst), (count))
-#define ioread32_rep(p, dst, count) \
-       insl((unsigned long) (p), (dst), (count))
-
-#define iowrite8_rep(p, src, count) \
-       outsb((unsigned long) (p), (src), (count))
-#define iowrite16_rep(p, src, count) \
-       outsw((unsigned long) (p), (src), (count))
-#define iowrite32_rep(p, src, count) \
-       outsl((unsigned long) (p), (src), (count))
+#include <asm-generic/io.h>
 
 #define readb_relaxed  readb
 #define readw_relaxed  readw
index d6e0ffea28b67d9f1cf43e6608cdc872bba2e49a..9d31b057c3553da4d7f697179ef0c92a4223d5df 100644 (file)
@@ -122,7 +122,7 @@ struct thread_struct {
 }
 
 /* Free all resources held by a thread. */
-extern inline void release_thread(struct task_struct *dead_task)
+static inline void release_thread(struct task_struct *dead_task)
 {
 }
 
index f05df5630c842a8033b24125e29b4e6030644fdf..be84a4d3917fc1c901bc71c6ca44362c28680257 100644 (file)
@@ -19,14 +19,12 @@ extern char cmd_line[COMMAND_LINE_SIZE];
 
 extern char *klimit;
 
-void early_printk(const char *fmt, ...);
-
 int setup_early_printk(char *opt);
 void remap_early_printk(void);
 void disable_early_printk(void);
 
-void heartbeat(void);
-void setup_heartbeat(void);
+void microblaze_heartbeat(void);
+void microblaze_setup_heartbeat(void);
 
 #   ifdef CONFIG_MMU
 extern void mmu_reset(void);
index 20043b67d158ddd07ff6fbf36050f28562fd96e1..8d0791b49b31a8c6b121f8088c93188d0401a519 100644 (file)
@@ -93,7 +93,7 @@
 #define __NR_settimeofday      79 /* ok */
 #define __NR_getgroups         80 /* ok */
 #define __NR_setgroups         81 /* ok */
-#define __NR_select            82 /* obsolete -> sys_pselect7 */
+#define __NR_select            82 /* obsolete -> sys_pselect6 */
 #define __NR_symlink           83 /* symlinkat */
 #define __NR_oldlstat          84 /* remove */
 #define __NR_readlink          85 /* obsolete -> sys_readlinkat */
 #define __NR_readlinkat                305 /* ok */
 #define __NR_fchmodat          306 /* ok */
 #define __NR_faccessat         307 /* ok */
-#define __NR_pselect6          308 /* obsolete -> sys_pselect7 */
+#define __NR_pselect6          308 /* ok */
 #define __NR_ppoll             309 /* ok */
 #define __NR_unshare           310 /* ok */
 #define __NR_set_robust_list   311 /* ok */
 #define __NR_process_vm_writev 378
 #define __NR_kcmp              379
 #define __NR_finit_module      380
+#define __NR_sched_setattr     381
+#define __NR_sched_getattr     382
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 5b0e512c78e58c319d6dd32bce88f5af1a91b8a2..08d50cc55e7de28cc63f5126af527a62a2ef6bd7 100644 (file)
@@ -16,7 +16,7 @@ extra-y := head.o vmlinux.lds
 
 obj-y += dma.o exceptions.o \
        hw_exception_handler.o intc.o irq.o \
-       process.o prom.o prom_parse.o ptrace.o \
+       platform.o process.o prom.o prom_parse.o ptrace.o \
        reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
 
 obj-y += cpu/
index 1879a05277760228931cd49f016e99d9f92487fa..4643e3ab94149503252c0ee61d7c9cf8df55777a 100644 (file)
@@ -17,7 +17,7 @@
 
 static unsigned int base_addr;
 
-void heartbeat(void)
+void microblaze_heartbeat(void)
 {
        static unsigned int cnt, period, dist;
 
@@ -42,7 +42,7 @@ void heartbeat(void)
        }
 }
 
-void setup_heartbeat(void)
+void microblaze_setup_heartbeat(void)
 {
        struct device_node *gpio = NULL;
        int *prop;
index 581451ad468778a9faae82bafd3684b90606b779..15c7c12ea0e777183c321872c57c8e0a335bc3df 100644 (file)
@@ -32,6 +32,29 @@ static void __iomem *intc_baseaddr;
 #define MER_ME (1<<0)
 #define MER_HIE (1<<1)
 
+static unsigned int (*read_fn)(void __iomem *);
+static void (*write_fn)(u32, void __iomem *);
+
+static void intc_write32(u32 val, void __iomem *addr)
+{
+       iowrite32(val, addr);
+}
+
+static unsigned int intc_read32(void __iomem *addr)
+{
+       return ioread32(addr);
+}
+
+static void intc_write32_be(u32 val, void __iomem *addr)
+{
+       iowrite32be(val, addr);
+}
+
+static unsigned int intc_read32_be(void __iomem *addr)
+{
+       return ioread32be(addr);
+}
+
 static void intc_enable_or_unmask(struct irq_data *d)
 {
        unsigned long mask = 1 << d->hwirq;
@@ -43,21 +66,21 @@ static void intc_enable_or_unmask(struct irq_data *d)
         * acks the irq before calling the interrupt handler
         */
        if (irqd_is_level_type(d))
-               out_be32(intc_baseaddr + IAR, mask);
+               write_fn(mask, intc_baseaddr + IAR);
 
-       out_be32(intc_baseaddr + SIE, mask);
+       write_fn(mask, intc_baseaddr + SIE);
 }
 
 static void intc_disable_or_mask(struct irq_data *d)
 {
        pr_debug("disable: %ld\n", d->hwirq);
-       out_be32(intc_baseaddr + CIE, 1 << d->hwirq);
+       write_fn(1 << d->hwirq, intc_baseaddr + CIE);
 }
 
 static void intc_ack(struct irq_data *d)
 {
        pr_debug("ack: %ld\n", d->hwirq);
-       out_be32(intc_baseaddr + IAR, 1 << d->hwirq);
+       write_fn(1 << d->hwirq, intc_baseaddr + IAR);
 }
 
 static void intc_mask_ack(struct irq_data *d)
@@ -65,8 +88,8 @@ static void intc_mask_ack(struct irq_data *d)
        unsigned long mask = 1 << d->hwirq;
 
        pr_debug("disable_and_ack: %ld\n", d->hwirq);
-       out_be32(intc_baseaddr + CIE, mask);
-       out_be32(intc_baseaddr + IAR, mask);
+       write_fn(mask, intc_baseaddr + CIE);
+       write_fn(mask, intc_baseaddr + IAR);
 }
 
 static struct irq_chip intc_dev = {
@@ -83,7 +106,7 @@ unsigned int get_irq(void)
 {
        unsigned int hwirq, irq = -1;
 
-       hwirq = in_be32(intc_baseaddr + IVR);
+       hwirq = read_fn(intc_baseaddr + IVR);
        if (hwirq != -1U)
                irq = irq_find_mapping(root_domain, hwirq);
 
@@ -140,17 +163,25 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
        pr_info("%s: num_irq=%d, edge=0x%x\n",
                intc->full_name, nr_irq, intr_mask);
 
+       write_fn = intc_write32;
+       read_fn = intc_read32;
+
        /*
         * Disable all external interrupts until they are
         * explicity requested.
         */
-       out_be32(intc_baseaddr + IER, 0);
+       write_fn(0, intc_baseaddr + IER);
 
        /* Acknowledge any pending interrupts just in case. */
-       out_be32(intc_baseaddr + IAR, 0xffffffff);
+       write_fn(0xffffffff, intc_baseaddr + IAR);
 
        /* Turn on the Master Enable. */
-       out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
+       write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
+       if (!(read_fn(intc_baseaddr + MER) & (MER_HIE | MER_ME))) {
+               write_fn = intc_write32_be;
+               read_fn = intc_read32_be;
+               write_fn(MER_HIE | MER_ME, intc_baseaddr + MER);
+       }
 
        /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
         * lazy and Michal can clean it up to something nicer when he tests
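
Taken together, the intc.c changes above add a small run-time endianness probe: the driver first installs little-endian accessors, writes the Master Enable bits, and reads them back; if the bits do not stick, the peripheral is evidently wired big-endian and the accessors are swapped for the iowrite32be/ioread32be variants. Condensed into one place (register offset MER and its bits as in the hunk; the standalone helper below is a sketch assembled from that code, not a function in the file), the detection reads:

        static unsigned int (*read_fn)(void __iomem *);
        static void (*write_fn)(u32, void __iomem *);

        static void __init intc_detect_endianness(void __iomem *base)
        {
                write_fn = intc_write32;                /* iowrite32() wrapper  */
                read_fn  = intc_read32;                 /* ioread32() wrapper   */

                /* Try to set the Master Enable bits with little-endian accesses. */
                write_fn(MER_HIE | MER_ME, base + MER);
                if (!(read_fn(base + MER) & (MER_HIE | MER_ME))) {
                        /* The value did not stick: the controller is big-endian. */
                        write_fn = intc_write32_be;     /* iowrite32be() wrapper */
                        read_fn  = intc_read32_be;      /* ioread32be() wrapper  */
                        write_fn(MER_HIE | MER_ME, base + MER);
                }
        }

The timer.c hunks later in this series apply the same probe to TCSR0 using the TCSR_MDT bit before any other timer register is touched.
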
diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c
new file mode 100644 (file)
index 0000000..b9529ca
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008 Michal Simek <monstr@monstr.eu>
+ *
+ * based on virtex.c file
+ *
+ * Copyright 2007 Secret Lab Technologies Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <asm/prom.h>
+#include <asm/setup.h>
+
+static struct of_device_id xilinx_of_bus_ids[] __initdata = {
+       { .compatible = "simple-bus", },
+       { .compatible = "xlnx,compound", },
+       {}
+};
+
+static int __init microblaze_device_probe(void)
+{
+       of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
+       of_platform_reset_gpio_probe();
+       return 0;
+}
+device_initcall(microblaze_device_probe);
index 7d1a9c8b1f3dfff37cad0e5a87d9372f9030a3e8..b2dd37196b3b165fa291c0c0040056f8dda16063 100644 (file)
@@ -8,6 +8,7 @@
  * for more details.
  */
 
+#include <linux/cpu.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/pm.h>
index d26d7e7a691320256238b96a7d6c67cc1bbcdaa3..49a07a4d76d0ecb0a88e6a0b07c2664ebdae537b 100644 (file)
@@ -216,7 +216,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                /* MS: I need add offset in page */
                address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
                /* MS address is virtual */
-               address = virt_to_phys(address);
+               address = __virt_to_phys(address);
                invalidate_icache_range(address, address + 8);
                flush_dcache_range(address, address + 8);
        }
index b882ad50535b8b1143af60b5af1b3de007060fb6..329dfbad810bca1327a5501c82c60d01285e8cd0 100644 (file)
@@ -308,7 +308,7 @@ ENTRY(sys_call_table)
        .long sys_readlinkat            /* 305 */
        .long sys_fchmodat
        .long sys_faccessat
-       .long sys_ni_syscall /* pselect6 */
+       .long sys_pselect6
        .long sys_ppoll
        .long sys_unshare               /* 310 */
        .long sys_set_robust_list
@@ -363,8 +363,8 @@ ENTRY(sys_call_table)
        .long sys_sendmsg               /* 360 */
        .long sys_recvmsg
        .long sys_accept4
-       .long sys_ni_syscall
-       .long sys_ni_syscall
+       .long sys_preadv
+       .long sys_pwritev
        .long sys_rt_tgsigqueueinfo     /* 365 */
        .long sys_perf_event_open
        .long sys_recvmmsg
@@ -381,3 +381,5 @@ ENTRY(sys_call_table)
        .long sys_process_vm_writev
        .long sys_kcmp
        .long sys_finit_module
+       .long sys_sched_setattr
+       .long sys_sched_getattr
index fb0c61443f196ebbe7776e2d88108280bbc80025..dd96f0e4bfa2bba632a1d2bdb96a4b3864876679 100644 (file)
@@ -43,10 +43,33 @@ static unsigned int timer_clock_freq;
 #define TCSR_PWMA      (1<<9)
 #define TCSR_ENALL     (1<<10)
 
+static unsigned int (*read_fn)(void __iomem *);
+static void (*write_fn)(u32, void __iomem *);
+
+static void timer_write32(u32 val, void __iomem *addr)
+{
+       iowrite32(val, addr);
+}
+
+static unsigned int timer_read32(void __iomem *addr)
+{
+       return ioread32(addr);
+}
+
+static void timer_write32_be(u32 val, void __iomem *addr)
+{
+       iowrite32be(val, addr);
+}
+
+static unsigned int timer_read32_be(void __iomem *addr)
+{
+       return ioread32be(addr);
+}
+
 static inline void xilinx_timer0_stop(void)
 {
-       out_be32(timer_baseaddr + TCSR0,
-                in_be32(timer_baseaddr + TCSR0) & ~TCSR_ENT);
+       write_fn(read_fn(timer_baseaddr + TCSR0) & ~TCSR_ENT,
+                timer_baseaddr + TCSR0);
 }
 
 static inline void xilinx_timer0_start_periodic(unsigned long load_val)
@@ -54,10 +77,10 @@ static inline void xilinx_timer0_start_periodic(unsigned long load_val)
        if (!load_val)
                load_val = 1;
        /* loading value to timer reg */
-       out_be32(timer_baseaddr + TLR0, load_val);
+       write_fn(load_val, timer_baseaddr + TLR0);
 
        /* load the initial value */
-       out_be32(timer_baseaddr + TCSR0, TCSR_LOAD);
+       write_fn(TCSR_LOAD, timer_baseaddr + TCSR0);
 
        /* see timer data sheet for detail
         * !ENALL - don't enable 'em all
@@ -72,8 +95,8 @@ static inline void xilinx_timer0_start_periodic(unsigned long load_val)
         * UDT - set the timer as down counter
         * !MDT0 - generate mode
         */
-       out_be32(timer_baseaddr + TCSR0,
-                       TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT);
+       write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT,
+                timer_baseaddr + TCSR0);
 }
 
 static inline void xilinx_timer0_start_oneshot(unsigned long load_val)
@@ -81,13 +104,13 @@ static inline void xilinx_timer0_start_oneshot(unsigned long load_val)
        if (!load_val)
                load_val = 1;
        /* loading value to timer reg */
-       out_be32(timer_baseaddr + TLR0, load_val);
+       write_fn(load_val, timer_baseaddr + TLR0);
 
        /* load the initial value */
-       out_be32(timer_baseaddr + TCSR0, TCSR_LOAD);
+       write_fn(TCSR_LOAD, timer_baseaddr + TCSR0);
 
-       out_be32(timer_baseaddr + TCSR0,
-                       TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT);
+       write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT,
+                timer_baseaddr + TCSR0);
 }
 
 static int xilinx_timer_set_next_event(unsigned long delta,
@@ -133,14 +156,14 @@ static struct clock_event_device clockevent_xilinx_timer = {
 
 static inline void timer_ack(void)
 {
-       out_be32(timer_baseaddr + TCSR0, in_be32(timer_baseaddr + TCSR0));
+       write_fn(read_fn(timer_baseaddr + TCSR0), timer_baseaddr + TCSR0);
 }
 
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evt = &clockevent_xilinx_timer;
 #ifdef CONFIG_HEART_BEAT
-       heartbeat();
+       microblaze_heartbeat();
 #endif
        timer_ack();
        evt->event_handler(evt);
@@ -169,7 +192,7 @@ static __init void xilinx_clockevent_init(void)
 
 static u64 xilinx_clock_read(void)
 {
-       return in_be32(timer_baseaddr + TCR1);
+       return read_fn(timer_baseaddr + TCR1);
 }
 
 static cycle_t xilinx_read(struct clocksource *cs)
@@ -217,10 +240,10 @@ static int __init xilinx_clocksource_init(void)
                panic("failed to register clocksource");
 
        /* stop timer1 */
-       out_be32(timer_baseaddr + TCSR1,
-                in_be32(timer_baseaddr + TCSR1) & ~TCSR_ENT);
+       write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
+                timer_baseaddr + TCSR1);
        /* start timer1 - up counting without interrupt */
-       out_be32(timer_baseaddr + TCSR1, TCSR_TINT|TCSR_ENT|TCSR_ARHT);
+       write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
 
        /* register timecounter - for ftrace support */
        init_xilinx_timecounter();
@@ -245,6 +268,15 @@ static void __init xilinx_timer_init(struct device_node *timer)
                BUG();
        }
 
+       write_fn = timer_write32;
+       read_fn = timer_read32;
+
+       write_fn(TCSR_MDT, timer_baseaddr + TCSR0);
+       if (!(read_fn(timer_baseaddr + TCSR0) & TCSR_MDT)) {
+               write_fn = timer_write32_be;
+               read_fn = timer_read32_be;
+       }
+
        irq = irq_of_parse_and_map(timer, 0);
 
        of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
@@ -274,7 +306,7 @@ static void __init xilinx_timer_init(struct device_node *timer)
 
        setup_irq(irq, &timer_irqaction);
 #ifdef CONFIG_HEART_BEAT
-       setup_heartbeat();
+       microblaze_setup_heartbeat();
 #endif
        xilinx_clocksource_init();
        xilinx_clockevent_init();
index dbbf2246a260fcb3e6d90e78b65655416ccf7d45..e10ad930895e23a5d8e229ba3267eb9344dc772e 100644 (file)
@@ -117,7 +117,7 @@ void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
        ret = (void *)va;
 
        /* This gives us the real physical address of the first page. */
-       *dma_handle = pa = virt_to_bus((void *)vaddr);
+       *dma_handle = pa = __virt_to_phys(vaddr);
 #endif
 
        /*
index 89077d34671458acf121c47224674289eb84e15d..77bc7c7e6522a8f4a792d1bac70e49493775c102 100644 (file)
@@ -369,7 +369,7 @@ asmlinkage void __init mmu_init(void)
        if (initrd_start) {
                unsigned long size;
                size = initrd_end - initrd_start;
-               memblock_reserve(virt_to_phys(initrd_start), size);
+               memblock_reserve(__virt_to_phys(initrd_start), size);
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
index 10b3bd0a980d7bacb7fec3941ba32320766a4ec0..4f4520e779a5bf06ccfc69c7b3671961c1967dff 100644 (file)
@@ -69,10 +69,11 @@ static void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
         *
         * However, allow remap of rootfs: TBD
         */
+
        if (mem_init_done &&
                p >= memory_start && p < virt_to_phys(high_memory) &&
-               !(p >= virt_to_phys((unsigned long)&__bss_stop) &&
-               p < virt_to_phys((unsigned long)__bss_stop))) {
+               !(p >= __virt_to_phys((phys_addr_t)__bss_stop) &&
+               p < __virt_to_phys((phys_addr_t)__bss_stop))) {
                pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n",
                        (unsigned long)p, __builtin_return_address(0));
                return NULL;
diff --git a/arch/microblaze/platform/Kconfig.platform b/arch/microblaze/platform/Kconfig.platform
deleted file mode 100644 (file)
index db1aa5c..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-# For a description of the syntax of this configuration file,
-# see Documentation/kbuild/kconfig-language.txt.
-#
-# Platform selection Kconfig menu for MicroBlaze targets
-#
-
-menu "Platform options"
-choice
-       prompt "Platform"
-       default PLATFORM_MICROBLAZE_AUTO
-       help
-         Choose which hardware board/platform you are targeting.
-
-config PLATFORM_GENERIC
-       bool "Generic"
-       help
-         Choose this option for the Generic platform.
-
-endchoice
-
-config OPT_LIB_FUNCTION
-       bool "Optimalized lib function"
-       default y
-       help
-         Allows turn on optimalized library function (memcpy and memmove).
-         They are optimized by using word alignment. This will work
-         fine if both source and destination are aligned on the same
-         boundary. However, if they are aligned on different boundaries
-         shifts will be necessary. This might result in bad performance
-         on MicroBlaze systems without a barrel shifter.
-
-config OPT_LIB_ASM
-       bool "Optimalized lib function ASM"
-       depends on OPT_LIB_FUNCTION && (XILINX_MICROBLAZE0_USE_BARREL = 1)
-       default n
-       help
-         Allows turn on optimalized library function (memcpy and memmove).
-         Function are written in asm code.
-
-if PLATFORM_GENERIC=y
-       source "arch/microblaze/platform/generic/Kconfig.auto"
-endif
-
-endmenu
diff --git a/arch/microblaze/platform/Makefile b/arch/microblaze/platform/Makefile
deleted file mode 100644 (file)
index ea1b75c..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#
-# Makefile for arch/microblaze/platform directory
-#
-#obj-$(CONFIG_PLATFORM_GENERIC) += generic/
-
-obj-y  += platform.o
diff --git a/arch/microblaze/platform/generic/Kconfig.auto b/arch/microblaze/platform/generic/Kconfig.auto
deleted file mode 100644 (file)
index 25a6f01..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# (C) Copyright 2007 Michal Simek
-#
-# Michal SIMEK <monstr@monstr.eu>
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License as
-# published by the Free Software Foundation; either version 2 of
-# the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
-# MA 02111-1307 USA
-#
-
-# Definitions for MICROBLAZE0
-comment "Definitions for MICROBLAZE0"
-
-config KERNEL_BASE_ADDR
-       hex "Physical address where Linux Kernel is"
-       default "0x90000000"
-       help
-         BASE Address for kernel
-
-config XILINX_MICROBLAZE0_FAMILY
-       string "Targeted FPGA family"
-       default "virtex5"
-
-config XILINX_MICROBLAZE0_USE_MSR_INSTR
-       int "USE_MSR_INSTR range (0:1)"
-       default 0
-
-config XILINX_MICROBLAZE0_USE_PCMP_INSTR
-       int "USE_PCMP_INSTR range (0:1)"
-       default 0
-
-config XILINX_MICROBLAZE0_USE_BARREL
-       int "USE_BARREL range (0:1)"
-       default 0
-
-config XILINX_MICROBLAZE0_USE_DIV
-       int "USE_DIV range (0:1)"
-       default 0
-
-config XILINX_MICROBLAZE0_USE_HW_MUL
-       int "USE_HW_MUL values (0=NONE, 1=MUL32, 2=MUL64)"
-       default 0
-
-config XILINX_MICROBLAZE0_USE_FPU
-       int "USE_FPU values (0=NONE, 1=BASIC, 2=EXTENDED)"
-       default 0
-
-config XILINX_MICROBLAZE0_HW_VER
-       string "Core version number"
-       default 7.10.d
diff --git a/arch/microblaze/platform/generic/Makefile b/arch/microblaze/platform/generic/Makefile
deleted file mode 100644 (file)
index 9a8b1bd..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-#
-# Empty Makefile to keep make clean happy
-#
diff --git a/arch/microblaze/platform/generic/system.dts b/arch/microblaze/platform/generic/system.dts
deleted file mode 100644 (file)
index b620da2..0000000
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Device Tree Generator version: 1.1
- *
- * (C) Copyright 2007-2008 Xilinx, Inc.
- * (C) Copyright 2007-2009 Michal Simek
- *
- * Michal SIMEK <monstr@monstr.eu>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- *
- * CAUTION: This file is automatically generated by libgen.
- * Version: Xilinx EDK 10.1.03 EDK_K_SP3.6
- *
- * XPS project directory: Xilinx-ML505-ll_temac-sgdma-MMU-FDT-edk101
- */
-
-/dts-v1/;
-/ {
-       #address-cells = <1>;
-       #size-cells = <1>;
-       compatible = "xlnx,microblaze";
-       hard-reset-gpios = <&LEDs_8Bit 2 1>;
-       model = "testing";
-       DDR2_SDRAM: memory@90000000 {
-               device_type = "memory";
-               reg = < 0x90000000 0x10000000 >;
-       } ;
-       aliases {
-               ethernet0 = &Hard_Ethernet_MAC;
-               serial0 = &RS232_Uart_1;
-       } ;
-       chosen {
-               bootargs = "console=ttyUL0,115200 highres=on";
-               linux,stdout-path = "/plb@0/serial@84000000";
-       } ;
-       cpus {
-               #address-cells = <1>;
-               #cpus = <0x1>;
-               #size-cells = <0>;
-               microblaze_0: cpu@0 {
-                       clock-frequency = <125000000>;
-                       compatible = "xlnx,microblaze-7.10.d";
-                       d-cache-baseaddr = <0x90000000>;
-                       d-cache-highaddr = <0x9fffffff>;
-                       d-cache-line-size = <0x10>;
-                       d-cache-size = <0x2000>;
-                       device_type = "cpu";
-                       i-cache-baseaddr = <0x90000000>;
-                       i-cache-highaddr = <0x9fffffff>;
-                       i-cache-line-size = <0x10>;
-                       i-cache-size = <0x2000>;
-                       model = "microblaze,7.10.d";
-                       reg = <0>;
-                       timebase-frequency = <125000000>;
-                       xlnx,addr-tag-bits = <0xf>;
-                       xlnx,allow-dcache-wr = <0x1>;
-                       xlnx,allow-icache-wr = <0x1>;
-                       xlnx,area-optimized = <0x0>;
-                       xlnx,cache-byte-size = <0x2000>;
-                       xlnx,d-lmb = <0x1>;
-                       xlnx,d-opb = <0x0>;
-                       xlnx,d-plb = <0x1>;
-                       xlnx,data-size = <0x20>;
-                       xlnx,dcache-addr-tag = <0xf>;
-                       xlnx,dcache-always-used = <0x1>;
-                       xlnx,dcache-byte-size = <0x2000>;
-                       xlnx,dcache-line-len = <0x4>;
-                       xlnx,dcache-use-fsl = <0x1>;
-                       xlnx,debug-enabled = <0x1>;
-                       xlnx,div-zero-exception = <0x1>;
-                       xlnx,dopb-bus-exception = <0x0>;
-                       xlnx,dynamic-bus-sizing = <0x1>;
-                       xlnx,edge-is-positive = <0x1>;
-                       xlnx,family = "virtex5";
-                       xlnx,endianness = <0x1>;
-                       xlnx,fpu-exception = <0x1>;
-                       xlnx,fsl-data-size = <0x20>;
-                       xlnx,fsl-exception = <0x0>;
-                       xlnx,fsl-links = <0x0>;
-                       xlnx,i-lmb = <0x1>;
-                       xlnx,i-opb = <0x0>;
-                       xlnx,i-plb = <0x1>;
-                       xlnx,icache-always-used = <0x1>;
-                       xlnx,icache-line-len = <0x4>;
-                       xlnx,icache-use-fsl = <0x1>;
-                       xlnx,ill-opcode-exception = <0x1>;
-                       xlnx,instance = "microblaze_0";
-                       xlnx,interconnect = <0x1>;
-                       xlnx,interrupt-is-edge = <0x0>;
-                       xlnx,iopb-bus-exception = <0x0>;
-                       xlnx,mmu-dtlb-size = <0x4>;
-                       xlnx,mmu-itlb-size = <0x2>;
-                       xlnx,mmu-tlb-access = <0x3>;
-                       xlnx,mmu-zones = <0x10>;
-                       xlnx,number-of-pc-brk = <0x1>;
-                       xlnx,number-of-rd-addr-brk = <0x0>;
-                       xlnx,number-of-wr-addr-brk = <0x0>;
-                       xlnx,opcode-0x0-illegal = <0x1>;
-                       xlnx,pvr = <0x2>;
-                       xlnx,pvr-user1 = <0x0>;
-                       xlnx,pvr-user2 = <0x0>;
-                       xlnx,reset-msr = <0x0>;
-                       xlnx,sco = <0x0>;
-                       xlnx,unaligned-exceptions = <0x1>;
-                       xlnx,use-barrel = <0x1>;
-                       xlnx,use-dcache = <0x1>;
-                       xlnx,use-div = <0x1>;
-                       xlnx,use-ext-brk = <0x1>;
-                       xlnx,use-ext-nm-brk = <0x1>;
-                       xlnx,use-extended-fsl-instr = <0x0>;
-                       xlnx,use-fpu = <0x2>;
-                       xlnx,use-hw-mul = <0x2>;
-                       xlnx,use-icache = <0x1>;
-                       xlnx,use-interrupt = <0x1>;
-                       xlnx,use-mmu = <0x3>;
-                       xlnx,use-msr-instr = <0x1>;
-                       xlnx,use-pcmp-instr = <0x1>;
-               } ;
-       } ;
-       mb_plb: plb@0 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "xlnx,plb-v46-1.03.a", "xlnx,plb-v46-1.00.a", "simple-bus";
-               ranges ;
-               FLASH: flash@a0000000 {
-                       bank-width = <2>;
-                       compatible = "xlnx,xps-mch-emc-2.00.a", "cfi-flash";
-                       reg = < 0xa0000000 0x2000000 >;
-                       xlnx,family = "virtex5";
-                       xlnx,include-datawidth-matching-0 = <0x1>;
-                       xlnx,include-datawidth-matching-1 = <0x0>;
-                       xlnx,include-datawidth-matching-2 = <0x0>;
-                       xlnx,include-datawidth-matching-3 = <0x0>;
-                       xlnx,include-negedge-ioregs = <0x0>;
-                       xlnx,include-plb-ipif = <0x1>;
-                       xlnx,include-wrbuf = <0x1>;
-                       xlnx,max-mem-width = <0x10>;
-                       xlnx,mch-native-dwidth = <0x20>;
-                       xlnx,mch-plb-clk-period-ps = <0x1f40>;
-                       xlnx,mch-splb-awidth = <0x20>;
-                       xlnx,mch0-accessbuf-depth = <0x10>;
-                       xlnx,mch0-protocol = <0x0>;
-                       xlnx,mch0-rddatabuf-depth = <0x10>;
-                       xlnx,mch1-accessbuf-depth = <0x10>;
-                       xlnx,mch1-protocol = <0x0>;
-                       xlnx,mch1-rddatabuf-depth = <0x10>;
-                       xlnx,mch2-accessbuf-depth = <0x10>;
-                       xlnx,mch2-protocol = <0x0>;
-                       xlnx,mch2-rddatabuf-depth = <0x10>;
-                       xlnx,mch3-accessbuf-depth = <0x10>;
-                       xlnx,mch3-protocol = <0x0>;
-                       xlnx,mch3-rddatabuf-depth = <0x10>;
-                       xlnx,mem0-width = <0x10>;
-                       xlnx,mem1-width = <0x20>;
-                       xlnx,mem2-width = <0x20>;
-                       xlnx,mem3-width = <0x20>;
-                       xlnx,num-banks-mem = <0x1>;
-                       xlnx,num-channels = <0x0>;
-                       xlnx,priority-mode = <0x0>;
-                       xlnx,synch-mem-0 = <0x0>;
-                       xlnx,synch-mem-1 = <0x0>;
-                       xlnx,synch-mem-2 = <0x0>;
-                       xlnx,synch-mem-3 = <0x0>;
-                       xlnx,synch-pipedelay-0 = <0x2>;
-                       xlnx,synch-pipedelay-1 = <0x2>;
-                       xlnx,synch-pipedelay-2 = <0x2>;
-                       xlnx,synch-pipedelay-3 = <0x2>;
-                       xlnx,tavdv-ps-mem-0 = <0x1adb0>;
-                       xlnx,tavdv-ps-mem-1 = <0x3a98>;
-                       xlnx,tavdv-ps-mem-2 = <0x3a98>;
-                       xlnx,tavdv-ps-mem-3 = <0x3a98>;
-                       xlnx,tcedv-ps-mem-0 = <0x1adb0>;
-                       xlnx,tcedv-ps-mem-1 = <0x3a98>;
-                       xlnx,tcedv-ps-mem-2 = <0x3a98>;
-                       xlnx,tcedv-ps-mem-3 = <0x3a98>;
-                       xlnx,thzce-ps-mem-0 = <0x88b8>;
-                       xlnx,thzce-ps-mem-1 = <0x1b58>;
-                       xlnx,thzce-ps-mem-2 = <0x1b58>;
-                       xlnx,thzce-ps-mem-3 = <0x1b58>;
-                       xlnx,thzoe-ps-mem-0 = <0x1b58>;
-                       xlnx,thzoe-ps-mem-1 = <0x1b58>;
-                       xlnx,thzoe-ps-mem-2 = <0x1b58>;
-                       xlnx,thzoe-ps-mem-3 = <0x1b58>;
-                       xlnx,tlzwe-ps-mem-0 = <0x88b8>;
-                       xlnx,tlzwe-ps-mem-1 = <0x0>;
-                       xlnx,tlzwe-ps-mem-2 = <0x0>;
-                       xlnx,tlzwe-ps-mem-3 = <0x0>;
-                       xlnx,twc-ps-mem-0 = <0x2af8>;
-                       xlnx,twc-ps-mem-1 = <0x3a98>;
-                       xlnx,twc-ps-mem-2 = <0x3a98>;
-                       xlnx,twc-ps-mem-3 = <0x3a98>;
-                       xlnx,twp-ps-mem-0 = <0x11170>;
-                       xlnx,twp-ps-mem-1 = <0x2ee0>;
-                       xlnx,twp-ps-mem-2 = <0x2ee0>;
-                       xlnx,twp-ps-mem-3 = <0x2ee0>;
-                       xlnx,xcl0-linesize = <0x4>;
-                       xlnx,xcl0-writexfer = <0x1>;
-                       xlnx,xcl1-linesize = <0x4>;
-                       xlnx,xcl1-writexfer = <0x1>;
-                       xlnx,xcl2-linesize = <0x4>;
-                       xlnx,xcl2-writexfer = <0x1>;
-                       xlnx,xcl3-linesize = <0x4>;
-                       xlnx,xcl3-writexfer = <0x1>;
-               } ;
-               Hard_Ethernet_MAC: xps-ll-temac@81c00000 {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       compatible = "xlnx,compound";
-                       ranges ;
-                       ethernet@81c00000 {
-                               compatible = "xlnx,xps-ll-temac-1.01.b", "xlnx,xps-ll-temac-1.00.a";
-                               interrupt-parent = <&xps_intc_0>;
-                               interrupts = < 5 2 >;
-                               llink-connected = <&PIM3>;
-                               local-mac-address = [ 00 0a 35 00 00 00 ];
-                               reg = < 0x81c00000 0x40 >;
-                               xlnx,bus2core-clk-ratio = <0x1>;
-                               xlnx,phy-type = <0x1>;
-                               xlnx,phyaddr = <0x1>;
-                               xlnx,rxcsum = <0x0>;
-                               xlnx,rxfifo = <0x1000>;
-                               xlnx,temac-type = <0x0>;
-                               xlnx,txcsum = <0x0>;
-                               xlnx,txfifo = <0x1000>;
-                       } ;
-               } ;
-               IIC_EEPROM: i2c@81600000 {
-                       compatible = "xlnx,xps-iic-2.00.a";
-                       interrupt-parent = <&xps_intc_0>;
-                       interrupts = < 6 2 >;
-                       reg = < 0x81600000 0x10000 >;
-                       xlnx,clk-freq = <0x7735940>;
-                       xlnx,family = "virtex5";
-                       xlnx,gpo-width = <0x1>;
-                       xlnx,iic-freq = <0x186a0>;
-                       xlnx,scl-inertial-delay = <0x0>;
-                       xlnx,sda-inertial-delay = <0x0>;
-                       xlnx,ten-bit-adr = <0x0>;
-               } ;
-               LEDs_8Bit: gpio@81400000 {
-                       compatible = "xlnx,xps-gpio-1.00.a";
-                       interrupt-parent = <&xps_intc_0>;
-                       interrupts = < 7 2 >;
-                       reg = < 0x81400000 0x10000 >;
-                       xlnx,all-inputs = <0x0>;
-                       xlnx,all-inputs-2 = <0x0>;
-                       xlnx,dout-default = <0x0>;
-                       xlnx,dout-default-2 = <0x0>;
-                       xlnx,family = "virtex5";
-                       xlnx,gpio-width = <0x8>;
-                       xlnx,interrupt-present = <0x1>;
-                       xlnx,is-bidir = <0x1>;
-                       xlnx,is-bidir-2 = <0x1>;
-                       xlnx,is-dual = <0x0>;
-                       xlnx,tri-default = <0xffffffff>;
-                       xlnx,tri-default-2 = <0xffffffff>;
-                       #gpio-cells = <2>;
-                       gpio-controller;
-               } ;
-
-               gpio-leds {
-                       compatible = "gpio-leds";
-
-                       heartbeat {
-                               label = "Heartbeat";
-                               gpios = <&LEDs_8Bit 4 1>;
-                               linux,default-trigger = "heartbeat";
-                       };
-
-                       yellow {
-                               label = "Yellow";
-                               gpios = <&LEDs_8Bit 5 1>;
-                       };
-
-                       red {
-                               label = "Red";
-                               gpios = <&LEDs_8Bit 6 1>;
-                       };
-
-                       green {
-                               label = "Green";
-                               gpios = <&LEDs_8Bit 7 1>;
-                       };
-               } ;
-               RS232_Uart_1: serial@84000000 {
-                       clock-frequency = <125000000>;
-                       compatible = "xlnx,xps-uartlite-1.00.a";
-                       current-speed = <115200>;
-                       device_type = "serial";
-                       interrupt-parent = <&xps_intc_0>;
-                       interrupts = < 8 0 >;
-                       port-number = <0>;
-                       reg = < 0x84000000 0x10000 >;
-                       xlnx,baudrate = <0x1c200>;
-                       xlnx,data-bits = <0x8>;
-                       xlnx,family = "virtex5";
-                       xlnx,odd-parity = <0x0>;
-                       xlnx,use-parity = <0x0>;
-               } ;
-               SysACE_CompactFlash: sysace@83600000 {
-                       compatible = "xlnx,xps-sysace-1.00.a";
-                       interrupt-parent = <&xps_intc_0>;
-                       interrupts = < 4 2 >;
-                       reg = < 0x83600000 0x10000 >;
-                       xlnx,family = "virtex5";
-                       xlnx,mem-width = <0x10>;
-               } ;
-               debug_module: debug@84400000 {
-                       compatible = "xlnx,mdm-1.00.d";
-                       reg = < 0x84400000 0x10000 >;
-                       xlnx,family = "virtex5";
-                       xlnx,interconnect = <0x1>;
-                       xlnx,jtag-chain = <0x2>;
-                       xlnx,mb-dbg-ports = <0x1>;
-                       xlnx,uart-width = <0x8>;
-                       xlnx,use-uart = <0x1>;
-                       xlnx,write-fsl-ports = <0x0>;
-               } ;
-               mpmc@90000000 {
-                       #address-cells = <1>;
-                       #size-cells = <1>;
-                       compatible = "xlnx,mpmc-4.02.a";
-                       ranges ;
-                       PIM3: sdma@84600180 {
-                               compatible = "xlnx,ll-dma-1.00.a";
-                               interrupt-parent = <&xps_intc_0>;
-                               interrupts = < 2 2 1 2 >;
-                               reg = < 0x84600180 0x80 >;
-                       } ;
-               } ;
-               xps_intc_0: interrupt-controller@81800000 {
-                       #interrupt-cells = <0x2>;
-                       compatible = "xlnx,xps-intc-1.00.a";
-                       interrupt-controller ;
-                       reg = < 0x81800000 0x10000 >;
-                       xlnx,kind-of-intr = <0x100>;
-                       xlnx,num-intr-inputs = <0x9>;
-               } ;
-               xps_timer_1: timer@83c00000 {
-                       compatible = "xlnx,xps-timer-1.00.a";
-                       interrupt-parent = <&xps_intc_0>;
-                       interrupts = < 3 2 >;
-                       reg = < 0x83c00000 0x10000 >;
-                       xlnx,count-width = <0x20>;
-                       xlnx,family = "virtex5";
-                       xlnx,gen0-assert = <0x1>;
-                       xlnx,gen1-assert = <0x1>;
-                       xlnx,one-timer-only = <0x0>;
-                       xlnx,trig0-assert = <0x1>;
-                       xlnx,trig1-assert = <0x1>;
-               } ;
-       } ;
-}  ;
diff --git a/arch/microblaze/platform/platform.c b/arch/microblaze/platform/platform.c
deleted file mode 100644 (file)
index b9529ca..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2008 Michal Simek <monstr@monstr.eu>
- *
- * based on virtex.c file
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/prom.h>
-#include <asm/setup.h>
-
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
-       { .compatible = "simple-bus", },
-       { .compatible = "xlnx,compound", },
-       {}
-};
-
-static int __init microblaze_device_probe(void)
-{
-       of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
-       of_platform_reset_gpio_probe();
-       return 0;
-}
-device_initcall(microblaze_device_probe);
index 6c488c85d79141b385d1d6c0c394aa4400518fbf..c6e9cd2bca8dbf7de5512c9be8de0f2b92db7bd4 100644 (file)
@@ -14,7 +14,7 @@
 #define __ASM_MIPS_SYSCALL_H
 
 #include <linux/compiler.h>
-#include <linux/audit.h>
+#include <uapi/linux/audit.h>
 #include <linux/elf-em.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -127,12 +127,11 @@ extern const unsigned long sys_call_table[];
 extern const unsigned long sys32_call_table[];
 extern const unsigned long sysn32_call_table[];
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
        int arch = EM_MIPS;
 #ifdef CONFIG_64BIT
-       if (!test_tsk_thread_flag(task, TIF_32BIT_REGS))
+       if (!test_thread_flag(TIF_32BIT_REGS))
                arch |= __AUDIT_ARCH_64BIT;
 #endif
 #if defined(__LITTLE_ENDIAN)
index 7271e5a8308165c901a5c4e248f3b7c02225239f..71f85f4270341252d046db277aecfd5d3d0ae13d 100644 (file)
@@ -649,7 +649,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[2]);
 
-       audit_syscall_entry(syscall_get_arch(current, regs),
+       audit_syscall_entry(syscall_get_arch(),
                            syscall,
                            regs->regs[4], regs->regs[5],
                            regs->regs[6], regs->regs[7]);
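
This hunk, together with the s390 and x86 hunks further down, drops the task and regs parameters from syscall_get_arch(): audit only ever asks about the current task, so the helper now reads the current thread's flags directly and the call site passes nothing. A stand-alone sketch of the pattern, with made-up constants and a C11 thread-local flag standing in for EM_MIPS, the __AUDIT_ARCH_* bits and TIF_32BIT_REGS:

#include <stdio.h>

/* Hypothetical stand-ins for EM_MIPS and the __AUDIT_ARCH_* bits. */
#define DEMO_EM_MIPS      8u
#define DEMO_ARCH_64BIT   0x80000000u
#define DEMO_ARCH_LE      0x40000000u

/* Hypothetical per-thread state; the kernel reads the equivalent flag from
 * the current task, which is why no task argument is needed any more. */
static _Thread_local int demo_tif_32bit_regs;

static unsigned int demo_syscall_get_arch(void)
{
        unsigned int arch = DEMO_EM_MIPS;

        if (!demo_tif_32bit_regs)
                arch |= DEMO_ARCH_64BIT;
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        arch |= DEMO_ARCH_LE;
#endif
        return arch;
}

int main(void)
{
        printf("audit arch word for this thread: %#x\n", demo_syscall_get_arch());
        return 0;
}
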
index aed32b88576cbe1fbd11f4256a5f8b875bf3f809..e1f427f4f5f3fed4985bb370054421d7d2f91cdc 100644 (file)
@@ -28,16 +28,16 @@ enum {
 };
 
 struct cpufreq_frequency_table loongson2_clockmod_table[] = {
-       {DC_RESV, CPUFREQ_ENTRY_INVALID},
-       {DC_ZERO, CPUFREQ_ENTRY_INVALID},
-       {DC_25PT, 0},
-       {DC_37PT, 0},
-       {DC_50PT, 0},
-       {DC_62PT, 0},
-       {DC_75PT, 0},
-       {DC_87PT, 0},
-       {DC_DISABLE, 0},
-       {DC_RESV, CPUFREQ_TABLE_END},
+       {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+       {0, DC_ZERO, CPUFREQ_ENTRY_INVALID},
+       {0, DC_25PT, 0},
+       {0, DC_37PT, 0},
+       {0, DC_50PT, 0},
+       {0, DC_62PT, 0},
+       {0, DC_75PT, 0},
+       {0, DC_87PT, 0},
+       {0, DC_DISABLE, 0},
+       {0, DC_RESV, CPUFREQ_TABLE_END},
 };
 EXPORT_SYMBOL_GPL(loongson2_clockmod_table);
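
Every entry above gains a leading 0 because struct cpufreq_frequency_table grew a new first member in this cycle, so all positional initializers had to shift. Designated initializers make such tables immune to member reordering; a small sketch of that alternative, where demo_freq_entry and its field names are illustrative stand-ins for the real cpufreq structure:

#include <stdio.h>

#define DEMO_ENTRY_INVALID  (~0u)
#define DEMO_TABLE_END      (~1u)

/* Hypothetical stand-in for a cpufreq frequency-table entry. */
struct demo_freq_entry {
        unsigned int flags;
        unsigned int driver_data;
        unsigned int frequency;   /* kHz */
};

/* Designated initializers: adding or reordering struct members no longer
 * forces every table in every driver to be edited. */
static const struct demo_freq_entry demo_table[] = {
        { .driver_data = 0, .frequency = DEMO_ENTRY_INVALID },
        { .driver_data = 2, .frequency = 250000 },
        { .driver_data = 3, .frequency = 375000 },
        { .frequency = DEMO_TABLE_END },
};

int main(void)
{
        for (unsigned int i = 0; demo_table[i].frequency != DEMO_TABLE_END; i++)
                if (demo_table[i].frequency != DEMO_ENTRY_INVALID)
                        printf("entry %u: %u kHz\n", i, demo_table[i].frequency);
        return 0;
}
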
 
index 7c137cd8aa37490e07e2e9b7e917785e63236f5b..2fbbe4d920aa2efb353ed5fd52babaf309a386db 100644 (file)
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
        unsigned long vaddr;
        int idx, type;
@@ -89,7 +89,7 @@ static inline unsigned long kmap_atomic(struct page *page)
        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
        local_flush_tlb_one(vaddr);
 
-       return vaddr;
+       return (void *)vaddr;
 }
 
 static inline void __kunmap_atomic(unsigned long vaddr)
index bb2a8ec440e76ac80fcb2d0afa69284291054099..1faefed32749c93ff31a7d6237732d3dd6c55f26 100644 (file)
@@ -28,6 +28,7 @@ config PARISC
        select CLONE_BACKWARDS
        select TTY # Needed for pdc_cons.c
        select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_ARCH_AUDITSYSCALL
 
        help
          The PA-RISC microprocessor is designed by Hewlett-Packard and used
index 6c03a94991ad970a754713c1ecfc638661f04dc7..e0998997943bf789cddb11967781b595850574bc 100644 (file)
@@ -144,6 +144,7 @@ config PPC
        select HAVE_DEBUG_STACKOVERFLOW
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select ARCH_USE_CMPXCHG_LOCKREF if PPC64
+       select HAVE_ARCH_AUDITSYSCALL
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
index c2353bf059fd49ac95525a90aedd432c8a61701b..175a8b99c196e3942d2c14f1f082f3796243cc82 100644 (file)
@@ -1244,7 +1244,6 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_DEBUG_HIGHMEM=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 # CONFIG_RCU_CPU_STALL_DETECTOR is not set
index 139a8308070c8fee09665b81d168ef092e584d3e..fdee37fab81c5aa1f70263e7c4979fc0df7e9956 100644 (file)
@@ -174,7 +174,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
index 9ea8342bd219b8851cbe209420b554f9f8128d15..a905063281cc329b376605fa583d1e3282ce36c2 100644 (file)
@@ -306,3 +306,4 @@ CONFIG_KVM_BOOK3S_64=m
 CONFIG_KVM_BOOK3S_64_HV=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
index 3c84f9d879800f47ada749fdad0628cfe7bd5be0..58e3dbf43ca419f6877c7f87ac856e4ace764560 100644 (file)
@@ -301,3 +301,4 @@ CONFIG_CRYPTO_LZO=m
 # CONFIG_CRYPTO_ANSI_CPRNG is not set
 CONFIG_CRYPTO_DEV_NX=y
 CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
index 0dcc48af25a302759a5c0aca3f46b5abd7fb2507..e5d2e0bc7e032b64890701cdc91d88e18cd665c6 100644 (file)
 #define SPRN_HSRR1     0x13B   /* Hypervisor Save/Restore 1 */
 #define SPRN_IC                0x350   /* Virtual Instruction Count */
 #define SPRN_VTB       0x351   /* Virtual Time Base */
+#define SPRN_PMICR     0x354   /* Power Management Idle Control Reg */
+#define SPRN_PMSR      0x355   /* Power Management Status Reg */
+#define SPRN_PMCR      0x374   /* Power Management Control Register */
+
 /* HFSCR and FSCR bit numbers are the same */
 #define FSCR_TAR_LG    8       /* Enable Target Address Register */
 #define FSCR_EBB_LG    7       /* Enable Event Based Branching */
index 895e8a20a3fc064069d49d4b4cc1280b05df4e87..c252ee95bddf31b17e676f0d46a852880502f131 100644 (file)
@@ -11,6 +11,12 @@ config PPC_POWERNV
        select PPC_UDBG_16550
        select PPC_SCOM
        select ARCH_RANDOM
+       select CPU_FREQ
+       select CPU_FREQ_GOV_PERFORMANCE
+       select CPU_FREQ_GOV_POWERSAVE
+       select CPU_FREQ_GOV_USERSPACE
+       select CPU_FREQ_GOV_ONDEMAND
+       select CPU_FREQ_GOV_CONSERVATIVE
        default y
 
 config PPC_POWERNV_RTAS
index 346d21678ffdf02e8d677d8ca4516b642be6fce7..d68fe34799b0fc8f988b81bbeaf3a14d40afc8e3 100644 (file)
@@ -103,6 +103,7 @@ config S390
        select GENERIC_SMP_IDLE_THREAD
        select GENERIC_TIME_VSYSCALL
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
+       select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
index ddaae2f5c9137d0155ef5d5e943b40d097471881..8df022c43af7e7db7afb8d803b19fe4a162c18ca 100644 (file)
@@ -581,7 +581,6 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
index cd29d2f4e4f355512ecbafd0cfcc0c7fab05042a..777687055e7be576eb793d3a09aae71b97595ed9 100644 (file)
@@ -12,7 +12,7 @@
 #ifndef _ASM_SYSCALL_H
 #define _ASM_SYSCALL_H 1
 
-#include <linux/audit.h>
+#include <uapi/linux/audit.h>
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <asm/ptrace.h>
@@ -89,11 +89,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
                regs->orig_gpr2 = args[0];
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_COMPAT
-       if (test_tsk_thread_flag(task, TIF_31BIT))
+       if (test_tsk_thread_flag(current, TIF_31BIT))
                return AUDIT_ARCH_S390;
 #endif
        return sizeof(long) == 8 ? AUDIT_ARCH_S390X : AUDIT_ARCH_S390;
index ba55e939a820cb5792ee6186090f32d2f667f81d..834b67c4db5af00f4b864931034980948d74f14b 100644 (file)
@@ -42,6 +42,7 @@ config SUPERH
        select MODULES_USE_ELF_RELA
        select OLD_SIGSUSPEND
        select OLD_SIGACTION
+       select HAVE_ARCH_AUDITSYSCALL
        help
          The SuperH is a RISC processor targeted for use in embedded systems
          and consumer electronics; it was also used in the Sega Dreamcast
index 4e5229b0c5bbbc5adc116a5dbe18c02cf83f4cee..47236573db83bb3624f8542b4d905b156bcae5ed 100644 (file)
@@ -128,7 +128,6 @@ CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_FRAME_POINTER=y
index 7d8b7e94b93b6fb8e9795f49368c056053597bec..29f2e988c56a9be4cb8824942f1226ebf958670f 100644 (file)
@@ -77,6 +77,7 @@ config SPARC64
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select HAVE_C_RECORDMCOUNT
        select NO_BOOTMEM
+       select HAVE_ARCH_AUDITSYSCALL
 
 config ARCH_DEFCONFIG
        string
index 21ca44c4f6d58b1a845dfba592cbc704649bc1e3..6915d28cf118f6c406ff53b20802a36e6ddfcbe0 100644 (file)
@@ -1,6 +1,7 @@
 config UML
        bool
        default y
+       select HAVE_ARCH_AUDITSYSCALL
        select HAVE_UID16
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
index 5b8ec0f53b57ec77848100a42a1e0c2838c940a5..25d2c6f7325e8d64133100d651661af713f6563a 100644 (file)
@@ -129,6 +129,7 @@ config X86
        select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
        select HAVE_CC_STACKPROTECTOR
        select GENERIC_CPU_AUTOPROBE
+       select HAVE_ARCH_AUDITSYSCALL
 
 config INSTRUCTION_DECODER
        def_bool y
index 1e6146137f8e56753aab7d68b0e0b45af3c5fed9..4703a6c4b8e315050a38533438a27228ddc4758c 100644 (file)
@@ -112,7 +112,7 @@ __file_size64(void *__fh, efi_char16_t *filename_16,
        efi_file_info_t *info;
        efi_status_t status;
        efi_guid_t info_guid = EFI_FILE_INFO_ID;
-       u32 info_sz;
+       u64 info_sz;
 
        status = efi_early->call((unsigned long)fh->open, fh, &h, filename_16,
                                 EFI_FILE_MODE_READ, (u64)0);
@@ -167,31 +167,31 @@ efi_file_size(efi_system_table_t *sys_table, void *__fh,
 }
 
 static inline efi_status_t
-efi_file_read(void *__fh, void *handle, unsigned long *size, void *addr)
+efi_file_read(void *handle, unsigned long *size, void *addr)
 {
        unsigned long func;
 
        if (efi_early->is64) {
-               efi_file_handle_64_t *fh = __fh;
+               efi_file_handle_64_t *fh = handle;
 
                func = (unsigned long)fh->read;
                return efi_early->call(func, handle, size, addr);
        } else {
-               efi_file_handle_32_t *fh = __fh;
+               efi_file_handle_32_t *fh = handle;
 
                func = (unsigned long)fh->read;
                return efi_early->call(func, handle, size, addr);
        }
 }
 
-static inline efi_status_t efi_file_close(void *__fh, void *handle)
+static inline efi_status_t efi_file_close(void *handle)
 {
        if (efi_early->is64) {
-               efi_file_handle_64_t *fh = __fh;
+               efi_file_handle_64_t *fh = handle;
 
                return efi_early->call((unsigned long)fh->close, handle);
        } else {
-               efi_file_handle_32_t *fh = __fh;
+               efi_file_handle_32_t *fh = handle;
 
                return efi_early->call((unsigned long)fh->close, handle);
        }
@@ -1016,6 +1016,9 @@ void setup_graphics(struct boot_params *boot_params)
  * Because the x86 boot code expects to be passed a boot_params we
  * need to create one ourselves (usually the bootloader would create
  * one for us).
+ *
+ * The caller is responsible for filling out ->code32_start in the
+ * returned boot_params.
  */
 struct boot_params *make_boot_params(struct efi_config *c)
 {
@@ -1081,8 +1084,6 @@ struct boot_params *make_boot_params(struct efi_config *c)
        hdr->vid_mode = 0xffff;
        hdr->boot_flag = 0xAA55;
 
-       hdr->code32_start = (__u64)(unsigned long)image->image_base;
-
        hdr->type_of_loader = 0x21;
 
        /* Convert unicode cmdline to ascii */
index de9d4200d305ba86eb48bb60f3a852b210fb2649..cbed1407a5cdb7ead8fa0cae90d2e7902133aaa7 100644 (file)
@@ -59,6 +59,7 @@ ENTRY(efi_pe_entry)
        call    make_boot_params
        cmpl    $0, %eax
        je      fail
+       movl    %esi, BP_code32_start(%eax)
        popl    %ecx
        pushl   %eax
        pushl   %ecx
@@ -90,12 +91,7 @@ fail:
        hlt
        jmp     fail
 2:
-       call    3f
-3:
-       popl    %eax
-       subl    $3b, %eax
-       subl    BP_pref_address(%esi), %eax
-       add     BP_code32_start(%esi), %eax
+       movl    BP_code32_start(%esi), %eax
        leal    preferred_addr(%eax), %eax
        jmp     *%eax
 
index 57e58a5fa21073de1c86f4ceee6b76e674d87399..0d558ee899aec85cf3fce7f69691441691a41f93 100644 (file)
@@ -261,6 +261,8 @@ ENTRY(efi_pe_entry)
        cmpq    $0,%rax
        je      fail
        mov     %rax, %rsi
+       leaq    startup_32(%rip), %rax
+       movl    %eax, BP_code32_start(%rsi)
        jmp     2f              /* Skip the relocation */
 
 handover_entry:
@@ -284,12 +286,7 @@ fail:
        hlt
        jmp     fail
 2:
-       call    3f
-3:
-       popq    %rax
-       subq    $3b, %rax
-       subq    BP_pref_address(%rsi), %rax
-       add     BP_code32_start(%esi), %eax
+       movl    BP_code32_start(%esi), %eax
        leaq    preferred_addr(%rax), %rax
        jmp     *%rax
 
index aea284b413122f14ccbf757fd5b114d6b12bb094..d6a756ae04c8b999ed379159aa03029b17a38139 100644 (file)
@@ -13,7 +13,7 @@
 #ifndef _ASM_X86_SYSCALL_H
 #define _ASM_X86_SYSCALL_H
 
-#include <linux/audit.h>
+#include <uapi/linux/audit.h>
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <asm/asm-offsets.h>   /* For NR_syscalls */
@@ -91,8 +91,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->bx + i, args, n * sizeof(args[0]));
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
        return AUDIT_ARCH_I386;
 }
@@ -221,8 +220,7 @@ static inline void syscall_set_arguments(struct task_struct *task,
                }
 }
 
-static inline int syscall_get_arch(struct task_struct *task,
-                                  struct pt_regs *regs)
+static inline int syscall_get_arch(void)
 {
 #ifdef CONFIG_IA32_EMULATION
        /*
@@ -234,7 +232,7 @@ static inline int syscall_get_arch(struct task_struct *task,
         *
         * x32 tasks should be considered AUDIT_ARCH_X86_64.
         */
-       if (task_thread_info(task)->status & TS_COMPAT)
+       if (task_thread_info(current)->status & TS_COMPAT)
                return AUDIT_ARCH_I386;
 #endif
        /* Both x32 and x86_64 are considered "64-bit". */
index e69182fd01cfe4c9694a2178de43a8c784a30f4a..4b28159e0421d87bc36a1564cb32b6653314e86f 100644 (file)
@@ -87,7 +87,9 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
        num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;
 
        retval = 0;
-       if (num_cstate_subtype < (cx->address & MWAIT_SUBSTATE_MASK)) {
+       /* If the HW does not support any sub-states in this C-state */
+       if (num_cstate_subtype == 0) {
+               pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part);
                retval = -1;
                goto out;
        }
index 481ae38f6a44f5ac240fdd39b06f8708f66522d5..ad28db7e6bdea3594a4bfad1d70d0c975ded7021 100644 (file)
@@ -1996,7 +1996,8 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
        };
 
        /* First tickle the hardware, only then report what went on. -- REW */
-       apic_write(APIC_ESR, 0);
+       if (lapic_get_maxlvt() > 3)     /* Due to the Pentium erratum 3AP. */
+               apic_write(APIC_ESR, 0);
        v = apic_read(APIC_ESR);
        ack_APIC_irq();
        atomic_inc(&irq_err_count);
index 9b7734b1f975a4c0cfc9776749aecc83ace1cd10..eeee23ff75ef8ddb28a002d26b5bee98b21e03ca 100644 (file)
@@ -89,6 +89,9 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_chrdev_wait);
 static DEFINE_PER_CPU(struct mce, mces_seen);
 static int                     cpu_missing;
 
+/* CMCI storm detection filter */
+static DEFINE_PER_CPU(unsigned long, mce_polled_error);
+
 /*
  * MCA banks polled by the period polling timer for corrected events.
  * With Intel CMCI, this only has MCA banks which do not support CMCI (if any).
@@ -595,6 +598,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 {
        struct mce m;
        int i;
+       unsigned long *v;
 
        this_cpu_inc(mce_poll_count);
 
@@ -614,6 +618,8 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
                if (!(m.status & MCI_STATUS_VAL))
                        continue;
 
+               v = &get_cpu_var(mce_polled_error);
+               set_bit(0, v);
                /*
                 * Uncorrected or signalled events are handled by the exception
                 * handler when it is enabled, so don't process those here.
@@ -1278,10 +1284,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)
 static unsigned long (*mce_adjust_timer)(unsigned long interval) =
        mce_adjust_timer_default;
 
+static int cmc_error_seen(void)
+{
+       unsigned long *v = &__get_cpu_var(mce_polled_error);
+
+       return test_and_clear_bit(0, v);
+}
+
 static void mce_timer_fn(unsigned long data)
 {
        struct timer_list *t = &__get_cpu_var(mce_timer);
        unsigned long iv;
+       int notify;
 
        WARN_ON(smp_processor_id() != data);
 
@@ -1296,7 +1310,9 @@ static void mce_timer_fn(unsigned long data)
         * polling interval, otherwise increase the polling interval.
         */
        iv = __this_cpu_read(mce_next_interval);
-       if (mce_notify_irq()) {
+       notify = mce_notify_irq();
+       notify |= cmc_error_seen();
+       if (notify) {
                iv = max(iv / 2, (unsigned long) HZ/100);
        } else {
                iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
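
The machine-check hunk above records polled corrected errors in a per-CPU bit (mce_polled_error) and folds that into the decision mce_timer_fn already makes: if anything was seen, the polling interval is halved down to a floor, otherwise it is doubled up to a ceiling. A self-contained sketch of that adaptive-interval idea, using a plain flag and invented bounds rather than the kernel's per-CPU machinery and jiffies arithmetic:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MIN_INTERVAL_MS  10
#define DEMO_MAX_INTERVAL_MS  5000

/* Halve the poll interval while activity is being seen, back off
 * exponentially once things go quiet. */
static unsigned int demo_adjust_interval(unsigned int interval_ms, bool saw_error)
{
        if (saw_error) {
                interval_ms /= 2;
                if (interval_ms < DEMO_MIN_INTERVAL_MS)
                        interval_ms = DEMO_MIN_INTERVAL_MS;
        } else {
                interval_ms *= 2;
                if (interval_ms > DEMO_MAX_INTERVAL_MS)
                        interval_ms = DEMO_MAX_INTERVAL_MS;
        }
        return interval_ms;
}

int main(void)
{
        unsigned int iv = 1000;
        bool pattern[] = { true, true, false, false, false };

        for (unsigned int i = 0; i < sizeof(pattern) / sizeof(pattern[0]); i++) {
                iv = demo_adjust_interval(iv, pattern[i]);
                printf("tick %u: saw_error=%d, next poll in %u ms\n", i, pattern[i], iv);
        }
        return 0;
}
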
index fb6156fee6f79dcc07559a9765f3702d7365506d..3bdb95ae8c430fa8bacc76a9f644c4abce8ec35e 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
@@ -137,6 +138,22 @@ unsigned long mce_intel_adjust_timer(unsigned long interval)
        }
 }
 
+static void cmci_storm_disable_banks(void)
+{
+       unsigned long flags, *owned;
+       int bank;
+       u64 val;
+
+       raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+       owned = __get_cpu_var(mce_banks_owned);
+       for_each_set_bit(bank, owned, MAX_NR_BANKS) {
+               rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+               val &= ~MCI_CTL2_CMCI_EN;
+               wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+       }
+       raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
 static bool cmci_storm_detect(void)
 {
        unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
@@ -158,7 +175,7 @@ static bool cmci_storm_detect(void)
        if (cnt <= CMCI_STORM_THRESHOLD)
                return false;
 
-       cmci_clear();
+       cmci_storm_disable_banks();
        __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
        r = atomic_add_return(1, &cmci_storm_on_cpus);
        mce_timer_kick(CMCI_POLL_INTERVAL);
index 6d7d5a1260a68aca347b2b3d056a6d24dbf94f42..b0cc3809723d4f50bb7009691eabdfa9682b81e1 100644 (file)
@@ -225,7 +225,7 @@ static void __init intel_remapping_check(int num, int slot, int func)
  *
  * And yes, so far on current devices the base addr is always under 4G.
  */
-static u32 __init intel_stolen_base(int num, int slot, int func)
+static u32 __init intel_stolen_base(int num, int slot, int func, size_t stolen_size)
 {
        u32 base;
 
@@ -244,6 +244,114 @@ static u32 __init intel_stolen_base(int num, int slot, int func)
 #define MB(x)  (KB (KB (x)))
 #define GB(x)  (MB (KB (x)))
 
+static size_t __init i830_tseg_size(void)
+{
+       u8 tmp = read_pci_config_byte(0, 0, 0, I830_ESMRAMC);
+
+       if (!(tmp & TSEG_ENABLE))
+               return 0;
+
+       if (tmp & I830_TSEG_SIZE_1M)
+               return MB(1);
+       else
+               return KB(512);
+}
+
+static size_t __init i845_tseg_size(void)
+{
+       u8 tmp = read_pci_config_byte(0, 0, 0, I845_ESMRAMC);
+
+       if (!(tmp & TSEG_ENABLE))
+               return 0;
+
+       switch (tmp & I845_TSEG_SIZE_MASK) {
+       case I845_TSEG_SIZE_512K:
+               return KB(512);
+       case I845_TSEG_SIZE_1M:
+               return MB(1);
+       default:
+               WARN_ON(1);
+               return 0;
+       }
+}
+
+static size_t __init i85x_tseg_size(void)
+{
+       u8 tmp = read_pci_config_byte(0, 0, 0, I85X_ESMRAMC);
+
+       if (!(tmp & TSEG_ENABLE))
+               return 0;
+
+       return MB(1);
+}
+
+static size_t __init i830_mem_size(void)
+{
+       return read_pci_config_byte(0, 0, 0, I830_DRB3) * MB(32);
+}
+
+static size_t __init i85x_mem_size(void)
+{
+       return read_pci_config_byte(0, 0, 1, I85X_DRB3) * MB(32);
+}
+
+/*
+ * On 830/845/85x the stolen memory base isn't available in any
+ * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size.
+ */
+static u32 __init i830_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+       return i830_mem_size() - i830_tseg_size() - stolen_size;
+}
+
+static u32 __init i845_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+       return i830_mem_size() - i845_tseg_size() - stolen_size;
+}
+
+static u32 __init i85x_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+       return i85x_mem_size() - i85x_tseg_size() - stolen_size;
+}
+
+static u32 __init i865_stolen_base(int num, int slot, int func, size_t stolen_size)
+{
+       /*
+        * FIXME is the graphics stolen memory region
+        * always at TOUD? Ie. is it always the last
+        * one to be allocated by the BIOS?
+        */
+       return read_pci_config_16(0, 0, 0, I865_TOUD) << 16;
+}
+
+static size_t __init i830_stolen_size(int num, int slot, int func)
+{
+       size_t stolen_size;
+       u16 gmch_ctrl;
+
+       gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL);
+
+       switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
+       case I830_GMCH_GMS_STOLEN_512:
+               stolen_size = KB(512);
+               break;
+       case I830_GMCH_GMS_STOLEN_1024:
+               stolen_size = MB(1);
+               break;
+       case I830_GMCH_GMS_STOLEN_8192:
+               stolen_size = MB(8);
+               break;
+       case I830_GMCH_GMS_LOCAL:
+               /* local memory isn't part of the normal address space */
+               stolen_size = 0;
+               break;
+       default:
+               return 0;
+       }
+
+       return stolen_size;
+}
+
 static size_t __init gen3_stolen_size(int num, int slot, int func)
 {
        size_t stolen_size;
@@ -310,7 +418,7 @@ static size_t __init gen6_stolen_size(int num, int slot, int func)
        return gmch_ctrl << 25; /* 32 MB units */
 }
 
-static inline size_t gen8_stolen_size(int num, int slot, int func)
+static size_t gen8_stolen_size(int num, int slot, int func)
 {
        u16 gmch_ctrl;
 
@@ -320,31 +428,74 @@ static inline size_t gen8_stolen_size(int num, int slot, int func)
        return gmch_ctrl << 25; /* 32 MB units */
 }
 
-typedef size_t (*stolen_size_fn)(int num, int slot, int func);
+
+struct intel_stolen_funcs {
+       size_t (*size)(int num, int slot, int func);
+       u32 (*base)(int num, int slot, int func, size_t size);
+};
+
+static const struct intel_stolen_funcs i830_stolen_funcs = {
+       .base = i830_stolen_base,
+       .size = i830_stolen_size,
+};
+
+static const struct intel_stolen_funcs i845_stolen_funcs = {
+       .base = i845_stolen_base,
+       .size = i830_stolen_size,
+};
+
+static const struct intel_stolen_funcs i85x_stolen_funcs = {
+       .base = i85x_stolen_base,
+       .size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs i865_stolen_funcs = {
+       .base = i865_stolen_base,
+       .size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen3_stolen_funcs = {
+       .base = intel_stolen_base,
+       .size = gen3_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen6_stolen_funcs = {
+       .base = intel_stolen_base,
+       .size = gen6_stolen_size,
+};
+
+static const struct intel_stolen_funcs gen8_stolen_funcs = {
+       .base = intel_stolen_base,
+       .size = gen8_stolen_size,
+};
 
 static struct pci_device_id intel_stolen_ids[] __initdata = {
-       INTEL_I915G_IDS(gen3_stolen_size),
-       INTEL_I915GM_IDS(gen3_stolen_size),
-       INTEL_I945G_IDS(gen3_stolen_size),
-       INTEL_I945GM_IDS(gen3_stolen_size),
-       INTEL_VLV_M_IDS(gen6_stolen_size),
-       INTEL_VLV_D_IDS(gen6_stolen_size),
-       INTEL_PINEVIEW_IDS(gen3_stolen_size),
-       INTEL_I965G_IDS(gen3_stolen_size),
-       INTEL_G33_IDS(gen3_stolen_size),
-       INTEL_I965GM_IDS(gen3_stolen_size),
-       INTEL_GM45_IDS(gen3_stolen_size),
-       INTEL_G45_IDS(gen3_stolen_size),
-       INTEL_IRONLAKE_D_IDS(gen3_stolen_size),
-       INTEL_IRONLAKE_M_IDS(gen3_stolen_size),
-       INTEL_SNB_D_IDS(gen6_stolen_size),
-       INTEL_SNB_M_IDS(gen6_stolen_size),
-       INTEL_IVB_M_IDS(gen6_stolen_size),
-       INTEL_IVB_D_IDS(gen6_stolen_size),
-       INTEL_HSW_D_IDS(gen6_stolen_size),
-       INTEL_HSW_M_IDS(gen6_stolen_size),
-       INTEL_BDW_M_IDS(gen8_stolen_size),
-       INTEL_BDW_D_IDS(gen8_stolen_size)
+       INTEL_I830_IDS(&i830_stolen_funcs),
+       INTEL_I845G_IDS(&i845_stolen_funcs),
+       INTEL_I85X_IDS(&i85x_stolen_funcs),
+       INTEL_I865G_IDS(&i865_stolen_funcs),
+       INTEL_I915G_IDS(&gen3_stolen_funcs),
+       INTEL_I915GM_IDS(&gen3_stolen_funcs),
+       INTEL_I945G_IDS(&gen3_stolen_funcs),
+       INTEL_I945GM_IDS(&gen3_stolen_funcs),
+       INTEL_VLV_M_IDS(&gen6_stolen_funcs),
+       INTEL_VLV_D_IDS(&gen6_stolen_funcs),
+       INTEL_PINEVIEW_IDS(&gen3_stolen_funcs),
+       INTEL_I965G_IDS(&gen3_stolen_funcs),
+       INTEL_G33_IDS(&gen3_stolen_funcs),
+       INTEL_I965GM_IDS(&gen3_stolen_funcs),
+       INTEL_GM45_IDS(&gen3_stolen_funcs),
+       INTEL_G45_IDS(&gen3_stolen_funcs),
+       INTEL_IRONLAKE_D_IDS(&gen3_stolen_funcs),
+       INTEL_IRONLAKE_M_IDS(&gen3_stolen_funcs),
+       INTEL_SNB_D_IDS(&gen6_stolen_funcs),
+       INTEL_SNB_M_IDS(&gen6_stolen_funcs),
+       INTEL_IVB_M_IDS(&gen6_stolen_funcs),
+       INTEL_IVB_D_IDS(&gen6_stolen_funcs),
+       INTEL_HSW_D_IDS(&gen6_stolen_funcs),
+       INTEL_HSW_M_IDS(&gen6_stolen_funcs),
+       INTEL_BDW_M_IDS(&gen8_stolen_funcs),
+       INTEL_BDW_D_IDS(&gen8_stolen_funcs)
 };
 
 static void __init intel_graphics_stolen(int num, int slot, int func)
@@ -361,11 +512,13 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 
        for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) {
                if (intel_stolen_ids[i].device == device) {
-                       stolen_size_fn stolen_size =
-                               (stolen_size_fn)intel_stolen_ids[i].driver_data;
-                       size = stolen_size(num, slot, func);
-                       start = intel_stolen_base(num, slot, func);
+                       const struct intel_stolen_funcs *stolen_funcs =
+                               (const struct intel_stolen_funcs *)intel_stolen_ids[i].driver_data;
+                       size = stolen_funcs->size(num, slot, func);
+                       start = stolen_funcs->base(num, slot, func, size);
                        if (size && start) {
+                               printk(KERN_INFO "Reserving Intel graphics stolen memory at 0x%x-0x%x\n",
+                                      start, start + (u32)size - 1);
                                /* Mark this space as reserved */
                                e820_add_region(start, size, E820_RESERVED);
                                sanitize_e820_map(e820.map,
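
The early-quirks rework above stops passing a bare function pointer through driver_data and instead points each PCI ID at a const ops structure carrying both a ->size() and a ->base() callback, since the base calculation for the old chipsets needs the stolen size. The general shape, a read-only dispatch table selected by device ID, is easy to show in isolation; the demo_* types, IDs and numbers below are invented for the example:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical per-generation callbacks, analogous to ->size()/->base(). */
struct demo_stolen_ops {
        size_t       (*size)(void);
        unsigned int (*base)(size_t size);
};

static size_t demo_gen_a_size(void)            { return (size_t)8 << 20; }
static unsigned int demo_gen_a_base(size_t sz) { return 0x40000000u - (unsigned int)sz; }

static size_t demo_gen_b_size(void)            { return (size_t)32 << 20; }
static unsigned int demo_gen_b_base(size_t sz) { (void)sz; return 0x7f000000u; }

static const struct demo_stolen_ops demo_gen_a_ops = { .size = demo_gen_a_size, .base = demo_gen_a_base };
static const struct demo_stolen_ops demo_gen_b_ops = { .size = demo_gen_b_size, .base = demo_gen_b_base };

/* Dispatch table keyed by a made-up device ID, mirroring intel_stolen_ids[]. */
static const struct {
        unsigned short device;
        const struct demo_stolen_ops *ops;
} demo_ids[] = {
        { 0x1111, &demo_gen_a_ops },
        { 0x2222, &demo_gen_b_ops },
};

int main(void)
{
        unsigned short device = 0x2222;

        for (size_t i = 0; i < sizeof(demo_ids) / sizeof(demo_ids[0]); i++) {
                if (demo_ids[i].device != device)
                        continue;
                size_t sz = demo_ids[i].ops->size();
                unsigned int start = demo_ids[i].ops->base(sz);
                printf("stolen region: %#x, %zu bytes\n", start, sz);
        }
        return 0;
}
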
index 42805fac009215ac5c70bdd71fdc2ad5d72dfa89..283a76a9cc4099427b8d8b7dcc595cfdddb880a1 100644 (file)
@@ -125,7 +125,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_printf(p, "  Machine check polls\n");
 #endif
-#if defined(CONFIG_HYPERV) || defined(CONFIG_XEN)
+#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
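
The one-liner above swaps defined(CONFIG_HYPERV) for IS_ENABLED(CONFIG_HYPERV): when Hyper-V support is built as a module, Kconfig defines CONFIG_HYPERV_MODULE rather than CONFIG_HYPERV, so the plain defined() test silently misses the =m case, while IS_ENABLED() covers both (CONFIG_XEN is a bool, so defined() remains fine there). Below is a compressed re-derivation of the preprocessor trick behind it, written from memory as a sketch with DEMO_* names; it is simplified, not a verbatim copy of include/linux/kconfig.h.

#include <stdio.h>

/* Pretend the option was configured as a module (=m). */
#define DEMO_CONFIG_FOO_MODULE 1

/* If the option macro is defined to 1, DEMO_PLACEHOLDER_##val becomes
 * DEMO_PLACEHOLDER_1, which expands to "0," and shifts a literal 1 into the
 * second-argument slot; otherwise the unexpanded junk token merges with the
 * 1 and the 0 stays in that slot. */
#define DEMO_PLACEHOLDER_1 0,
#define DEMO_SECOND(a, b, ...) b
#define DEMO_IS_DEFINED3(junk_or_zero) DEMO_SECOND(junk_or_zero 1, 0, 0)
#define DEMO_IS_DEFINED2(val) DEMO_IS_DEFINED3(DEMO_PLACEHOLDER_##val)
#define DEMO_IS_DEFINED(opt) DEMO_IS_DEFINED2(opt)

#define DEMO_IS_BUILTIN(opt) DEMO_IS_DEFINED(opt)
#define DEMO_IS_MODULE(opt)  DEMO_IS_DEFINED(opt##_MODULE)
#define DEMO_IS_ENABLED(opt) (DEMO_IS_BUILTIN(opt) || DEMO_IS_MODULE(opt))

int main(void)
{
        printf("built-in? %d\n", DEMO_IS_BUILTIN(DEMO_CONFIG_FOO)); /* 0 */
        printf("enabled?  %d\n", DEMO_IS_ENABLED(DEMO_CONFIG_FOO)); /* 1 */
        return 0;
}
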
index ebc9873989233dcba13d1fa1af14e3e921959a85..af1d14a9ebdae1ac2fddace4c15271babd1613a3 100644 (file)
@@ -229,6 +229,17 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
                }
        }
 
+       /*
+        * On x86-64 we do not support 16-bit segments due to
+        * IRET leaking the high bits of the kernel stack address.
+        */
+#ifdef CONFIG_X86_64
+       if (!ldt_info.seg_32bit) {
+               error = -EINVAL;
+               goto out_unlock;
+       }
+#endif
+
        fill_ldt(&ldt, &ldt_info);
        if (oldmode)
                ldt.avl = 0;
index 299d49302e7d2a46cedcd00230f8a4ed8b027c5b..0497f719977dff8ca0094b536a6b7e40ac371ef2 100644 (file)
@@ -1207,23 +1207,31 @@ error:
        return ret;
 }
 
-static inline int __init determine_tce_table_size(u64 ram)
+static inline int __init determine_tce_table_size(void)
 {
        int ret;
 
        if (specified_table_size != TCE_TABLE_SIZE_UNSPECIFIED)
                return specified_table_size;
 
-       /*
-        * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
-        * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
-        * larger table size has twice as many entries, so shift the
-        * max ram address by 13 to divide by 8K and then look at the
-        * order of the result to choose between 0-7.
-        */
-       ret = get_order(ram >> 13);
-       if (ret > TCE_TABLE_SIZE_8M)
+       if (is_kdump_kernel() && saved_max_pfn) {
+               /*
+                * Table sizes are from 0 to 7 (TCE_TABLE_SIZE_64K to
+                * TCE_TABLE_SIZE_8M). Table size 0 has 8K entries and each
+                * larger table size has twice as many entries, so shift the
+                * max ram address by 13 to divide by 8K and then look at the
+                * order of the result to choose between 0-7.
+                */
+               ret = get_order((saved_max_pfn * PAGE_SIZE) >> 13);
+               if (ret > TCE_TABLE_SIZE_8M)
+                       ret = TCE_TABLE_SIZE_8M;
+       } else {
+               /*
+                * Use 8M by default (suggested by Muli) if it's not
+                * kdump kernel and saved_max_pfn isn't set.
+                */
                ret = TCE_TABLE_SIZE_8M;
+       }
 
        return ret;
 }
@@ -1418,8 +1426,7 @@ int __init detect_calgary(void)
                return -ENOMEM;
        }
 
-       specified_table_size = determine_tce_table_size((is_kdump_kernel() ?
-                                       saved_max_pfn : max_pfn) * PAGE_SIZE);
+       specified_table_size = determine_tce_table_size();
 
        for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
                struct calgary_bus_info *info = &bus_info[bus];
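
determine_tce_table_size() above now decides internally whether it is running in a kdump kernel and otherwise simply picks the 8M table, instead of having the caller compute a RAM size to pass in. The underlying sizing rule, the smallest power-of-two table whose coverage reaches the given amount of memory, clamped to a maximum order, can be shown on its own; the constants in this sketch are illustrative, not the Calgary ones:

#include <stdio.h>

#define DEMO_MAX_ORDER        7            /* orders 0..7, like TCE_TABLE_SIZE_64K..8M */
#define DEMO_ORDER0_COVERAGE  (1ULL << 33) /* pretend order 0 covers 8 GiB */

/* Pick the smallest order whose coverage (doubling per step) reaches 'ram',
 * clamped to DEMO_MAX_ORDER. */
static unsigned int demo_table_order(unsigned long long ram)
{
        unsigned int order = 0;
        unsigned long long covered = DEMO_ORDER0_COVERAGE;

        while (order < DEMO_MAX_ORDER && covered < ram) {
                covered <<= 1;
                order++;
        }
        return order;
}

int main(void)
{
        printf("  4 GiB -> order %u\n", demo_table_order(4ULL << 30));
        printf(" 64 GiB -> order %u\n", demo_table_order(64ULL << 30));
        printf("  2 TiB -> order %u (clamped)\n", demo_table_order(2ULL << 40));
        return 0;
}
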
index d57d917ff2406c308b6b12b4790bf6654f4d900a..1493c68352d11454a50a7fdfb5f11f49ab54de88 100644 (file)
@@ -627,7 +627,6 @@ CONFIG_SCHED_DEBUG=y
 # CONFIG_DEBUG_KOBJECT is not set
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
-# CONFIG_DEBUG_WRITECOUNT is not set
 # CONFIG_DEBUG_MEMORY_INIT is not set
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
index 583c2b0974cab79dfb08e7381836702688c2ed52..12a492ab6d17f9fbf74dedd1a9723a269ee7cbef 100644 (file)
@@ -569,7 +569,6 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
 CONFIG_DEBUG_NOMMU_REGIONS=y
-# CONFIG_DEBUG_WRITECOUNT is not set
 # CONFIG_DEBUG_MEMORY_INIT is not set
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
index 34d7c196338b146e99c7fb31ee93ad8fa5f53f49..a0e3096c4bb53a48c129d3df0337ad66731c417a 100644 (file)
@@ -1307,7 +1307,7 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                struct request_list *rl = blk_rq_rl(req);
 
                BUG_ON(!list_empty(&req->queuelist));
-               BUG_ON(!hlist_unhashed(&req->hash));
+               BUG_ON(ELV_ON_HASH(req));
 
                blk_free_request(rl, req);
                freed_request(rl, flags);
index cca6356d216d13977665e17a846aef31ab1e4a87..f7b22bc215180d4b7f467135faeaf52975a77013 100644 (file)
@@ -188,7 +188,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
  *    unmapping.
  */
 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
-                       struct rq_map_data *map_data, struct sg_iovec *iov,
+                       struct rq_map_data *map_data, const struct sg_iovec *iov,
                        int iov_count, unsigned int len, gfp_t gfp_mask)
 {
        struct bio *bio;
index b1bcc619d0ea90778d9626e3923585bb02767dd7..1d2a9bdbee57f100faacf91ab3a9aef6b7b2a944 100644 (file)
@@ -956,6 +956,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
                               unsigned int cpu)
 {
        struct blk_mq_hw_ctx *hctx = data;
+       struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        LIST_HEAD(tmp);
 
@@ -965,7 +966,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
        /*
         * Move ctx entries to new CPU, if this one is going away.
         */
-       ctx = __blk_mq_get_ctx(hctx->queue, cpu);
+       ctx = __blk_mq_get_ctx(q, cpu);
 
        spin_lock(&ctx->lock);
        if (!list_empty(&ctx->rq_list)) {
@@ -977,7 +978,7 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
        if (list_empty(&tmp))
                return;
 
-       ctx = blk_mq_get_ctx(hctx->queue);
+       ctx = blk_mq_get_ctx(q);
        spin_lock(&ctx->lock);
 
        while (!list_empty(&tmp)) {
@@ -988,10 +989,13 @@ static void blk_mq_hctx_notify(void *data, unsigned long action,
                list_move_tail(&rq->queuelist, &ctx->rq_list);
        }
 
+       hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_hctx_mark_pending(hctx, ctx);
 
        spin_unlock(&ctx->lock);
        blk_mq_put_ctx(ctx);
+
+       blk_mq_run_hw_queue(hctx, true);
 }
 
 static int blk_mq_init_hw_commands(struct blk_mq_hw_ctx *hctx,
index ebd6b6f1bdeb78a79b5bebdaf64771c315f69abd..53b1737e978d584878f3dae13352e17a8aed1f06 100644 (file)
@@ -30,8 +30,8 @@ static void blk_done_softirq(struct softirq_action *h)
        while (!list_empty(&local_list)) {
                struct request *rq;
 
-               rq = list_entry(local_list.next, struct request, queuelist);
-               list_del_init(&rq->queuelist);
+               rq = list_entry(local_list.next, struct request, ipi_list);
+               list_del_init(&rq->ipi_list);
                rq->q->softirq_done_fn(rq);
        }
 }
@@ -45,14 +45,9 @@ static void trigger_softirq(void *data)
 
        local_irq_save(flags);
        list = this_cpu_ptr(&blk_cpu_done);
-       /*
-        * We reuse queuelist for a list of requests to process. Since the
-        * queuelist is used by the block layer only for requests waiting to be
-        * submitted to the device it is unused now.
-        */
-       list_add_tail(&rq->queuelist, list);
+       list_add_tail(&rq->ipi_list, list);
 
-       if (list->next == &rq->queuelist)
+       if (list->next == &rq->ipi_list)
                raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
        local_irq_restore(flags);
@@ -141,7 +136,7 @@ void __blk_complete_request(struct request *req)
                struct list_head *list;
 do_local:
                list = this_cpu_ptr(&blk_cpu_done);
-               list_add_tail(&req->queuelist, list);
+               list_add_tail(&req->ipi_list, list);
 
                /*
                 * if the list only contains our just added request,
@@ -149,7 +144,7 @@ do_local:
                 * entries there, someone already raised the irq but it
                 * hasn't run yet.
                 */
-               if (list->next == &req->queuelist)
+               if (list->next == &req->ipi_list)
                        raise_softirq_irqoff(BLOCK_SOFTIRQ);
        } else if (raise_blk_irq(ccpu, req))
                goto do_local;
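With the blk-softirq hunks above, completed requests are chained through a dedicated rq->ipi_list instead of reusing rq->queuelist for the per-CPU completion list, and the old comment justifying that reuse goes away with it. The raising rule is unchanged and worth spelling out: BLOCK_SOFTIRQ is raised only when the request just queued is the first entry on this CPU's list, since a non-empty list means the softirq has already been raised and simply has not run yet. The idiom, condensed from the code above:

	local_irq_save(flags);
	list = this_cpu_ptr(&blk_cpu_done);
	list_add_tail(&rq->ipi_list, list);

	/* first entry on this CPU's list => no softirq pending yet */
	if (list->next == &rq->ipi_list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
	local_irq_restore(flags);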
index d23b415b8a28f90e0ff83c713e0c2677a1e00f96..1d880f1f957fe473fbb0f78ad8ad03a3726faa73 100644 (file)
@@ -78,7 +78,7 @@ static inline void blk_clear_rq_complete(struct request *rq)
 /*
  * Internal elevator interface
  */
-#define ELV_ON_HASH(rq) hash_hashed(&(rq)->hash)
+#define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
 void blk_abort_flushes(struct request_queue *q);
index 42c45a7d67144a5598f5d7b2242a63eb9d58e292..1e01b66a0b927018498c8d28d5b09472cbf559ce 100644 (file)
@@ -247,6 +247,7 @@ EXPORT_SYMBOL(elevator_exit);
 static inline void __elv_rqhash_del(struct request *rq)
 {
        hash_del(&rq->hash);
+       rq->cmd_flags &= ~REQ_HASHED;
 }
 
 static void elv_rqhash_del(struct request_queue *q, struct request *rq)
@@ -261,6 +262,7 @@ static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 
        BUG_ON(ELV_ON_HASH(rq));
        hash_add(e->hash, &rq->hash, rq_hash_key(rq));
+       rq->cmd_flags |= REQ_HASHED;
 }
 
 static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
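The elevator hunks keep a REQ_HASHED bit in rq->cmd_flags in lockstep with hash membership: set in elv_rqhash_add(), cleared in __elv_rqhash_del(), and tested through ELV_ON_HASH() without touching rq->hash at all, which is presumably the point, since the first hunk in this section now lets blk-core check a request being freed via ELV_ON_HASH() instead of relying on the state of the hlist node. The invariant the helper pair maintains, condensed from the hunks:

	/* add: must not already be hashed; flag and node move together */
	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
	rq->cmd_flags |= REQ_HASHED;

	/* del: clear the flag wherever the node is removed */
	hash_del(&rq->hash);
	rq->cmd_flags &= ~REQ_HASHED;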
index c205653e96447b12ab9ed163715bf9d64c0cd230..ab686b31010034083ba3b4784ba288fb93b3f554 100644 (file)
@@ -31,10 +31,14 @@ menuconfig ACPI
          ACPI CA, see:
          <http://acpica.org/>
 
-         ACPI is an open industry specification co-developed by
-         Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba.
+         ACPI is an open industry specification originally co-developed by
+         Hewlett-Packard, Intel, Microsoft, Phoenix, and Toshiba. Currently,
+         it is developed by the ACPI Specification Working Group (ASWG) under
+         the UEFI Forum and any UEFI member can join the ASWG and contribute
+         to the ACPI specification.
          The specification is available at:
          <http://www.acpi.info>
+         <http://www.uefi.org/acpi/specs>
 
 if ACPI
 
index f0fc6260266bfe852a7228b7f64bf8db40ce4bb9..d9339b442a4ebdc6870d4dd0b4dac4db65ed8613 100644 (file)
@@ -51,12 +51,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
        " the driver to wait for userspace to write the undock sysfs file "
        " before undocking");
 
-static const struct acpi_device_id dock_device_ids[] = {
-       {"LNXDOCK", 0},
-       {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, dock_device_ids);
-
 struct dock_station {
        acpi_handle handle;
        unsigned long last_dock_time;
index f7fd72ac69cf52839e6bd92a9f503fc8e7c66dad..6776c599816f33100a2403ae7286e534f0a96354 100644 (file)
@@ -1219,10 +1219,9 @@ acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
 {
        struct semaphore *sem = NULL;
 
-       sem = acpi_os_allocate(sizeof(struct semaphore));
+       sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;
-       memset(sem, 0, sizeof(struct semaphore));
 
        sema_init(sem, initial_units);
 
index 9640685533345340bbff00bc67abeecac4e9c7d1..c1e31a41f94957d87ded390dfbfcc54333728e3c 100644 (file)
@@ -344,7 +344,7 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
                        tz->trips.hot.flags.valid = 1;
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "Found hot threshold [%lu]\n",
-                                       tz->trips.critical.temperature));
+                                       tz->trips.hot.temperature));
                }
        }
 
index 0f5f78fa6545cf4d30da8f56b426033ac7f7132e..bba526148583c4ea67e0a5d08631d7f0eecbee2a 100644 (file)
@@ -164,11 +164,10 @@ acpi_extract_package(union acpi_object *package,
         * Validate output buffer.
         */
        if (buffer->length == ACPI_ALLOCATE_BUFFER) {
-               buffer->pointer = ACPI_ALLOCATE(size_required);
+               buffer->pointer = ACPI_ALLOCATE_ZEROED(size_required);
                if (!buffer->pointer)
                        return AE_NO_MEMORY;
                buffer->length = size_required;
-               memset(buffer->pointer, 0, size_required);
        } else {
                if (buffer->length < size_required) {
                        buffer->length = size_required;
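This hunk and the osl.c one before it make the same substitution: an allocation followed by an explicit memset() becomes a single zeroed allocation, so the buffer can never be observed half-initialised and the memset cannot be lost in a later refactor. Before and after, using the same macros the patch uses:

	/* before: two steps */
	buffer->pointer = ACPI_ALLOCATE(size_required);
	if (!buffer->pointer)
		return AE_NO_MEMORY;
	memset(buffer->pointer, 0, size_required);

	/* after: the allocator returns already-zeroed memory */
	buffer->pointer = ACPI_ALLOCATE_ZEROED(size_required);
	if (!buffer->pointer)
		return AE_NO_MEMORY;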
index 48c7e8af9c96cba545731cb0ab464cb4097dc44b..8b6990e417ec870d7c77e42c994b1e2245b3b88e 100644 (file)
@@ -487,6 +487,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
                },
        },
+       {
+       .callback = video_set_use_native_backlight,
+       .ident = "Thinkpad Helix",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
+               },
+       },
        {
         .callback = video_set_use_native_backlight,
         .ident = "Dell Inspiron 7520",
index 6f54962aae1dd1a6160b777238b37f639f03f80f..ae098a261fcdb29262e778036ad5e9712841f221 100644 (file)
@@ -705,6 +705,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
        return 0;
 }
 
+static bool pd_ignore_unused;
+static int __init pd_ignore_unused_setup(char *__unused)
+{
+       pd_ignore_unused = true;
+       return 1;
+}
+__setup("pd_ignore_unused", pd_ignore_unused_setup);
+
 /**
  * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
  */
@@ -712,6 +720,11 @@ void pm_genpd_poweroff_unused(void)
 {
        struct generic_pm_domain *genpd;
 
+       if (pd_ignore_unused) {
+               pr_warn("genpd: Not disabling unused power domains\n");
+               return;
+       }
+
        mutex_lock(&gpd_list_lock);
 
        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
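The genpd hunk adds a boot-time switch rather than a Kconfig or sysfs knob: __setup("pd_ignore_unused", ...) registers an early command-line handler, and pm_genpd_poweroff_unused() then refuses to gate domains that have no devices in use, which helps when debugging hardware that misbehaves once its power domain is switched off. Usage is a bare token appended to the kernel command line, pd_ignore_unused, with no value. The general __setup() pattern the hunk follows, shown here with hypothetical names (my_flag, my_flag_setup) that are not part of the patch:

	static bool my_flag;

	/* __setup() handlers run during early parameter parsing;
	 * returning 1 tells the core the option has been consumed */
	static int __init my_flag_setup(char *unused)
	{
		my_flag = true;
		return 1;
	}
	__setup("my_flag", my_flag_setup);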
index d0a072463a04ff1c4d3f83cbd66de587be4c5e44..63e30ef096e2be5e16a1e989bba22dd3b6c69f8d 100644 (file)
@@ -761,10 +761,11 @@ skip_format_initialization:
        if (ret != 0)
                goto err_range;
 
-       if (dev)
+       if (dev) {
                ret = regmap_attach_dev(dev, map, config);
                if (ret != 0)
                        goto err_regcache;
+       }
 
        return map;
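The regmap change is a pure bracing fix. In the removed form the indentation suggests both statements are guarded by if (dev), but only the assignment is; the error check runs unconditionally and only happens to be benign because ret is still zero at that point when dev is NULL. The braces make the control flow match the intent:

	/* before: the indentation lies, the second if is not under if (dev) */
	if (dev)
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;

	/* after: the grouping is explicit */
	if (dev) {
		ret = regmap_attach_dev(dev, map, config);
		if (ret != 0)
			goto err_regcache;
	}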
 
index 18c76e84d54085c0819d46f3af1f1ef3aa6fcb3b..68e3992e88381cd4974ebfa2da3400708ab4afa0 100644 (file)
@@ -469,24 +469,14 @@ static void drbd_wait_ee_list_empty(struct drbd_device *device,
 
 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
 {
-       mm_segment_t oldfs;
        struct kvec iov = {
                .iov_base = buf,
                .iov_len = size,
        };
        struct msghdr msg = {
-               .msg_iovlen = 1,
-               .msg_iov = (struct iovec *)&iov,
                .msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
        };
-       int rv;
-
-       oldfs = get_fs();
-       set_fs(KERNEL_DS);
-       rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
-       set_fs(oldfs);
-
-       return rv;
+       return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
 }
 
 static int drbd_recv(struct drbd_connection *connection, void *buf, size_t size)
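The drbd hunk replaces the open-coded set_fs(KERNEL_DS)/sock_recvmsg()/set_fs() sequence with kernel_recvmsg(), which exists precisely so a kernel buffer can be handed over as a kvec without touching the address limit; it also lets msg_iov/msg_iovlen disappear from the msghdr initialiser. A minimal sketch of the resulting shape, with an illustrative function name (recv_all) that is not from the patch:

	static int recv_all(struct socket *sock, void *buf, size_t size)
	{
		struct kvec iov = {
			.iov_base = buf,
			.iov_len  = size,
		};
		struct msghdr msg = {
			.msg_flags = MSG_WAITALL | MSG_NOSIGNAL,
		};

		/* kvec-based receive: no get_fs()/set_fs() dance needed */
		return kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags);
	}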
index 66e8c3b94ef35443f46bf67ea3065023da8b808d..f70a230a2945225f89ae188909c7bc9db90bc32f 100644 (file)
@@ -237,7 +237,7 @@ static int __do_lo_send_write(struct file *file,
        file_end_write(file);
        if (likely(bw == len))
                return 0;
-       printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+       printk_ratelimited(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
                        (unsigned long long)pos, len);
        if (bw >= 0)
                bw = -EIO;
@@ -277,7 +277,7 @@ static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
                return __do_lo_send_write(lo->lo_backing_file,
                                page_address(page), bvec->bv_len,
                                pos);
-       printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+       printk_ratelimited(KERN_ERR "loop: Transfer error at byte offset %llu, "
                        "length %i.\n", (unsigned long long)pos, bvec->bv_len);
        if (ret > 0)
                ret = -EIO;
@@ -316,7 +316,7 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 out:
        return ret;
 fail:
-       printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+       printk_ratelimited(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
        ret = -ENOMEM;
        goto out;
 }
@@ -345,7 +345,7 @@ lo_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                size = p->bsize;
 
        if (lo_do_transfer(lo, READ, page, buf->offset, p->page, p->offset, size, IV)) {
-               printk(KERN_ERR "loop: transfer error block %ld\n",
+               printk_ratelimited(KERN_ERR "loop: transfer error block %ld\n",
                       page->index);
                size = -EINVAL;
        }
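All four loop.c hunks switch printk() to printk_ratelimited() on I/O error paths so a failing backing file cannot flood the log: same level and format string, but messages are dropped once the rate limit trips. The shorter pr_err_ratelimited() form is roughly equivalent for new code; a hypothetical example, not taken from the patch:

	pr_err_ratelimited("loop: write error at byte offset %llu, length %i\n",
			   (unsigned long long)pos, len);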
index 55298db36b2d61a113f25c22905fffb0f22ddd32..3a70ea2f7cd69b2641302e6c44560f32245a078c 100644 (file)
@@ -630,37 +630,29 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
        }
  
        case NBD_CLEAR_SOCK: {
-               struct file *file;
-
+               struct socket *sock = nbd->sock;
                nbd->sock = NULL;
-               file = nbd->file;
-               nbd->file = NULL;
                nbd_clear_que(nbd);
                BUG_ON(!list_empty(&nbd->queue_head));
                BUG_ON(!list_empty(&nbd->waiting_queue));
                kill_bdev(bdev);
-               if (file)
-                       fput(file);
+               if (sock)
+                       sockfd_put(sock);
                return 0;
        }
 
        case NBD_SET_SOCK: {
-               struct file *file;
-               if (nbd->file)
+               struct socket *sock;
+               int err;
+               if (nbd->sock)
                        return -EBUSY;
-               file = fget(arg);
-               if (file) {
-                       struct inode *inode = file_inode(file);
-                       if (S_ISSOCK(inode->i_mode)) {
-                               nbd->file = file;
-                               nbd->sock = SOCKET_I(inode);
-                               if (max_part > 0)
-                                       bdev->bd_invalidated = 1;
-                               nbd->disconnect = 0; /* we're connected now */
-                               return 0;
-                       } else {
-                               fput(file);
-                       }
+               sock = sockfd_lookup(arg, &err);
+               if (sock) {
+                       nbd->sock = sock;
+                       if (max_part > 0)
+                               bdev->bd_invalidated = 1;
+                       nbd->disconnect = 0; /* we're connected now */
+                       return 0;
                }
                return -EINVAL;
        }
@@ -697,12 +689,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 
        case NBD_DO_IT: {
                struct task_struct *thread;
-               struct file *file;
+               struct socket *sock;
                int error;
 
                if (nbd->pid)
                        return -EBUSY;
-               if (!nbd->file)
+               if (!nbd->sock)
                        return -EINVAL;
 
                mutex_unlock(&nbd->tx_lock);
@@ -731,15 +723,15 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
                if (error)
                        return error;
                sock_shutdown(nbd, 0);
-               file = nbd->file;
-               nbd->file = NULL;
+               sock = nbd->sock;
+               nbd->sock = NULL;
                nbd_clear_que(nbd);
                dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
                kill_bdev(bdev);
                queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
                set_device_ro(bdev, false);
-               if (file)
-                       fput(file);
+               if (sock)
+                       sockfd_put(sock);
                nbd->flags = 0;
                nbd->bytesize = 0;
                bdev->bd_inode->i_size = 0;
@@ -875,9 +867,7 @@ static int __init nbd_init(void)
 
        for (i = 0; i < nbds_max; i++) {
                struct gendisk *disk = nbd_dev[i].disk;
-               nbd_dev[i].file = NULL;
                nbd_dev[i].magic = NBD_MAGIC;
-               nbd_dev[i].flags = 0;
                INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
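The nbd conversion stops carrying a struct file and keeps only the struct socket: sockfd_lookup() resolves a userspace fd, verifies it really is a socket, and takes a reference that is later dropped with sockfd_put(), replacing the explicit fget()/S_ISSOCK()/SOCKET_I()/fput() dance and the now-unused nbd->file field. A sketch of the pairing, with a helper name (nbd_grab_sock) that is illustrative only:

	/* resolve a userspace fd to a referenced struct socket, or an error */
	static struct socket *nbd_grab_sock(int fd)
	{
		int err;
		struct socket *sock = sockfd_lookup(fd, &err);

		if (!sock)
			return ERR_PTR(err);	/* e.g. -EBADF or -ENOTSOCK */
		return sock;
	}

	/* ...and the matching release once the device is done with it */
	sockfd_put(sock);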
index da085ff10d25159b7233c98771fe1964adb7cbde..7c64fa756cced628807e70fb873b532964ea39d9 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * NVM Express device driver
- * Copyright (c) 2011, Intel Corporation.
+ * Copyright (c) 2011-2014, Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #include <linux/bio.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
+#include <linux/hdreg.h>
 #include <linux/idr.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -35,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/percpu.h>
 #include <linux/poison.h>
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT  (60 * HZ)
+#define IOD_TIMEOUT    (4 * NVME_IO_TIMEOUT)
+
+unsigned char io_timeout = 30;
+module_param(io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
 static int nvme_major;
 module_param(nvme_major, int, 0);
@@ -58,6 +66,7 @@ static DEFINE_SPINLOCK(dev_list_lock);
 static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
+static wait_queue_head_t nvme_kthread_wait;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 
@@ -74,6 +83,7 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
+       struct rcu_head r_head;
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];       /* nvme4294967295-65535\0 */
@@ -85,6 +95,7 @@ struct nvme_queue {
        wait_queue_head_t sq_full;
        wait_queue_t sq_cong_wait;
        struct bio_list sq_cong;
+       struct list_head iod_bio;
        u32 __iomem *q_db;
        u16 q_depth;
        u16 cq_vector;
@@ -95,6 +106,7 @@ struct nvme_queue {
        u8 cq_phase;
        u8 cqe_seen;
        u8 q_suspended;
+       cpumask_var_t cpu_mask;
        struct async_cmd_info cmdinfo;
        unsigned long cmdid_data[];
 };
@@ -118,7 +130,7 @@ static inline void _nvme_check_size(void)
        BUILD_BUG_ON(sizeof(struct nvme_smart_log) != 512);
 }
 
-typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+typedef void (*nvme_completion_fn)(struct nvme_queue *, void *,
                                                struct nvme_completion *);
 
 struct nvme_cmd_info {
@@ -190,7 +202,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
 #define CMD_CTX_FLUSH          (0x318 + CMD_CTX_BASE)
 #define CMD_CTX_ABORT          (0x31C + CMD_CTX_BASE)
 
-static void special_completion(struct nvme_dev *dev, void *ctx,
+static void special_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
        if (ctx == CMD_CTX_CANCELLED)
@@ -198,26 +210,26 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
        if (ctx == CMD_CTX_FLUSH)
                return;
        if (ctx == CMD_CTX_ABORT) {
-               ++dev->abort_limit;
+               ++nvmeq->dev->abort_limit;
                return;
        }
        if (ctx == CMD_CTX_COMPLETED) {
-               dev_warn(&dev->pci_dev->dev,
+               dev_warn(nvmeq->q_dmadev,
                                "completed id %d twice on queue %d\n",
                                cqe->command_id, le16_to_cpup(&cqe->sq_id));
                return;
        }
        if (ctx == CMD_CTX_INVALID) {
-               dev_warn(&dev->pci_dev->dev,
+               dev_warn(nvmeq->q_dmadev,
                                "invalid id %d completed on queue %d\n",
                                cqe->command_id, le16_to_cpup(&cqe->sq_id));
                return;
        }
 
-       dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+       dev_warn(nvmeq->q_dmadev, "Unknown special completion %p\n", ctx);
 }
 
-static void async_completion(struct nvme_dev *dev, void *ctx,
+static void async_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
        struct async_cmd_info *cmdinfo = ctx;
@@ -262,14 +274,34 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
        return ctx;
 }
 
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+static struct nvme_queue *raw_nvmeq(struct nvme_dev *dev, int qid)
+{
+       return rcu_dereference_raw(dev->queues[qid]);
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) __acquires(RCU)
+{
+       unsigned queue_id = get_cpu_var(*dev->io_queue);
+       rcu_read_lock();
+       return rcu_dereference(dev->queues[queue_id]);
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
 {
-       return dev->queues[get_cpu() + 1];
+       rcu_read_unlock();
+       put_cpu_var(nvmeq->dev->io_queue);
 }
 
-void put_nvmeq(struct nvme_queue *nvmeq)
+static struct nvme_queue *lock_nvmeq(struct nvme_dev *dev, int q_idx)
+                                                       __acquires(RCU)
 {
-       put_cpu();
+       rcu_read_lock();
+       return rcu_dereference(dev->queues[q_idx]);
+}
+
+static void unlock_nvmeq(struct nvme_queue *nvmeq) __releases(RCU)
+{
+       rcu_read_unlock();
 }
 
 /**
@@ -284,6 +316,10 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
        unsigned long flags;
        u16 tail;
        spin_lock_irqsave(&nvmeq->q_lock, flags);
+       if (nvmeq->q_suspended) {
+               spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+               return -EBUSY;
+       }
        tail = nvmeq->sq_tail;
        memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
        if (++tail == nvmeq->q_depth)
@@ -323,6 +359,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
                iod->npages = -1;
                iod->length = nbytes;
                iod->nents = 0;
+               iod->first_dma = 0ULL;
                iod->start_time = jiffies;
        }
 
@@ -371,19 +408,31 @@ static void nvme_end_io_acct(struct bio *bio, unsigned long start_time)
        part_stat_unlock();
 }
 
-static void bio_completion(struct nvme_dev *dev, void *ctx,
+static void bio_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
        struct nvme_iod *iod = ctx;
        struct bio *bio = iod->private;
        u16 status = le16_to_cpup(&cqe->status) >> 1;
 
+       if (unlikely(status)) {
+               if (!(status & NVME_SC_DNR ||
+                               bio->bi_rw & REQ_FAILFAST_MASK) &&
+                               (jiffies - iod->start_time) < IOD_TIMEOUT) {
+                       if (!waitqueue_active(&nvmeq->sq_full))
+                               add_wait_queue(&nvmeq->sq_full,
+                                                       &nvmeq->sq_cong_wait);
+                       list_add_tail(&iod->node, &nvmeq->iod_bio);
+                       wake_up(&nvmeq->sq_full);
+                       return;
+               }
+       }
        if (iod->nents) {
-               dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+               dma_unmap_sg(nvmeq->q_dmadev, iod->sg, iod->nents,
                        bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
                nvme_end_io_acct(bio, iod->start_time);
        }
-       nvme_free_iod(dev, iod);
+       nvme_free_iod(nvmeq->dev, iod);
        if (status)
                bio_endio(bio, -EIO);
        else
@@ -391,8 +440,8 @@ static void bio_completion(struct nvme_dev *dev, void *ctx,
 }
 
 /* length is in bytes.  gfp flags indicates whether we may sleep. */
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-                       struct nvme_iod *iod, int total_len, gfp_t gfp)
+int nvme_setup_prps(struct nvme_dev *dev, struct nvme_iod *iod, int total_len,
+                                                               gfp_t gfp)
 {
        struct dma_pool *pool;
        int length = total_len;
@@ -405,7 +454,6 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
        dma_addr_t prp_dma;
        int nprps, i;
 
-       cmd->prp1 = cpu_to_le64(dma_addr);
        length -= (PAGE_SIZE - offset);
        if (length <= 0)
                return total_len;
@@ -420,7 +468,7 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
        }
 
        if (length <= PAGE_SIZE) {
-               cmd->prp2 = cpu_to_le64(dma_addr);
+               iod->first_dma = dma_addr;
                return total_len;
        }
 
@@ -435,13 +483,12 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
 
        prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
        if (!prp_list) {
-               cmd->prp2 = cpu_to_le64(dma_addr);
+               iod->first_dma = dma_addr;
                iod->npages = -1;
                return (total_len - length) + PAGE_SIZE;
        }
        list[0] = prp_list;
        iod->first_dma = prp_dma;
-       cmd->prp2 = cpu_to_le64(prp_dma);
        i = 0;
        for (;;) {
                if (i == PAGE_SIZE / 8) {
@@ -480,10 +527,11 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 
        bio_chain(split, bio);
 
-       if (bio_list_empty(&nvmeq->sq_cong))
+       if (!waitqueue_active(&nvmeq->sq_full))
                add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
        bio_list_add(&nvmeq->sq_cong, split);
        bio_list_add(&nvmeq->sq_cong, bio);
+       wake_up(&nvmeq->sq_full);
 
        return 0;
 }
@@ -536,25 +584,13 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
        return length;
 }
 
-/*
- * We reuse the small pool to allocate the 16-byte range here as it is not
- * worth having a special pool for these or additional cases to handle freeing
- * the iod.
- */
 static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                struct bio *bio, struct nvme_iod *iod, int cmdid)
 {
-       struct nvme_dsm_range *range;
+       struct nvme_dsm_range *range =
+                               (struct nvme_dsm_range *)iod_list(iod)[0];
        struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
 
-       range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC,
-                                                       &iod->first_dma);
-       if (!range)
-               return -ENOMEM;
-
-       iod_list(iod)[0] = (__le64 *)range;
-       iod->npages = 0;
-
        range->cattr = cpu_to_le32(0);
        range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
        range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
@@ -601,44 +637,22 @@ int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
        return nvme_submit_flush(nvmeq, ns, cmdid);
 }
 
-/*
- * Called with local interrupts disabled and the q_lock held.  May not sleep.
- */
-static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
-                                                               struct bio *bio)
+static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod)
 {
+       struct bio *bio = iod->private;
+       struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
        struct nvme_command *cmnd;
-       struct nvme_iod *iod;
-       enum dma_data_direction dma_dir;
-       int cmdid, length, result;
+       int cmdid;
        u16 control;
        u32 dsmgmt;
-       int psegs = bio_phys_segments(ns->queue, bio);
-
-       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
-               result = nvme_submit_flush_data(nvmeq, ns);
-               if (result)
-                       return result;
-       }
 
-       result = -ENOMEM;
-       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
-       if (!iod)
-               goto nomem;
-       iod->private = bio;
-
-       result = -EBUSY;
        cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
        if (unlikely(cmdid < 0))
-               goto free_iod;
+               return cmdid;
 
-       if (bio->bi_rw & REQ_DISCARD) {
-               result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
-               if (result)
-                       goto free_cmdid;
-               return result;
-       }
-       if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+       if (bio->bi_rw & REQ_DISCARD)
+               return nvme_submit_discard(nvmeq, ns, bio, iod, cmdid);
+       if ((bio->bi_rw & REQ_FLUSH) && !iod->nents)
                return nvme_submit_flush(nvmeq, ns, cmdid);
 
        control = 0;
@@ -652,42 +666,85 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
                dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
 
        cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
-
        memset(cmnd, 0, sizeof(*cmnd));
-       if (bio_data_dir(bio)) {
-               cmnd->rw.opcode = nvme_cmd_write;
-               dma_dir = DMA_TO_DEVICE;
-       } else {
-               cmnd->rw.opcode = nvme_cmd_read;
-               dma_dir = DMA_FROM_DEVICE;
-       }
-
-       result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs);
-       if (result <= 0)
-               goto free_cmdid;
-       length = result;
 
+       cmnd->rw.opcode = bio_data_dir(bio) ? nvme_cmd_write : nvme_cmd_read;
        cmnd->rw.command_id = cmdid;
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
-       length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
-                                                               GFP_ATOMIC);
+       cmnd->rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
-       cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+       cmnd->rw.length =
+               cpu_to_le16((bio->bi_iter.bi_size >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
-       nvme_start_io_acct(bio);
        if (++nvmeq->sq_tail == nvmeq->q_depth)
                nvmeq->sq_tail = 0;
        writel(nvmeq->sq_tail, nvmeq->q_db);
 
        return 0;
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                                               struct bio *bio)
+{
+       struct nvme_iod *iod;
+       int psegs = bio_phys_segments(ns->queue, bio);
+       int result;
+
+       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+               result = nvme_submit_flush_data(nvmeq, ns);
+               if (result)
+                       return result;
+       }
+
+       iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
+       if (!iod)
+               return -ENOMEM;
+
+       iod->private = bio;
+       if (bio->bi_rw & REQ_DISCARD) {
+               void *range;
+               /*
+                * We reuse the small pool to allocate the 16-byte range here
+                * as it is not worth having a special pool for these or
+                * additional cases to handle freeing the iod.
+                */
+               range = dma_pool_alloc(nvmeq->dev->prp_small_pool,
+                                               GFP_ATOMIC,
+                                               &iod->first_dma);
+               if (!range) {
+                       result = -ENOMEM;
+                       goto free_iod;
+               }
+               iod_list(iod)[0] = (__le64 *)range;
+               iod->npages = 0;
+       } else if (psegs) {
+               result = nvme_map_bio(nvmeq, iod, bio,
+                       bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
+                       psegs);
+               if (result <= 0)
+                       goto free_iod;
+               if (nvme_setup_prps(nvmeq->dev, iod, result, GFP_ATOMIC) !=
+                                                               result) {
+                       result = -ENOMEM;
+                       goto free_iod;
+               }
+               nvme_start_io_acct(bio);
+       }
+       if (unlikely(nvme_submit_iod(nvmeq, iod))) {
+               if (!waitqueue_active(&nvmeq->sq_full))
+                       add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+               list_add_tail(&iod->node, &nvmeq->iod_bio);
+       }
+       return 0;
 
- free_cmdid:
-       free_cmdid(nvmeq, cmdid, NULL);
  free_iod:
        nvme_free_iod(nvmeq->dev, iod);
- nomem:
        return result;
 }
 
@@ -711,7 +768,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
                }
 
                ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
-               fn(nvmeq->dev, ctx, &cqe);
+               fn(nvmeq, ctx, &cqe);
        }
 
        /* If the controller ignores the cq head doorbell and continuously
@@ -747,7 +804,7 @@ static void nvme_make_request(struct request_queue *q, struct bio *bio)
        if (!nvmeq->q_suspended && bio_list_empty(&nvmeq->sq_cong))
                result = nvme_submit_bio_queue(nvmeq, ns, bio);
        if (unlikely(result)) {
-               if (bio_list_empty(&nvmeq->sq_cong))
+               if (!waitqueue_active(&nvmeq->sq_full))
                        add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
                bio_list_add(&nvmeq->sq_cong, bio);
        }
@@ -791,7 +848,7 @@ struct sync_cmd_info {
        int status;
 };
 
-static void sync_completion(struct nvme_dev *dev, void *ctx,
+static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
        struct sync_cmd_info *cmdinfo = ctx;
@@ -804,27 +861,46 @@ static void sync_completion(struct nvme_dev *dev, void *ctx,
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
  * if the result is positive, it's an NVM Express status code
  */
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
+static int nvme_submit_sync_cmd(struct nvme_dev *dev, int q_idx,
+                                               struct nvme_command *cmd,
                                                u32 *result, unsigned timeout)
 {
-       int cmdid;
+       int cmdid, ret;
        struct sync_cmd_info cmdinfo;
+       struct nvme_queue *nvmeq;
+
+       nvmeq = lock_nvmeq(dev, q_idx);
+       if (!nvmeq) {
+               unlock_nvmeq(nvmeq);
+               return -ENODEV;
+       }
 
        cmdinfo.task = current;
        cmdinfo.status = -EINTR;
 
-       cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
-                                                               timeout);
-       if (cmdid < 0)
+       cmdid = alloc_cmdid(nvmeq, &cmdinfo, sync_completion, timeout);
+       if (cmdid < 0) {
+               unlock_nvmeq(nvmeq);
                return cmdid;
+       }
        cmd->common.command_id = cmdid;
 
        set_current_state(TASK_KILLABLE);
-       nvme_submit_cmd(nvmeq, cmd);
+       ret = nvme_submit_cmd(nvmeq, cmd);
+       if (ret) {
+               free_cmdid(nvmeq, cmdid, NULL);
+               unlock_nvmeq(nvmeq);
+               set_current_state(TASK_RUNNING);
+               return ret;
+       }
+       unlock_nvmeq(nvmeq);
        schedule_timeout(timeout);
 
        if (cmdinfo.status == -EINTR) {
-               nvme_abort_command(nvmeq, cmdid);
+               nvmeq = lock_nvmeq(dev, q_idx);
+               if (nvmeq)
+                       nvme_abort_command(nvmeq, cmdid);
+               unlock_nvmeq(nvmeq);
                return -EINTR;
        }
 
@@ -845,20 +921,26 @@ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
                return cmdid;
        cmdinfo->status = -EINTR;
        cmd->common.command_id = cmdid;
-       nvme_submit_cmd(nvmeq, cmd);
-       return 0;
+       return nvme_submit_cmd(nvmeq, cmd);
 }
 
 int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
                                                                u32 *result)
 {
-       return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+       return nvme_submit_sync_cmd(dev, 0, cmd, result, ADMIN_TIMEOUT);
+}
+
+int nvme_submit_io_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+                                                               u32 *result)
+{
+       return nvme_submit_sync_cmd(dev, smp_processor_id() + 1, cmd, result,
+                                                       NVME_IO_TIMEOUT);
 }
 
 static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
                struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
 {
-       return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+       return nvme_submit_async_cmd(raw_nvmeq(dev, 0), cmd, cmdinfo,
                                                                ADMIN_TIMEOUT);
 }
 
@@ -985,6 +1067,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
        struct nvme_command cmd;
        struct nvme_dev *dev = nvmeq->dev;
        struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       struct nvme_queue *adminq;
 
        if (!nvmeq->qid || info[cmdid].aborted) {
                if (work_busy(&dev->reset_work))
@@ -1001,7 +1084,8 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
        if (!dev->abort_limit)
                return;
 
-       a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+       adminq = rcu_dereference(dev->queues[0]);
+       a_cmdid = alloc_cmdid(adminq, CMD_CTX_ABORT, special_completion,
                                                                ADMIN_TIMEOUT);
        if (a_cmdid < 0)
                return;
@@ -1018,7 +1102,7 @@ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
 
        dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
                                                        nvmeq->qid);
-       nvme_submit_cmd(dev->queues[0], &cmd);
+       nvme_submit_cmd(adminq, &cmd);
 }
 
 /**
@@ -1051,23 +1135,38 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
                dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
                                                                nvmeq->qid);
                ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-               fn(nvmeq->dev, ctx, &cqe);
+               fn(nvmeq, ctx, &cqe);
        }
 }
 
-static void nvme_free_queue(struct nvme_queue *nvmeq)
+static void nvme_free_queue(struct rcu_head *r)
 {
+       struct nvme_queue *nvmeq = container_of(r, struct nvme_queue, r_head);
+
        spin_lock_irq(&nvmeq->q_lock);
        while (bio_list_peek(&nvmeq->sq_cong)) {
                struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
                bio_endio(bio, -EIO);
        }
+       while (!list_empty(&nvmeq->iod_bio)) {
+               static struct nvme_completion cqe = {
+                       .status = cpu_to_le16(
+                               (NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1),
+               };
+               struct nvme_iod *iod = list_first_entry(&nvmeq->iod_bio,
+                                                       struct nvme_iod,
+                                                       node);
+               list_del(&iod->node);
+               bio_completion(nvmeq, iod, &cqe);
+       }
        spin_unlock_irq(&nvmeq->q_lock);
 
        dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
                                (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
        dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
                                        nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       if (nvmeq->qid)
+               free_cpumask_var(nvmeq->cpu_mask);
        kfree(nvmeq);
 }
 
@@ -1076,9 +1175,10 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
        int i;
 
        for (i = dev->queue_count - 1; i >= lowest; i--) {
-               nvme_free_queue(dev->queues[i]);
+               struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
+               rcu_assign_pointer(dev->queues[i], NULL);
+               call_rcu(&nvmeq->r_head, nvme_free_queue);
                dev->queue_count--;
-               dev->queues[i] = NULL;
        }
 }
 
@@ -1098,6 +1198,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
                return 1;
        }
        nvmeq->q_suspended = 1;
+       nvmeq->dev->online_queues--;
        spin_unlock_irq(&nvmeq->q_lock);
 
        irq_set_affinity_hint(vector, NULL);
@@ -1116,7 +1217,7 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
 
 static void nvme_disable_queue(struct nvme_dev *dev, int qid)
 {
-       struct nvme_queue *nvmeq = dev->queues[qid];
+       struct nvme_queue *nvmeq = raw_nvmeq(dev, qid);
 
        if (!nvmeq)
                return;
@@ -1152,6 +1253,9 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        if (!nvmeq->sq_cmds)
                goto free_cqdma;
 
+       if (qid && !zalloc_cpumask_var(&nvmeq->cpu_mask, GFP_KERNEL))
+               goto free_sqdma;
+
        nvmeq->q_dmadev = dmadev;
        nvmeq->dev = dev;
        snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
@@ -1162,15 +1266,20 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
        init_waitqueue_head(&nvmeq->sq_full);
        init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
        bio_list_init(&nvmeq->sq_cong);
+       INIT_LIST_HEAD(&nvmeq->iod_bio);
        nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
        nvmeq->cq_vector = vector;
        nvmeq->qid = qid;
        nvmeq->q_suspended = 1;
        dev->queue_count++;
+       rcu_assign_pointer(dev->queues[qid], nvmeq);
 
        return nvmeq;
 
+ free_sqdma:
+       dma_free_coherent(dmadev, SQ_SIZE(depth), (void *)nvmeq->sq_cmds,
+                                                       nvmeq->sq_dma_addr);
  free_cqdma:
        dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes,
                                                        nvmeq->cq_dma_addr);
@@ -1203,6 +1312,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
        nvme_cancel_ios(nvmeq, false);
        nvmeq->q_suspended = 0;
+       dev->online_queues++;
 }
 
 static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
@@ -1311,12 +1421,11 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
        if (result < 0)
                return result;
 
-       nvmeq = dev->queues[0];
+       nvmeq = raw_nvmeq(dev, 0);
        if (!nvmeq) {
                nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
                if (!nvmeq)
                        return -ENOMEM;
-               dev->queues[0] = nvmeq;
        }
 
        aqa = nvmeq->q_depth - 1;
@@ -1418,7 +1527,6 @@ void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
        struct nvme_dev *dev = ns->dev;
-       struct nvme_queue *nvmeq;
        struct nvme_user_io io;
        struct nvme_command c;
        unsigned length, meta_len;
@@ -1492,22 +1600,14 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
                c.rw.metadata = cpu_to_le64(meta_dma_addr);
        }
 
-       length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+       length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+       c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+       c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
-       nvmeq = get_nvmeq(dev);
-       /*
-        * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
-        * disabled.  We may be preempted at any point, and be rescheduled
-        * to a different CPU.  That will cause cacheline bouncing, but no
-        * additional races since q_lock already protects against other CPUs.
-        */
-       put_nvmeq(nvmeq);
        if (length != (io.nblocks + 1) << ns->lba_shift)
                status = -ENOMEM;
-       else if (!nvmeq || nvmeq->q_suspended)
-               status = -EBUSY;
        else
-               status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+               status = nvme_submit_io_cmd(dev, &c, NULL);
 
        if (meta_len) {
                if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) {
@@ -1572,8 +1672,9 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
                                                                length);
                if (IS_ERR(iod))
                        return PTR_ERR(iod);
-               length = nvme_setup_prps(dev, &c.common, iod, length,
-                                                               GFP_KERNEL);
+               length = nvme_setup_prps(dev, iod, length, GFP_KERNEL);
+               c.common.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+               c.common.prp2 = cpu_to_le64(iod->first_dma);
        }
 
        timeout = cmd.timeout_ms ? msecs_to_jiffies(cmd.timeout_ms) :
@@ -1581,8 +1682,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
        if (length != cmd.data_len)
                status = -ENOMEM;
        else
-               status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result,
-                                                               timeout);
+               status = nvme_submit_sync_cmd(dev, 0, &c, &cmd.result, timeout);
 
        if (cmd.data_len) {
                nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
@@ -1653,25 +1753,51 @@ static void nvme_release(struct gendisk *disk, fmode_t mode)
        kref_put(&dev->kref, nvme_free_dev);
 }
 
+static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
+{
+       /* some standard values */
+       geo->heads = 1 << 6;
+       geo->sectors = 1 << 5;
+       geo->cylinders = get_capacity(bd->bd_disk) >> 11;
+       return 0;
+}
+
 static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
        .compat_ioctl   = nvme_compat_ioctl,
        .open           = nvme_open,
        .release        = nvme_release,
+       .getgeo         = nvme_getgeo,
 };
 
+static void nvme_resubmit_iods(struct nvme_queue *nvmeq)
+{
+       struct nvme_iod *iod, *next;
+
+       list_for_each_entry_safe(iod, next, &nvmeq->iod_bio, node) {
+               if (unlikely(nvme_submit_iod(nvmeq, iod)))
+                       break;
+               list_del(&iod->node);
+               if (bio_list_empty(&nvmeq->sq_cong) &&
+                                               list_empty(&nvmeq->iod_bio))
+                       remove_wait_queue(&nvmeq->sq_full,
+                                               &nvmeq->sq_cong_wait);
+       }
+}
+
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
        while (bio_list_peek(&nvmeq->sq_cong)) {
                struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
                struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
 
-               if (bio_list_empty(&nvmeq->sq_cong))
+               if (bio_list_empty(&nvmeq->sq_cong) &&
+                                               list_empty(&nvmeq->iod_bio))
                        remove_wait_queue(&nvmeq->sq_full,
                                                        &nvmeq->sq_cong_wait);
                if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
-                       if (bio_list_empty(&nvmeq->sq_cong))
+                       if (!waitqueue_active(&nvmeq->sq_full))
                                add_wait_queue(&nvmeq->sq_full,
                                                        &nvmeq->sq_cong_wait);
                        bio_list_add_head(&nvmeq->sq_cong, bio);
@@ -1700,8 +1826,10 @@ static int nvme_kthread(void *data)
                                queue_work(nvme_workq, &dev->reset_work);
                                continue;
                        }
+                       rcu_read_lock();
                        for (i = 0; i < dev->queue_count; i++) {
-                               struct nvme_queue *nvmeq = dev->queues[i];
+                               struct nvme_queue *nvmeq =
+                                               rcu_dereference(dev->queues[i]);
                                if (!nvmeq)
                                        continue;
                                spin_lock_irq(&nvmeq->q_lock);
@@ -1710,9 +1838,11 @@ static int nvme_kthread(void *data)
                                nvme_process_cq(nvmeq);
                                nvme_cancel_ios(nvmeq, true);
                                nvme_resubmit_bios(nvmeq);
+                               nvme_resubmit_iods(nvmeq);
  unlock:
                                spin_unlock_irq(&nvmeq->q_lock);
                        }
+                       rcu_read_unlock();
                }
                spin_unlock(&dev_list_lock);
                schedule_timeout(round_jiffies_relative(HZ));
@@ -1787,6 +1917,143 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        return NULL;
 }
 
+static int nvme_find_closest_node(int node)
+{
+       int n, val, min_val = INT_MAX, best_node = node;
+
+       for_each_online_node(n) {
+               if (n == node)
+                       continue;
+               val = node_distance(node, n);
+               if (val < min_val) {
+                       min_val = val;
+                       best_node = n;
+               }
+       }
+       return best_node;
+}
+
+static void nvme_set_queue_cpus(cpumask_t *qmask, struct nvme_queue *nvmeq,
+                                                               int count)
+{
+       int cpu;
+       for_each_cpu(cpu, qmask) {
+               if (cpumask_weight(nvmeq->cpu_mask) >= count)
+                       break;
+               if (!cpumask_test_and_set_cpu(cpu, nvmeq->cpu_mask))
+                       *per_cpu_ptr(nvmeq->dev->io_queue, cpu) = nvmeq->qid;
+       }
+}
+
+static void nvme_add_cpus(cpumask_t *mask, const cpumask_t *unassigned_cpus,
+       const cpumask_t *new_mask, struct nvme_queue *nvmeq, int cpus_per_queue)
+{
+       int next_cpu;
+       for_each_cpu(next_cpu, new_mask) {
+               cpumask_or(mask, mask, get_cpu_mask(next_cpu));
+               cpumask_or(mask, mask, topology_thread_cpumask(next_cpu));
+               cpumask_and(mask, mask, unassigned_cpus);
+               nvme_set_queue_cpus(mask, nvmeq, cpus_per_queue);
+       }
+}
+
+static void nvme_create_io_queues(struct nvme_dev *dev)
+{
+       unsigned i, max;
+
+       max = min(dev->max_qid, num_online_cpus());
+       for (i = dev->queue_count; i <= max; i++)
+               if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+                       break;
+
+       max = min(dev->queue_count - 1, num_online_cpus());
+       for (i = dev->online_queues; i <= max; i++)
+               if (nvme_create_queue(raw_nvmeq(dev, i), i))
+                       break;
+}
+
+/*
+ * If there are fewer queues than online cpus, this will try to optimally
+ * assign a queue to multiple cpus by grouping cpus that are "close" together:
+ * thread siblings, core, socket, closest node, then whatever else is
+ * available.
+ */
+static void nvme_assign_io_queues(struct nvme_dev *dev)
+{
+       unsigned cpu, cpus_per_queue, queues, remainder, i;
+       cpumask_var_t unassigned_cpus;
+
+       nvme_create_io_queues(dev);
+
+       queues = min(dev->online_queues - 1, num_online_cpus());
+       if (!queues)
+               return;
+
+       cpus_per_queue = num_online_cpus() / queues;
+       remainder = queues - (num_online_cpus() - queues * cpus_per_queue);
+
+       if (!alloc_cpumask_var(&unassigned_cpus, GFP_KERNEL))
+               return;
+
+       cpumask_copy(unassigned_cpus, cpu_online_mask);
+       cpu = cpumask_first(unassigned_cpus);
+       for (i = 1; i <= queues; i++) {
+               struct nvme_queue *nvmeq = lock_nvmeq(dev, i);
+               cpumask_t mask;
+
+               cpumask_clear(nvmeq->cpu_mask);
+               if (!cpumask_weight(unassigned_cpus)) {
+                       unlock_nvmeq(nvmeq);
+                       break;
+               }
+
+               mask = *get_cpu_mask(cpu);
+               nvme_set_queue_cpus(&mask, nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               topology_thread_cpumask(cpu),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               topology_core_cpumask(cpu),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               cpumask_of_node(cpu_to_node(cpu)),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               cpumask_of_node(
+                                       nvme_find_closest_node(
+                                               cpu_to_node(cpu))),
+                               nvmeq, cpus_per_queue);
+               if (cpus_weight(mask) < cpus_per_queue)
+                       nvme_add_cpus(&mask, unassigned_cpus,
+                               unassigned_cpus,
+                               nvmeq, cpus_per_queue);
+
+               WARN(cpumask_weight(nvmeq->cpu_mask) != cpus_per_queue,
+                       "nvme%d qid:%d mis-matched queue-to-cpu assignment\n",
+                       dev->instance, i);
+
+               irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+                                                       nvmeq->cpu_mask);
+               cpumask_andnot(unassigned_cpus, unassigned_cpus,
+                                               nvmeq->cpu_mask);
+               cpu = cpumask_next(cpu, unassigned_cpus);
+               if (remainder && !--remainder)
+                       cpus_per_queue++;
+               unlock_nvmeq(nvmeq);
+       }
+       WARN(cpumask_weight(unassigned_cpus), "nvme%d unassigned online cpus\n",
+                                                               dev->instance);
+       i = 0;
+       cpumask_andnot(unassigned_cpus, cpu_possible_mask, cpu_online_mask);
+       for_each_cpu(cpu, unassigned_cpus)
+               *per_cpu_ptr(dev->io_queue, cpu) = (i++ % queues) + 1;
+       free_cpumask_var(unassigned_cpus);
+}
+
 static int set_queue_count(struct nvme_dev *dev, int count)
 {
        int status;
@@ -1805,13 +2072,26 @@ static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
        return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
 }
 
+static int nvme_cpu_notify(struct notifier_block *self,
+                               unsigned long action, void *hcpu)
+{
+       struct nvme_dev *dev = container_of(self, struct nvme_dev, nb);
+       switch (action) {
+       case CPU_ONLINE:
+       case CPU_DEAD:
+               nvme_assign_io_queues(dev);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-       struct nvme_queue *adminq = dev->queues[0];
+       struct nvme_queue *adminq = raw_nvmeq(dev, 0);
        struct pci_dev *pdev = dev->pci_dev;
-       int result, cpu, i, vecs, nr_io_queues, size, q_depth;
+       int result, i, vecs, nr_io_queues, size;
 
-       nr_io_queues = num_online_cpus();
+       nr_io_queues = num_possible_cpus();
        result = set_queue_count(dev, nr_io_queues);
        if (result < 0)
                return result;
@@ -1830,7 +2110,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                        size = db_bar_size(dev, nr_io_queues);
                } while (1);
                dev->dbs = ((void __iomem *)dev->bar) + 4096;
-               dev->queues[0]->q_db = dev->dbs;
+               adminq->q_db = dev->dbs;
        }
 
        /* Deregister the admin queue's interrupt */
@@ -1856,6 +2136,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
         * number of interrupts.
         */
        nr_io_queues = vecs;
+       dev->max_qid = nr_io_queues;
 
        result = queue_request_irq(dev, adminq, adminq->irqname);
        if (result) {
@@ -1864,49 +2145,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        }
 
        /* Free previously allocated queues that are no longer usable */
-       spin_lock(&dev_list_lock);
-       for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
-               struct nvme_queue *nvmeq = dev->queues[i];
-
-               spin_lock_irq(&nvmeq->q_lock);
-               nvme_cancel_ios(nvmeq, false);
-               spin_unlock_irq(&nvmeq->q_lock);
-
-               nvme_free_queue(nvmeq);
-               dev->queue_count--;
-               dev->queues[i] = NULL;
-       }
-       spin_unlock(&dev_list_lock);
-
-       cpu = cpumask_first(cpu_online_mask);
-       for (i = 0; i < nr_io_queues; i++) {
-               irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
-               cpu = cpumask_next(cpu, cpu_online_mask);
-       }
-
-       q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
-                                                               NVME_Q_DEPTH);
-       for (i = dev->queue_count - 1; i < nr_io_queues; i++) {
-               dev->queues[i + 1] = nvme_alloc_queue(dev, i + 1, q_depth, i);
-               if (!dev->queues[i + 1]) {
-                       result = -ENOMEM;
-                       goto free_queues;
-               }
-       }
-
-       for (; i < num_possible_cpus(); i++) {
-               int target = i % rounddown_pow_of_two(dev->queue_count - 1);
-               dev->queues[i + 1] = dev->queues[target + 1];
-       }
+       nvme_free_queues(dev, nr_io_queues + 1);
+       nvme_assign_io_queues(dev);
 
-       for (i = 1; i < dev->queue_count; i++) {
-               result = nvme_create_queue(dev->queues[i], i);
-               if (result) {
-                       for (--i; i > 0; i--)
-                               nvme_disable_queue(dev, i);
-                       goto free_queues;
-               }
-       }
+       dev->nb.notifier_call = &nvme_cpu_notify;
+       result = register_hotcpu_notifier(&dev->nb);
+       if (result)
+               goto free_queues;
 
        return 0;
 
@@ -1985,6 +2230,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 
 static int nvme_dev_map(struct nvme_dev *dev)
 {
+       u64 cap;
        int bars, result = -ENOMEM;
        struct pci_dev *pdev = dev->pci_dev;
 
@@ -2008,7 +2254,9 @@ static int nvme_dev_map(struct nvme_dev *dev)
                result = -ENODEV;
                goto unmap;
        }
-       dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
+       cap = readq(&dev->bar->cap);
+       dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
+       dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
 
        return 0;
@@ -2164,7 +2412,7 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
        atomic_set(&dq.refcount, 0);
        dq.worker = &worker;
        for (i = dev->queue_count - 1; i > 0; i--) {
-               struct nvme_queue *nvmeq = dev->queues[i];
+               struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
 
                if (nvme_suspend_queue(nvmeq))
                        continue;
@@ -2177,19 +2425,38 @@ static void nvme_disable_io_queues(struct nvme_dev *dev)
        kthread_stop(kworker_task);
 }
 
+/*
+ * Remove the node from the device list and check
+ * whether we need to stop the nvme_thread.
+ */
+static void nvme_dev_list_remove(struct nvme_dev *dev)
+{
+       struct task_struct *tmp = NULL;
+
+       spin_lock(&dev_list_lock);
+       list_del_init(&dev->node);
+       if (list_empty(&dev_list) && !IS_ERR_OR_NULL(nvme_thread)) {
+               tmp = nvme_thread;
+               nvme_thread = NULL;
+       }
+       spin_unlock(&dev_list_lock);
+
+       if (tmp)
+               kthread_stop(tmp);
+}
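
Note that nvme_dev_list_remove() only snapshots the thread pointer while dev_list_lock is held and defers kthread_stop() until after unlocking, since kthread_stop() can sleep and dev_list_lock is a non-sleeping spinlock. A minimal, hypothetical sketch of the same pattern for any shared worker thread (all names below are illustrative, not from this driver):

/* Illustrative sketch only: stop a shared worker when its last user leaves.
 * The pointer is snapshotted under the spinlock; the sleeping call
 * (kthread_stop) happens only after the lock is dropped.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(user_lock);		/* hypothetical */
static LIST_HEAD(user_list);			/* hypothetical */
static struct task_struct *shared_worker;	/* hypothetical shared kthread */

static void drop_user(struct list_head *me)
{
	struct task_struct *to_stop = NULL;

	spin_lock(&user_lock);
	list_del_init(me);
	if (list_empty(&user_list) && !IS_ERR_OR_NULL(shared_worker)) {
		to_stop = shared_worker;
		shared_worker = NULL;
	}
	spin_unlock(&user_lock);

	if (to_stop)
		kthread_stop(to_stop);	/* may sleep, so it runs outside the lock */
}
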
+
 static void nvme_dev_shutdown(struct nvme_dev *dev)
 {
        int i;
 
        dev->initialized = 0;
+       unregister_hotcpu_notifier(&dev->nb);
 
-       spin_lock(&dev_list_lock);
-       list_del_init(&dev->node);
-       spin_unlock(&dev_list_lock);
+       nvme_dev_list_remove(dev);
 
        if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
                for (i = dev->queue_count - 1; i >= 0; i--) {
-                       struct nvme_queue *nvmeq = dev->queues[i];
+                       struct nvme_queue *nvmeq = raw_nvmeq(dev, i);
                        nvme_suspend_queue(nvmeq);
                        nvme_clear_queue(nvmeq);
                }
@@ -2282,6 +2549,7 @@ static void nvme_free_dev(struct kref *kref)
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
        nvme_free_namespaces(dev);
+       free_percpu(dev->io_queue);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2325,6 +2593,7 @@ static const struct file_operations nvme_dev_fops = {
 static int nvme_dev_start(struct nvme_dev *dev)
 {
        int result;
+       bool start_thread = false;
 
        result = nvme_dev_map(dev);
        if (result)
@@ -2335,9 +2604,24 @@ static int nvme_dev_start(struct nvme_dev *dev)
                goto unmap;
 
        spin_lock(&dev_list_lock);
+       if (list_empty(&dev_list) && IS_ERR_OR_NULL(nvme_thread)) {
+               start_thread = true;
+               nvme_thread = NULL;
+       }
        list_add(&dev->node, &dev_list);
        spin_unlock(&dev_list_lock);
 
+       if (start_thread) {
+               nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+               wake_up(&nvme_kthread_wait);
+       } else
+               wait_event_killable(nvme_kthread_wait, nvme_thread);
+
+       if (IS_ERR_OR_NULL(nvme_thread)) {
+               result = nvme_thread ? PTR_ERR(nvme_thread) : -EINTR;
+               goto disable;
+       }
+
        result = nvme_setup_io_queues(dev);
        if (result && result != -EBUSY)
                goto disable;
@@ -2346,9 +2630,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
 
  disable:
        nvme_disable_queue(dev, 0);
-       spin_lock(&dev_list_lock);
-       list_del_init(&dev->node);
-       spin_unlock(&dev_list_lock);
+       nvme_dev_list_remove(dev);
  unmap:
        nvme_dev_unmap(dev);
        return result;
@@ -2367,18 +2649,10 @@ static int nvme_remove_dead_ctrl(void *arg)
 
 static void nvme_remove_disks(struct work_struct *ws)
 {
-       int i;
        struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
 
        nvme_dev_remove(dev);
-       spin_lock(&dev_list_lock);
-       for (i = dev->queue_count - 1; i > 0; i--) {
-               BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
-               nvme_free_queue(dev->queues[i]);
-               dev->queue_count--;
-               dev->queues[i] = NULL;
-       }
-       spin_unlock(&dev_list_lock);
+       nvme_free_queues(dev, 1);
 }
 
 static int nvme_dev_resume(struct nvme_dev *dev)
@@ -2441,6 +2715,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                                                                GFP_KERNEL);
        if (!dev->queues)
                goto free;
+       dev->io_queue = alloc_percpu(unsigned short);
+       if (!dev->io_queue)
+               goto free;
 
        INIT_LIST_HEAD(&dev->namespaces);
        dev->reset_workfn = nvme_reset_failed_dev;
@@ -2455,6 +2732,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release;
 
+       kref_init(&dev->kref);
        result = nvme_dev_start(dev);
        if (result) {
                if (result == -EBUSY)
@@ -2462,7 +2740,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto release_pools;
        }
 
-       kref_init(&dev->kref);
        result = nvme_dev_add(dev);
        if (result)
                goto shutdown;
@@ -2491,6 +2768,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  release:
        nvme_release_instance(dev);
  free:
+       free_percpu(dev->io_queue);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@ -2517,6 +2795,7 @@ static void nvme_remove(struct pci_dev *pdev)
        nvme_dev_remove(dev);
        nvme_dev_shutdown(dev);
        nvme_free_queues(dev, 0);
+       rcu_barrier();
        nvme_release_instance(dev);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
@@ -2529,6 +2808,7 @@ static void nvme_remove(struct pci_dev *pdev)
 #define nvme_slot_reset NULL
 #define nvme_error_resume NULL
 
+#ifdef CONFIG_PM_SLEEP
 static int nvme_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -2549,6 +2829,7 @@ static int nvme_resume(struct device *dev)
        }
        return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
 
@@ -2563,7 +2844,7 @@ static const struct pci_error_handlers nvme_err_handler = {
 /* Move to pci_ids.h later */
 #define PCI_CLASS_STORAGE_EXPRESS      0x010802
 
-static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+static const struct pci_device_id nvme_id_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
@@ -2585,14 +2866,11 @@ static int __init nvme_init(void)
 {
        int result;
 
-       nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
-       if (IS_ERR(nvme_thread))
-               return PTR_ERR(nvme_thread);
+       init_waitqueue_head(&nvme_kthread_wait);
 
-       result = -ENOMEM;
        nvme_workq = create_singlethread_workqueue("nvme");
        if (!nvme_workq)
-               goto kill_kthread;
+               return -ENOMEM;
 
        result = register_blkdev(nvme_major, "nvme");
        if (result < 0)
@@ -2609,8 +2887,6 @@ static int __init nvme_init(void)
        unregister_blkdev(nvme_major, "nvme");
  kill_workq:
        destroy_workqueue(nvme_workq);
- kill_kthread:
-       kthread_stop(nvme_thread);
        return result;
 }
 
@@ -2619,11 +2895,11 @@ static void __exit nvme_exit(void)
        pci_unregister_driver(&nvme_driver);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
-       kthread_stop(nvme_thread);
+       BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
 }
 
 MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
 MODULE_LICENSE("GPL");
-MODULE_VERSION("0.8");
+MODULE_VERSION("0.9");
 module_init(nvme_init);
 module_exit(nvme_exit);
index 4a0ceb64e26924b0b777d19d1580850b1e12901d..2c3f5be06da1078aa28a1a42e070495d4e761367 100644 (file)
@@ -1562,13 +1562,14 @@ static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                        res = PTR_ERR(iod);
                        goto out;
                }
-               length = nvme_setup_prps(dev, &c.common, iod, tot_len,
-                                                               GFP_KERNEL);
+               length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
                if (length != tot_len) {
                        res = -ENOMEM;
                        goto out_unmap;
                }
 
+               c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+               c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
                c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
                c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
        } else if (opcode == nvme_admin_activate_fw) {
@@ -2033,7 +2034,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        int res = SNTI_TRANSLATION_SUCCESS;
        int nvme_sc;
        struct nvme_dev *dev = ns->dev;
-       struct nvme_queue *nvmeq;
        u32 num_cmds;
        struct nvme_iod *iod;
        u64 unit_len;
@@ -2045,7 +2045,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_command c;
        u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
        u16 control;
-       u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors);
+       u32 max_blocks = queue_max_hw_sectors(ns->queue);
 
        num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);
 
@@ -2093,8 +2093,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                        res = PTR_ERR(iod);
                        goto out;
                }
-               retcode = nvme_setup_prps(dev, &c.common, iod, unit_len,
-                                                       GFP_KERNEL);
+               retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
                if (retcode != unit_len) {
                        nvme_unmap_user_pages(dev,
                                (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2103,21 +2102,12 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                        res = -ENOMEM;
                        goto out;
                }
+               c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
+               c.rw.prp2 = cpu_to_le64(iod->first_dma);
 
                nvme_offset += unit_num_blocks;
 
-               nvmeq = get_nvmeq(dev);
-               /*
-                * Since nvme_submit_sync_cmd sleeps, we can't keep
-                * preemption disabled.  We may be preempted at any
-                * point, and be rescheduled to a different CPU.  That
-                * will cause cacheline bouncing, but no additional
-                * races since q_lock already protects against other
-                * CPUs.
-                */
-               put_nvmeq(nvmeq);
-               nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
-                                               NVME_IO_TIMEOUT);
+               nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
                if (nvme_sc != NVME_SC_SUCCESS) {
                        nvme_unmap_user_pages(dev,
                                (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2634,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
        int res = SNTI_TRANSLATION_SUCCESS;
        int nvme_sc;
-       struct nvme_queue *nvmeq;
        struct nvme_command c;
        u8 immed, pcmod, pc, no_flush, start;
 
@@ -2671,10 +2660,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                        c.common.opcode = nvme_cmd_flush;
                        c.common.nsid = cpu_to_le32(ns->ns_id);
 
-                       nvmeq = get_nvmeq(ns->dev);
-                       put_nvmeq(nvmeq);
-                       nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+                       nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
                        res = nvme_trans_status_code(hdr, nvme_sc);
                        if (res)
                                goto out;
@@ -2697,15 +2683,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
        int res = SNTI_TRANSLATION_SUCCESS;
        int nvme_sc;
        struct nvme_command c;
-       struct nvme_queue *nvmeq;
 
        memset(&c, 0, sizeof(c));
        c.common.opcode = nvme_cmd_flush;
        c.common.nsid = cpu_to_le32(ns->ns_id);
 
-       nvmeq = get_nvmeq(ns->dev);
-       put_nvmeq(nvmeq);
-       nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+       nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 
        res = nvme_trans_status_code(hdr, nvme_sc);
        if (res)
@@ -2872,7 +2855,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_dev *dev = ns->dev;
        struct scsi_unmap_parm_list *plist;
        struct nvme_dsm_range *range;
-       struct nvme_queue *nvmeq;
        struct nvme_command c;
        int i, nvme_sc, res = -ENOMEM;
        u16 ndesc, list_len;
@@ -2914,10 +2896,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        c.dsm.nr = cpu_to_le32(ndesc - 1);
        c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-       nvmeq = get_nvmeq(dev);
-       put_nvmeq(nvmeq);
-
-       nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+       nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
        res = nvme_trans_status_code(hdr, nvme_sc);
 
        dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
index 1386749b48ffd6e2711316a9083e913c560eabb6..fbae63e3d304350261e536159e960ab70b5ba409 100644 (file)
@@ -408,7 +408,7 @@ config APPLICOM
 
 config SONYPI
        tristate "Sony Vaio Programmable I/O Control Device support"
-       depends on X86 && PCI && INPUT && !64BIT
+       depends on X86_32 && PCI && INPUT
        ---help---
          This driver enables access to the Sony Programmable I/O Control
          Device which can be found in many (all ?) Sony Vaio laptops.
index 6928d094451d607795b4f2a07d7599e01712f824..60aafb8a1f2e24d38993aa57c114f913d5524735 100644 (file)
@@ -901,9 +901,9 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                if (len + offset > PAGE_SIZE)
                        len = PAGE_SIZE - offset;
 
-               src = buf->ops->map(pipe, buf, 1);
+               src = kmap_atomic(buf->page);
                memcpy(page_address(page) + offset, src + buf->offset, len);
-               buf->ops->unmap(pipe, buf, src);
+               kunmap_atomic(src);
 
                sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
        }
index 1e2b9db563ec1ac74b39516a9438fc360cf658fa..0e9cce82844bf519f62b7bed24bf0783d89400e9 100644 (file)
@@ -30,7 +30,7 @@ config ARM_EXYNOS_CPUFREQ
 
 config ARM_EXYNOS4210_CPUFREQ
        bool "SAMSUNG EXYNOS4210"
-       depends on CPU_EXYNOS4210
+       depends on CPU_EXYNOS4210 && !ARCH_MULTIPLATFORM
        default y
        select ARM_EXYNOS_CPUFREQ
        help
@@ -41,7 +41,7 @@ config ARM_EXYNOS4210_CPUFREQ
 
 config ARM_EXYNOS4X12_CPUFREQ
        bool "SAMSUNG EXYNOS4x12"
-       depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+       depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412) && !ARCH_MULTIPLATFORM
        default y
        select ARM_EXYNOS_CPUFREQ
        help
@@ -52,7 +52,7 @@ config ARM_EXYNOS4X12_CPUFREQ
 
 config ARM_EXYNOS5250_CPUFREQ
        bool "SAMSUNG EXYNOS5250"
-       depends on SOC_EXYNOS5250
+       depends on SOC_EXYNOS5250 && !ARCH_MULTIPLATFORM
        default y
        select ARM_EXYNOS_CPUFREQ
        help
index ca0021a96e19738abefa9538bcb590e3e2c3faa7..72564b701b4a7018643c77de03f58a9092ff5f71 100644 (file)
@@ -54,3 +54,11 @@ config PPC_PASEMI_CPUFREQ
        help
          This adds the support for frequency switching on PA Semi
          PWRficient processors.
+
+config POWERNV_CPUFREQ
+       tristate "CPU frequency scaling for IBM POWERNV platform"
+       depends on PPC_POWERNV
+       default y
+       help
+        This adds support for CPU frequency switching on the IBM
+        POWERNV platform.
index 74945652dd7af595ca3498c3064944abdd73e9fe..0dbb963c1aef1911318f43e4269d2cb857d02477 100644 (file)
@@ -86,6 +86,7 @@ obj-$(CONFIG_PPC_CORENET_CPUFREQ)   += ppc-corenet-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC)            += pmac32-cpufreq.o
 obj-$(CONFIG_CPU_FREQ_PMAC64)          += pmac64-cpufreq.o
 obj-$(CONFIG_PPC_PASEMI_CPUFREQ)       += pasemi-cpufreq.o
+obj-$(CONFIG_POWERNV_CPUFREQ)          += powernv-cpufreq.o
 
 ##################################################################################
 # Other platform drivers
index d5eaedbe464f873e5a32978ac6cf86eb70169188..000e4e0afd7e672abaac488d3096ae199f68a301 100644 (file)
@@ -754,7 +754,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
                goto err_unreg;
        }
 
-       data->freq_table = kmalloc(sizeof(*data->freq_table) *
+       data->freq_table = kzalloc(sizeof(*data->freq_table) *
                    (perf->state_count+1), GFP_KERNEL);
        if (!data->freq_table) {
                result = -ENOMEM;
index a1c79f549edb58a4bc88492afea527892aa1a2f5..7b612c8bb09ea917f39083bf13e98c2caa775f0e 100644 (file)
@@ -52,7 +52,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
 static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
        unsigned int frequency, rate, min_freq;
-       static struct clk *cpuclk;
+       struct clk *cpuclk;
        int retval, steps, i;
 
        if (policy->cpu != 0)
index d4573032cbbc6e5cdf522951a1ee4e3d86c2c47e..601b88c490cfe2f20a9bbce4f2a1ab65fb4cd9c8 100644 (file)
@@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = {
 };
 
 static struct cpufreq_frequency_table cris_freq_table[] = {
-       {0x01,  6000},
-       {0x02,  200000},
-       {0,     CPUFREQ_TABLE_END},
+       {0, 0x01, 6000},
+       {0, 0x02, 200000},
+       {0, 0, CPUFREQ_TABLE_END},
 };
 
 static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
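
The extra leading initializer in these tables (repeated across the cpufreq hunks that follow) corresponds to a flags word that now sits in front of driver_data and frequency in struct cpufreq_frequency_table; the boost handling later in this series tests it with CPUFREQ_BOOST_FREQ. A sketch of the layout as implied by the initializers, inferred rather than quoted from cpufreq.h:

/* Layout implied by the {flags, driver_data, frequency} initializers in the
 * hunks above and below; inferred from this series, not copied from cpufreq.h.
 */
struct cpufreq_frequency_table {
	unsigned int	flags;		/* e.g. CPUFREQ_BOOST_FREQ */
	unsigned int	driver_data;	/* driver-specific cookie (pstate id, level, ...) */
	unsigned int	frequency;	/* kHz, or CPUFREQ_TABLE_END / CPUFREQ_ENTRY_INVALID */
};
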
index 13c3361437f7be274fec5b49be8f8aba1c43dd55..22b2cdde74d9b170e175f8352ed7b920cb24170c 100644 (file)
@@ -15,9 +15,9 @@ static struct notifier_block cris_sdram_freq_notifier_block = {
 };
 
 static struct cpufreq_frequency_table cris_freq_table[] = {
-       {0x01, 6000},
-       {0x02, 200000},
-       {0, CPUFREQ_TABLE_END},
+       {0, 0x01, 6000},
+       {0, 0x02, 200000},
+       {0, 0, CPUFREQ_TABLE_END},
 };
 
 static unsigned int cris_freq_get_cpu_frequency(unsigned int cpu)
index c987e94708f5015b7c48ce62795d0f93c0ed8c49..7f5d2a68c3532880b37646ff0761bb73911e00e5 100644 (file)
@@ -56,15 +56,15 @@ static struct s_elan_multiplier elan_multiplier[] = {
 };
 
 static struct cpufreq_frequency_table elanfreq_table[] = {
-       {0,     1000},
-       {1,     2000},
-       {2,     4000},
-       {3,     8000},
-       {4,     16000},
-       {5,     33000},
-       {6,     66000},
-       {7,     99000},
-       {0,     CPUFREQ_TABLE_END},
+       {0, 0,  1000},
+       {0, 1,  2000},
+       {0, 2,  4000},
+       {0, 3,  8000},
+       {0, 4,  16000},
+       {0, 5,  33000},
+       {0, 6,  66000},
+       {0, 7,  99000},
+       {0, 0,  CPUFREQ_TABLE_END},
 };
 
 
index 40d84c43d8f46cf31ad8f68fd007bf5777df6575..6384e5b9a347dab5fefdcb5aa2ad40c0924f5230 100644 (file)
@@ -29,12 +29,12 @@ static unsigned int exynos4210_volt_table[] = {
 };
 
 static struct cpufreq_frequency_table exynos4210_freq_table[] = {
-       {L0, 1200 * 1000},
-       {L1, 1000 * 1000},
-       {L2,  800 * 1000},
-       {L3,  500 * 1000},
-       {L4,  200 * 1000},
-       {0, CPUFREQ_TABLE_END},
+       {0, L0, 1200 * 1000},
+       {0, L1, 1000 * 1000},
+       {0, L2,  800 * 1000},
+       {0, L3,  500 * 1000},
+       {0, L4,  200 * 1000},
+       {0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct apll_freq apll_freq_4210[] = {
index 7c11ace3b3fc15638d08c2df6065e07ab83f36b8..466c76ad335bf54cb85038776e30f0c5d597f639 100644 (file)
@@ -30,21 +30,21 @@ static unsigned int exynos4x12_volt_table[] = {
 };
 
 static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
-       {CPUFREQ_BOOST_FREQ, 1500 * 1000},
-       {L1, 1400 * 1000},
-       {L2, 1300 * 1000},
-       {L3, 1200 * 1000},
-       {L4, 1100 * 1000},
-       {L5, 1000 * 1000},
-       {L6,  900 * 1000},
-       {L7,  800 * 1000},
-       {L8,  700 * 1000},
-       {L9,  600 * 1000},
-       {L10, 500 * 1000},
-       {L11, 400 * 1000},
-       {L12, 300 * 1000},
-       {L13, 200 * 1000},
-       {0, CPUFREQ_TABLE_END},
+       {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
+       {0, L1, 1400 * 1000},
+       {0, L2, 1300 * 1000},
+       {0, L3, 1200 * 1000},
+       {0, L4, 1100 * 1000},
+       {0, L5, 1000 * 1000},
+       {0, L6,  900 * 1000},
+       {0, L7,  800 * 1000},
+       {0, L8,  700 * 1000},
+       {0, L9,  600 * 1000},
+       {0, L10, 500 * 1000},
+       {0, L11, 400 * 1000},
+       {0, L12, 300 * 1000},
+       {0, L13, 200 * 1000},
+       {0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct apll_freq *apll_freq_4x12;
index 5f90b82a40825d9b9f1724809a64fea3b693e408..363a0b3fe1b109a7d746c15626155cabf2a03089 100644 (file)
@@ -34,23 +34,23 @@ static unsigned int exynos5250_volt_table[] = {
 };
 
 static struct cpufreq_frequency_table exynos5250_freq_table[] = {
-       {L0, 1700 * 1000},
-       {L1, 1600 * 1000},
-       {L2, 1500 * 1000},
-       {L3, 1400 * 1000},
-       {L4, 1300 * 1000},
-       {L5, 1200 * 1000},
-       {L6, 1100 * 1000},
-       {L7, 1000 * 1000},
-       {L8,  900 * 1000},
-       {L9,  800 * 1000},
-       {L10, 700 * 1000},
-       {L11, 600 * 1000},
-       {L12, 500 * 1000},
-       {L13, 400 * 1000},
-       {L14, 300 * 1000},
-       {L15, 200 * 1000},
-       {0, CPUFREQ_TABLE_END},
+       {0, L0, 1700 * 1000},
+       {0, L1, 1600 * 1000},
+       {0, L2, 1500 * 1000},
+       {0, L3, 1400 * 1000},
+       {0, L4, 1300 * 1000},
+       {0, L5, 1200 * 1000},
+       {0, L6, 1100 * 1000},
+       {0, L7, 1000 * 1000},
+       {0, L8,  900 * 1000},
+       {0, L9,  800 * 1000},
+       {0, L10, 700 * 1000},
+       {0, L11, 600 * 1000},
+       {0, L12, 500 * 1000},
+       {0, L13, 400 * 1000},
+       {0, L14, 300 * 1000},
+       {0, L15, 200 * 1000},
+       {0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct apll_freq apll_freq_5250[] = {
index 65a477075b3f2d8c424bec5e39682550140051c0..08e7bbcf6d7362d633311132e8b9fe21433360a0 100644 (file)
@@ -33,11 +33,10 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
                        continue;
                }
                if (!cpufreq_boost_enabled()
-                   && table[i].driver_data == CPUFREQ_BOOST_FREQ)
+                   && (table[i].flags & CPUFREQ_BOOST_FREQ))
                        continue;
 
-               pr_debug("table entry %u: %u kHz, %u driver_data\n",
-                                       i, freq, table[i].driver_data);
+               pr_debug("table entry %u: %u kHz\n", i, freq);
                if (freq < min_freq)
                        min_freq = freq;
                if (freq > max_freq)
@@ -175,8 +174,8 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
        } else
                *index = optimal.driver_data;
 
-       pr_debug("target is %u (%u kHz, %u)\n", *index, table[*index].frequency,
-               table[*index].driver_data);
+       pr_debug("target index is %u, freq is: %u kHz\n", *index,
+                table[*index].frequency);
 
        return 0;
 }
@@ -230,7 +229,7 @@ static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
                 * show_boost = false and driver_data != BOOST freq
                 * display NON BOOST freqs
                 */
-               if (show_boost ^ (table[i].driver_data == CPUFREQ_BOOST_FREQ))
+               if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ))
                        continue;
 
                count += sprintf(&buf[count], "%d ", table[i].frequency);
index a22b5d182e0eb1767f96fd65063d1eebc935ec7c..c30aaa6a54e84cd39d8778098e6251b140d0b975 100644 (file)
@@ -254,7 +254,7 @@ acpi_cpufreq_cpu_init (
        }
 
        /* alloc freq_table */
-       data->freq_table = kmalloc(sizeof(*data->freq_table) *
+       data->freq_table = kzalloc(sizeof(*data->freq_table) *
                                   (data->acpi_data.state_count + 1),
                                   GFP_KERNEL);
        if (!data->freq_table) {
@@ -275,7 +275,6 @@ acpi_cpufreq_cpu_init (
        /* table init */
        for (i = 0; i <= data->acpi_data.state_count; i++)
        {
-               data->freq_table[i].driver_data = i;
                if (i < data->acpi_data.state_count) {
                        data->freq_table[i].frequency =
                              data->acpi_data.states[i].core_frequency * 1000;
index 3d114bc5a97ad56999b03d2f03d9c701a8c72f6b..37a480680cd07bb49b3aa439c6be95f158907fca 100644 (file)
@@ -43,9 +43,9 @@ static struct priv
  * table.
  */
 static struct cpufreq_frequency_table kirkwood_freq_table[] = {
-       {STATE_CPU_FREQ,        0}, /* CPU uses cpuclk */
-       {STATE_DDR_FREQ,        0}, /* CPU uses ddrclk */
-       {0,                     CPUFREQ_TABLE_END},
+       {0, STATE_CPU_FREQ,     0}, /* CPU uses cpuclk */
+       {0, STATE_DDR_FREQ,     0}, /* CPU uses ddrclk */
+       {0, 0,                  CPUFREQ_TABLE_END},
 };
 
 static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu)
index 5c440f87ba8a5973a0cd3e0e8a234415523d45b9..d00e5d1abd258b469bf48862a5f14b08e04f97a7 100644 (file)
@@ -475,7 +475,7 @@ static int longhaul_get_ranges(void)
                return -EINVAL;
        }
 
-       longhaul_table = kmalloc((numscales + 1) * sizeof(*longhaul_table),
+       longhaul_table = kzalloc((numscales + 1) * sizeof(*longhaul_table),
                        GFP_KERNEL);
        if (!longhaul_table)
                return -ENOMEM;
index a3588d61d933f62f313767b7034ca749e5dce163..f0bc31f5db27a41db3d7f8556e72274b42a5eda5 100644 (file)
@@ -69,7 +69,7 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
 
 static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
-       static struct clk *cpuclk;
+       struct clk *cpuclk;
        int i;
        unsigned long rate;
        int ret;
index c4dfa42a75acc6663de5b0c4aabd7db3705de954..cc3408fc073f982110b446684cf8ed5dceb44bff 100644 (file)
@@ -59,9 +59,9 @@
 #define CPUFREQ_LOW                   1
 
 static struct cpufreq_frequency_table maple_cpu_freqs[] = {
-       {CPUFREQ_HIGH,          0},
-       {CPUFREQ_LOW,           0},
-       {0,                     CPUFREQ_TABLE_END},
+       {0, CPUFREQ_HIGH,               0},
+       {0, CPUFREQ_LOW,                0},
+       {0, 0,                          CPUFREQ_TABLE_END},
 };
 
 /* Power mode data is an array of the 32 bits PCR values to use for
index 74f593e70e191ffdd8c297377cf347a833a9dbce..529cfd92158fa6f7e0e9cfb79d88307c83c57ac3 100644 (file)
@@ -92,16 +92,16 @@ static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
 
 
 static struct cpufreq_frequency_table p4clockmod_table[] = {
-       {DC_RESV, CPUFREQ_ENTRY_INVALID},
-       {DC_DFLT, 0},
-       {DC_25PT, 0},
-       {DC_38PT, 0},
-       {DC_50PT, 0},
-       {DC_64PT, 0},
-       {DC_75PT, 0},
-       {DC_88PT, 0},
-       {DC_DISABLE, 0},
-       {DC_RESV, CPUFREQ_TABLE_END},
+       {0, DC_RESV, CPUFREQ_ENTRY_INVALID},
+       {0, DC_DFLT, 0},
+       {0, DC_25PT, 0},
+       {0, DC_38PT, 0},
+       {0, DC_50PT, 0},
+       {0, DC_64PT, 0},
+       {0, DC_75PT, 0},
+       {0, DC_88PT, 0},
+       {0, DC_DISABLE, 0},
+       {0, DC_RESV, CPUFREQ_TABLE_END},
 };
 
 
index 6a2b7d3e85a7bf408c926037a0949e4d0cf0143c..84c84b5f0f3a426d70738730b8715defc18bec20 100644 (file)
@@ -60,12 +60,12 @@ static int current_astate;
 
 /* We support 5(A0-A4) power states excluding turbo(A5-A6) modes */
 static struct cpufreq_frequency_table pas_freqs[] = {
-       {0,     0},
-       {1,     0},
-       {2,     0},
-       {3,     0},
-       {4,     0},
-       {0,     CPUFREQ_TABLE_END},
+       {0, 0,  0},
+       {0, 1,  0},
+       {0, 2,  0},
+       {0, 3,  0},
+       {0, 4,  0},
+       {0, 0,  CPUFREQ_TABLE_END},
 };
 
 /*
index cf55d202f332be36322d7e645ed596b2f2669d16..7615180d7ee3c497e915d72f18af11e9b3f09c5c 100644 (file)
@@ -81,9 +81,9 @@ static int is_pmu_based;
 #define CPUFREQ_LOW                   1
 
 static struct cpufreq_frequency_table pmac_cpu_freqs[] = {
-       {CPUFREQ_HIGH,          0},
-       {CPUFREQ_LOW,           0},
-       {0,                     CPUFREQ_TABLE_END},
+       {0, CPUFREQ_HIGH,       0},
+       {0, CPUFREQ_LOW,        0},
+       {0, 0,                  CPUFREQ_TABLE_END},
 };
 
 static inline void local_delay(unsigned long ms)
index 6a338f8c386096eb22929c7228a7c38723229a5b..8bc422977b5b88120ad93c08ac27cbea22cc80ec 100644 (file)
@@ -65,9 +65,9 @@
 #define CPUFREQ_LOW                   1
 
 static struct cpufreq_frequency_table g5_cpu_freqs[] = {
-       {CPUFREQ_HIGH,          0},
-       {CPUFREQ_LOW,           0},
-       {0,                     CPUFREQ_TABLE_END},
+       {0, CPUFREQ_HIGH,       0},
+       {0, CPUFREQ_LOW,        0},
+       {0, 0,                  CPUFREQ_TABLE_END},
 };
 
 /* Power mode data is an array of the 32 bits PCR values to use for
index 62c6f2e5afced391ee7260e6ab6065c73954fa58..49f120e1bc7be0ecb879f184424d1dc56ee63981 100644 (file)
@@ -37,15 +37,15 @@ MODULE_PARM_DESC(bus_frequency, "Bus frequency in kHz");
 
 /* Clock ratio multiplied by 10 - see table 27 in AMD#23446 */
 static struct cpufreq_frequency_table clock_ratio[] = {
-       {60,  /* 110 -> 6.0x */ 0},
-       {55,  /* 011 -> 5.5x */ 0},
-       {50,  /* 001 -> 5.0x */ 0},
-       {45,  /* 000 -> 4.5x */ 0},
-       {40,  /* 010 -> 4.0x */ 0},
-       {35,  /* 111 -> 3.5x */ 0},
-       {30,  /* 101 -> 3.0x */ 0},
-       {20,  /* 100 -> 2.0x */ 0},
-       {0, CPUFREQ_TABLE_END}
+       {0, 60,  /* 110 -> 6.0x */ 0},
+       {0, 55,  /* 011 -> 5.5x */ 0},
+       {0, 50,  /* 001 -> 5.0x */ 0},
+       {0, 45,  /* 000 -> 4.5x */ 0},
+       {0, 40,  /* 010 -> 4.0x */ 0},
+       {0, 35,  /* 111 -> 3.5x */ 0},
+       {0, 30,  /* 101 -> 3.0x */ 0},
+       {0, 20,  /* 100 -> 2.0x */ 0},
+       {0, 0, CPUFREQ_TABLE_END}
 };
 
 static const u8 index_to_register[8] = { 6, 3, 1, 0, 2, 7, 5, 4 };
index 770a9e1b3468f3f13b1b67a8ed81934bf4a1f139..1b6ae6b57c1183ed5a01232876045be9592f3d23 100644 (file)
@@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
        if (check_pst_table(data, pst, maxvid))
                return -EINVAL;
 
-       powernow_table = kmalloc((sizeof(*powernow_table)
+       powernow_table = kzalloc((sizeof(*powernow_table)
                * (data->numps + 1)), GFP_KERNEL);
        if (!powernow_table) {
                printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
@@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
        }
 
        /* fill in data->powernow_table */
-       powernow_table = kmalloc((sizeof(*powernow_table)
+       powernow_table = kzalloc((sizeof(*powernow_table)
                * (data->acpi_data.state_count + 1)), GFP_KERNEL);
        if (!powernow_table) {
                pr_debug("powernow_table memory alloc failure\n");
@@ -810,7 +810,6 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
        powernow_table[data->acpi_data.state_count].frequency =
                CPUFREQ_TABLE_END;
-       powernow_table[data->acpi_data.state_count].driver_data = 0;
        data->powernow_table = powernow_table;
 
        if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
new file mode 100644 (file)
index 0000000..9edccc6
--- /dev/null
@@ -0,0 +1,341 @@
+/*
+ * POWERNV cpufreq driver for the IBM POWER processors
+ *
+ * (C) Copyright IBM 2014
+ *
+ * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt)    "powernv-cpufreq: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sysfs.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/cpufreq.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/cputhreads.h>
+#include <asm/reg.h>
+
+#define POWERNV_MAX_PSTATES    256
+
+static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
+
+/*
+ * Note: The set of pstates consists of contiguous integers, the
+ * smallest of which is indicated by powernv_pstate_info.min, the
+ * largest of which is indicated by powernv_pstate_info.max.
+ *
+ * The nominal pstate is the highest non-turbo pstate in this
+ * platform. This is indicated by powernv_pstate_info.nominal.
+ */
+static struct powernv_pstate_info {
+       int min;
+       int max;
+       int nominal;
+       int nr_pstates;
+} powernv_pstate_info;
+
+/*
+ * Initialize the freq table based on data obtained
+ * from the firmware passed via device-tree
+ */
+static int init_powernv_pstates(void)
+{
+       struct device_node *power_mgt;
+       int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
+       const __be32 *pstate_ids, *pstate_freqs;
+       u32 len_ids, len_freqs;
+
+       power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+       if (!power_mgt) {
+               pr_warn("power-mgt node not found\n");
+               return -ENODEV;
+       }
+
+       if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
+               pr_warn("ibm,pstate-min node not found\n");
+               return -ENODEV;
+       }
+
+       if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
+               pr_warn("ibm,pstate-max node not found\n");
+               return -ENODEV;
+       }
+
+       if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
+                                &pstate_nominal)) {
+               pr_warn("ibm,pstate-nominal not found\n");
+               return -ENODEV;
+       }
+       pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
+               pstate_nominal, pstate_max);
+
+       pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
+       if (!pstate_ids) {
+               pr_warn("ibm,pstate-ids not found\n");
+               return -ENODEV;
+       }
+
+       pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
+                                     &len_freqs);
+       if (!pstate_freqs) {
+               pr_warn("ibm,pstate-frequencies-mhz not found\n");
+               return -ENODEV;
+       }
+
+       WARN_ON(len_ids != len_freqs);
+       nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
+       if (!nr_pstates) {
+               pr_warn("No PStates found\n");
+               return -ENODEV;
+       }
+
+       pr_debug("NR PStates %d\n", nr_pstates);
+       for (i = 0; i < nr_pstates; i++) {
+               u32 id = be32_to_cpu(pstate_ids[i]);
+               u32 freq = be32_to_cpu(pstate_freqs[i]);
+
+               pr_debug("PState id %d freq %d MHz\n", id, freq);
+               powernv_freqs[i].frequency = freq * 1000; /* kHz */
+               powernv_freqs[i].driver_data = id;
+       }
+       /* End of list marker entry */
+       powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
+
+       powernv_pstate_info.min = pstate_min;
+       powernv_pstate_info.max = pstate_max;
+       powernv_pstate_info.nominal = pstate_nominal;
+       powernv_pstate_info.nr_pstates = nr_pstates;
+
+       return 0;
+}
+
+/* Returns the CPU frequency corresponding to the pstate_id. */
+static unsigned int pstate_id_to_freq(int pstate_id)
+{
+       int i;
+
+       i = powernv_pstate_info.max - pstate_id;
+       BUG_ON(i >= powernv_pstate_info.nr_pstates || i < 0);
+
+       return powernv_freqs[i].frequency;
+}
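
A worked example of the index arithmetic in pstate_id_to_freq(): it assumes the firmware lists the contiguous pstates in descending order starting at the maximum, so index = max - pstate_id. The numeric values below are hypothetical:

/* Worked example of the "max - pstate_id" index formula used above.
 * Example values (max = 0, min = -7) are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	int max = 0, min = -7;
	int nr_pstates = max - min + 1;	/* 8 contiguous pstates */
	int pstate_id = -3;		/* some mid-range pstate */
	int i = max - pstate_id;	/* table index, here 3 */

	printf("nr_pstates=%d index=%d\n", nr_pstates, i);
	return 0;
}
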
+
+/*
+ * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
+ * the firmware
+ */
+static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
+                                       char *buf)
+{
+       return sprintf(buf, "%u\n",
+               pstate_id_to_freq(powernv_pstate_info.nominal));
+}
+
+struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
+       __ATTR_RO(cpuinfo_nominal_freq);
+
+static struct freq_attr *powernv_cpu_freq_attr[] = {
+       &cpufreq_freq_attr_scaling_available_freqs,
+       &cpufreq_freq_attr_cpuinfo_nominal_freq,
+       NULL,
+};
+
+/* Helper routines */
+
+/* Access helpers to power mgt SPR */
+
+static inline unsigned long get_pmspr(unsigned long sprn)
+{
+       switch (sprn) {
+       case SPRN_PMCR:
+               return mfspr(SPRN_PMCR);
+
+       case SPRN_PMICR:
+               return mfspr(SPRN_PMICR);
+
+       case SPRN_PMSR:
+               return mfspr(SPRN_PMSR);
+       }
+       BUG();
+}
+
+static inline void set_pmspr(unsigned long sprn, unsigned long val)
+{
+       switch (sprn) {
+       case SPRN_PMCR:
+               mtspr(SPRN_PMCR, val);
+               return;
+
+       case SPRN_PMICR:
+               mtspr(SPRN_PMICR, val);
+               return;
+       }
+       BUG();
+}
+
+/*
+ * Use objects of this type to query/update
+ * pstates on a remote CPU via smp_call_function.
+ */
+struct powernv_smp_call_data {
+       unsigned int freq;
+       int pstate_id;
+};
+
+/*
+ * powernv_read_cpu_freq: Reads the current frequency on this CPU.
+ *
+ * Called via smp_call_function.
+ *
+ * Note: The caller of the smp_call_function should pass an argument of
+ * the type 'struct powernv_smp_call_data *' along with this function.
+ *
+ * The current frequency on this CPU will be returned via
+ * ((struct powernv_smp_call_data *)arg)->freq;
+ */
+static void powernv_read_cpu_freq(void *arg)
+{
+       unsigned long pmspr_val;
+       s8 local_pstate_id;
+       struct powernv_smp_call_data *freq_data = arg;
+
+       pmspr_val = get_pmspr(SPRN_PMSR);
+
+       /*
+        * The local pstate id corresponds to bits 48..55 in the PMSR.
+        * Note: Watch out for the sign!
+        */
+       local_pstate_id = (pmspr_val >> 48) & 0xFF;
+       freq_data->pstate_id = local_pstate_id;
+       freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
+
+       pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
+               raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
+               freq_data->freq);
+}
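
The "watch out for the sign" note matters because the pstate id in PMSR bits 48..55 is a signed 8-bit value; assigning the masked byte to an s8 recovers the negative id. A small standalone illustration with a made-up PMSR value:

/* Worked example of the PMSR decode above: 0xFD in bits 48..55 is a signed
 * 8-bit pstate id, so the 8-bit signed assignment yields -3.  The register
 * image here is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pmsr_val = 0xFDULL << 48;	/* hypothetical register image */
	int8_t local_pstate_id = (pmsr_val >> 48) & 0xFF;

	printf("local pstate id = %d\n", local_pstate_id);	/* prints -3 */
	return 0;
}
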
+
+/*
+ * powernv_cpufreq_get: Returns the CPU frequency as reported by the
+ * firmware for CPU 'cpu'. This value is reported through the sysfs
+ * file cpuinfo_cur_freq.
+ */
+unsigned int powernv_cpufreq_get(unsigned int cpu)
+{
+       struct powernv_smp_call_data freq_data;
+
+       smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
+                       &freq_data, 1);
+
+       return freq_data.freq;
+}
+
+/*
+ * set_pstate: Sets the pstate on this CPU.
+ *
+ * This is called via smp_call_function.
+ *
+ * The caller must ensure that freq_data is of the type
+ * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
+ * on this CPU should be present in freq_data->pstate_id.
+ */
+static void set_pstate(void *freq_data)
+{
+       unsigned long val;
+       unsigned long pstate_ul =
+               ((struct powernv_smp_call_data *) freq_data)->pstate_id;
+
+       val = get_pmspr(SPRN_PMCR);
+       val = val & 0x0000FFFFFFFFFFFFULL;
+
+       pstate_ul = pstate_ul & 0xFF;
+
+       /* Set both global(bits 56..63) and local(bits 48..55) PStates */
+       val = val | (pstate_ul << 56) | (pstate_ul << 48);
+
+       pr_debug("Setting cpu %d pmcr to %016lX\n",
+                       raw_smp_processor_id(), val);
+       set_pmspr(SPRN_PMCR, val);
+}
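
A worked example of the PMCR composition in set_pstate(): the low 48 bits of the old value are preserved, and both the global (bits 56..63) and local (bits 48..55) pstate fields are set to the low byte of the requested pstate id. The register values below are made up:

/* Worked example of the PMCR bit manipulation above, with hypothetical values. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t old_pmcr  = 0xAA55000000001234ULL;	/* hypothetical current PMCR */
	uint64_t pstate_ul = (uint64_t)(-3) & 0xFF;	/* 0xFD */
	uint64_t val = old_pmcr & 0x0000FFFFFFFFFFFFULL;

	val |= (pstate_ul << 56) | (pstate_ul << 48);
	printf("new PMCR = %016llx\n", (unsigned long long)val);
	/* prints fdfd000000001234: top 16 bits replaced, low 48 bits preserved */
	return 0;
}
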
+
+/*
+ * powernv_cpufreq_target_index: Sets the frequency corresponding to
+ * the cpufreq table entry indexed by new_index on the cpus in the
+ * mask policy->cpus
+ */
+static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
+                                       unsigned int new_index)
+{
+       struct powernv_smp_call_data freq_data;
+
+       freq_data.pstate_id = powernv_freqs[new_index].driver_data;
+
+       /*
+        * Use smp_call_function to send an IPI and execute the
+        * mtspr on the target CPU.  We could do this without an IPI
+        * if the current CPU is within policy->cpus (same core).
+        */
+       smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
+
+       return 0;
+}
+
+static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+       int base, i;
+
+       base = cpu_first_thread_sibling(policy->cpu);
+
+       for (i = 0; i < threads_per_core; i++)
+               cpumask_set_cpu(base + i, policy->cpus);
+
+       return cpufreq_table_validate_and_show(policy, powernv_freqs);
+}
+
+static struct cpufreq_driver powernv_cpufreq_driver = {
+       .name           = "powernv-cpufreq",
+       .flags          = CPUFREQ_CONST_LOOPS,
+       .init           = powernv_cpufreq_cpu_init,
+       .verify         = cpufreq_generic_frequency_table_verify,
+       .target_index   = powernv_cpufreq_target_index,
+       .get            = powernv_cpufreq_get,
+       .attr           = powernv_cpu_freq_attr,
+};
+
+static int __init powernv_cpufreq_init(void)
+{
+       int rc = 0;
+
+       /* Discover pstates from device tree and init */
+       rc = init_powernv_pstates();
+       if (rc) {
+               pr_info("powernv-cpufreq disabled. System does not support PState control\n");
+               return rc;
+       }
+
+       return cpufreq_register_driver(&powernv_cpufreq_driver);
+}
+module_init(powernv_cpufreq_init);
+
+static void __exit powernv_cpufreq_exit(void)
+{
+       cpufreq_unregister_driver(&powernv_cpufreq_driver);
+}
+module_exit(powernv_cpufreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");
index 3bd9123e702667e29f33d62ffae237a5e44ffdcd..b7e677be1df034cdebdea6580b5726bc77c9aa39 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/clk.h>
 #include <linux/cpufreq.h>
 #include <linux/errno.h>
-#include <sysdev/fsl_soc.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
index af7b1cabd1e76f643ae652de1b05e4b57107429c..5be8a48dba74fdca1d3d8b33ac02c93229b792c6 100644 (file)
 
 /* the CBE supports an 8 step frequency scaling */
 static struct cpufreq_frequency_table cbe_freqs[] = {
-       {1,     0},
-       {2,     0},
-       {3,     0},
-       {4,     0},
-       {5,     0},
-       {6,     0},
-       {8,     0},
-       {10,    0},
-       {0,     CPUFREQ_TABLE_END},
+       {0, 1,  0},
+       {0, 2,  0},
+       {0, 3,  0},
+       {0, 4,  0},
+       {0, 5,  0},
+       {0, 6,  0},
+       {0, 8,  0},
+       {0, 10, 0},
+       {0, 0,  CPUFREQ_TABLE_END},
 };
 
 /*
index 826b8be2309942a2b43548806516aa43bca183aa..4626f90559b55167869b86aa6c0349cb45d29d46 100644 (file)
@@ -72,19 +72,19 @@ static struct s3c2416_dvfs s3c2416_dvfs_table[] = {
 #endif
 
 static struct cpufreq_frequency_table s3c2416_freq_table[] = {
-       { SOURCE_HCLK, FREQ_DVS },
-       { SOURCE_ARMDIV, 133333 },
-       { SOURCE_ARMDIV, 266666 },
-       { SOURCE_ARMDIV, 400000 },
-       { 0, CPUFREQ_TABLE_END },
+       { 0, SOURCE_HCLK, FREQ_DVS },
+       { 0, SOURCE_ARMDIV, 133333 },
+       { 0, SOURCE_ARMDIV, 266666 },
+       { 0, SOURCE_ARMDIV, 400000 },
+       { 0, 0, CPUFREQ_TABLE_END },
 };
 
 static struct cpufreq_frequency_table s3c2450_freq_table[] = {
-       { SOURCE_HCLK, FREQ_DVS },
-       { SOURCE_ARMDIV, 133500 },
-       { SOURCE_ARMDIV, 267000 },
-       { SOURCE_ARMDIV, 534000 },
-       { 0, CPUFREQ_TABLE_END },
+       { 0, SOURCE_HCLK, FREQ_DVS },
+       { 0, SOURCE_ARMDIV, 133500 },
+       { 0, SOURCE_ARMDIV, 267000 },
+       { 0, SOURCE_ARMDIV, 534000 },
+       { 0, 0, CPUFREQ_TABLE_END },
 };
 
 static unsigned int s3c2416_cpufreq_get_speed(unsigned int cpu)
index a3dc192d21f98c6e3b1bb22713426f8de1c40658..be1b2b5c9753b6c6f2d0bc359890bfd7f8566ab9 100644 (file)
@@ -586,7 +586,7 @@ static int s3c_cpufreq_build_freq(void)
        size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
        size++;
 
-       ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
+       ftab = kzalloc(sizeof(*ftab) * size, GFP_KERNEL);
        if (!ftab) {
                printk(KERN_ERR "%s: no memory for tables\n", __func__);
                return -ENOMEM;
@@ -664,7 +664,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
 
        size = sizeof(*vals) * (plls_no + 1);
 
-       vals = kmalloc(size, GFP_KERNEL);
+       vals = kzalloc(size, GFP_KERNEL);
        if (vals) {
                memcpy(vals, plls, size);
                pll_reg = vals;
index c4226de079ab4704a04455f349bc9a2286162173..ff7d3ecb85f0f3c45430423942dc79a03f8837a6 100644 (file)
@@ -37,19 +37,19 @@ static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
 };
 
 static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
-       { 0,  66000 },
-       { 0, 100000 },
-       { 0, 133000 },
-       { 1, 200000 },
-       { 1, 222000 },
-       { 1, 266000 },
-       { 2, 333000 },
-       { 2, 400000 },
-       { 2, 532000 },
-       { 2, 533000 },
-       { 3, 667000 },
-       { 4, 800000 },
-       { 0, CPUFREQ_TABLE_END },
+       { 0, 0,  66000 },
+       { 0, 0, 100000 },
+       { 0, 0, 133000 },
+       { 0, 1, 200000 },
+       { 0, 1, 222000 },
+       { 0, 1, 266000 },
+       { 0, 2, 333000 },
+       { 0, 2, 400000 },
+       { 0, 2, 532000 },
+       { 0, 2, 533000 },
+       { 0, 3, 667000 },
+       { 0, 4, 800000 },
+       { 0, 0, CPUFREQ_TABLE_END },
 };
 #endif
 
index 72421534fff57753a5fec3b9660c4842578acd69..ab2c1a40d43752591283162a177b4a61ae0a9487 100644 (file)
@@ -64,12 +64,12 @@ enum s5pv210_dmc_port {
 };
 
 static struct cpufreq_frequency_table s5pv210_freq_table[] = {
-       {L0, 1000*1000},
-       {L1, 800*1000},
-       {L2, 400*1000},
-       {L3, 200*1000},
-       {L4, 100*1000},
-       {0, CPUFREQ_TABLE_END},
+       {0, L0, 1000*1000},
+       {0, L1, 800*1000},
+       {0, L2, 400*1000},
+       {0, L3, 200*1000},
+       {0, L4, 100*1000},
+       {0, 0, CPUFREQ_TABLE_END},
 };
 
 static struct regulator *arm_regulator;
index 69371bf0886d11a17a86bb169dd624777f4b6048..ac84e48180148b9a383b81fe43e328e7a56cd8cb 100644 (file)
@@ -33,9 +33,9 @@ static __u8 __iomem *cpuctl;
 #define PFX "sc520_freq: "
 
 static struct cpufreq_frequency_table sc520_freq_table[] = {
-       {0x01,  100000},
-       {0x02,  133000},
-       {0,     CPUFREQ_TABLE_END},
+       {0, 0x01,       100000},
+       {0, 0x02,       133000},
+       {0, 0,  CPUFREQ_TABLE_END},
 };
 
 static unsigned int sc520_freq_get_cpu_frequency(unsigned int cpu)
index 4cfdcff8a3109826195979495f307c4ff1902d65..38678396636da49d689cde51ec49fc850f912a0b 100644 (file)
@@ -195,18 +195,15 @@ static int spear_cpufreq_probe(struct platform_device *pdev)
        cnt = prop->length / sizeof(u32);
        val = prop->value;
 
-       freq_tbl = kmalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
+       freq_tbl = kzalloc(sizeof(*freq_tbl) * (cnt + 1), GFP_KERNEL);
        if (!freq_tbl) {
                ret = -ENOMEM;
                goto out_put_node;
        }
 
-       for (i = 0; i < cnt; i++) {
-               freq_tbl[i].driver_data = i;
+       for (i = 0; i < cnt; i++)
                freq_tbl[i].frequency = be32_to_cpup(val++);
-       }
 
-       freq_tbl[i].driver_data = i;
        freq_tbl[i].frequency = CPUFREQ_TABLE_END;
 
        spear_cpufreq.freq_tbl = freq_tbl;
index 394ac159312a03ffbd359db7c9e30efda9c044eb..1a07b5904ed55c1504c65e3a7f49061b78c9ea62 100644 (file)
@@ -49,9 +49,9 @@ static u32 pmbase;
  * are in kHz for the time being.
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-       {SPEEDSTEP_HIGH,        0},
-       {SPEEDSTEP_LOW,         0},
-       {0,                     CPUFREQ_TABLE_END},
+       {0, SPEEDSTEP_HIGH,     0},
+       {0, SPEEDSTEP_LOW,      0},
+       {0, 0,                  CPUFREQ_TABLE_END},
 };
 
 
index db5d274dc13ad4e800c0bd51547c92b6619668ed..8635eec96da5c3c7f210467a497b20b53eb52c32 100644 (file)
@@ -42,9 +42,9 @@ static enum speedstep_processor speedstep_processor;
  * are in kHz for the time being.
  */
 static struct cpufreq_frequency_table speedstep_freqs[] = {
-       {SPEEDSTEP_HIGH,        0},
-       {SPEEDSTEP_LOW,         0},
-       {0,                     CPUFREQ_TABLE_END},
+       {0, SPEEDSTEP_HIGH,     0},
+       {0, SPEEDSTEP_LOW,      0},
+       {0, 0,                  CPUFREQ_TABLE_END},
 };
 
 #define GET_SPEEDSTEP_OWNER 0
index 13be802b6170e1a8aefb457e28745ff6318ef125..8d045afa7fb406445b4996334e22a13e9f9572d8 100644 (file)
@@ -45,7 +45,7 @@ static int ucv2_target(struct cpufreq_policy *policy,
        freqs.new = target_freq;
 
        cpufreq_freq_transition_begin(policy, &freqs);
-       ret = clk_set_rate(policy->mclk, target_freq * 1000);
+       ret = clk_set_rate(policy->clk, target_freq * 1000);
        cpufreq_freq_transition_end(policy, &freqs, ret);
 
        return ret;
index e918b6d0caf7558025578be747b920003adb2698..efe2f175168f608f6170d37d516a444cab6c0676 100644 (file)
@@ -293,6 +293,7 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
 }
 
 define_show_state_function(exit_latency)
+define_show_state_function(target_residency)
 define_show_state_function(power_usage)
 define_show_state_ull_function(usage)
 define_show_state_ull_function(time)
@@ -304,6 +305,7 @@ define_store_state_ull_function(disable)
 define_one_state_ro(name, show_state_name);
 define_one_state_ro(desc, show_state_desc);
 define_one_state_ro(latency, show_state_exit_latency);
+define_one_state_ro(residency, show_state_target_residency);
 define_one_state_ro(power, show_state_power_usage);
 define_one_state_ro(usage, show_state_usage);
 define_one_state_ro(time, show_state_time);
@@ -313,6 +315,7 @@ static struct attribute *cpuidle_state_default_attrs[] = {
        &attr_name.attr,
        &attr_desc.attr,
        &attr_latency.attr,
+       &attr_residency.attr,
        &attr_power.attr,
        &attr_usage.attr,
        &attr_time.attr,
index 605b016bcea49dcea25d9515b2cec276ae974372..ba06d1d2f99e39c50e5ed82f08fffdbbe20b7720 100644 (file)
@@ -308,7 +308,7 @@ config DMA_OMAP
 
 config DMA_BCM2835
        tristate "BCM2835 DMA engine support"
-       depends on (ARCH_BCM2835 || MACH_BCM2708)
+       depends on ARCH_BCM2835
        select DMA_ENGINE
        select DMA_VIRTUAL_CHANNELS
 
@@ -350,6 +350,16 @@ config MOXART_DMA
        select DMA_VIRTUAL_CHANNELS
        help
          Enable support for the MOXA ART SoC DMA controller.
+config FSL_EDMA
+       tristate "Freescale eDMA engine support"
+       depends on OF
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       help
+         Support the Freescale eDMA engine with programmable channel
+         multiplexing capability for DMA request sources (slots).
+         This module can be found on Freescale Vybrid and LS-1 SoCs.
 
 config DMA_ENGINE
        bool
@@ -401,4 +411,13 @@ config DMATEST
 config DMA_ENGINE_RAID
        bool
 
+config QCOM_BAM_DMA
+       tristate "QCOM BAM DMA support"
+       depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+       select DMA_ENGINE
+       select DMA_VIRTUAL_CHANNELS
+       ---help---
+         Enable support for the QCOM BAM DMA controller.  This controller
+         provides DMA capabilities for a variety of on-chip devices.
+
 endif
index a029d0f4a1be8088c00c459373f469580925d981..5150c82c9caf2e9203ee8935ebd8192346965d56 100644 (file)
@@ -44,3 +44,5 @@ obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
 obj-$(CONFIG_TI_CPPI41) += cppi41.o
 obj-$(CONFIG_K3_DMA) += k3dma.o
 obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
index 1e506afa33f5e5a9b95753ce0e98ceb2f4ce7e66..de361a156b341ab85a0b22490f2ff9407c93ce30 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
@@ -265,7 +266,7 @@ EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register);
  */
 void devm_acpi_dma_controller_free(struct device *dev)
 {
-       WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL));
+       WARN_ON(devres_release(dev, devm_acpi_dma_release, NULL, NULL));
 }
 EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
 
@@ -343,7 +344,7 @@ static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data)
  * @index:     index of FixedDMA descriptor for @dev
  *
  * Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
                size_t index)
@@ -358,10 +359,10 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
 
        /* Check if the device was enumerated by ACPI */
        if (!dev || !ACPI_HANDLE(dev))
-               return NULL;
+               return ERR_PTR(-ENODEV);
 
        if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev))
-               return NULL;
+               return ERR_PTR(-ENODEV);
 
        memset(&pdata, 0, sizeof(pdata));
        pdata.index = index;
@@ -376,7 +377,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
        acpi_dev_free_resource_list(&resource_list);
 
        if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0)
-               return NULL;
+               return ERR_PTR(-ENODEV);
 
        mutex_lock(&acpi_dma_lock);
 
@@ -399,7 +400,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
        }
 
        mutex_unlock(&acpi_dma_lock);
-       return chan;
+       return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
 EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
 
@@ -413,7 +414,7 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index);
  * the first FixedDMA descriptor is TX and second is RX.
  *
  * Return:
- * Pointer to appropriate dma channel on success or NULL on error.
+ * Pointer to appropriate dma channel on success or an error pointer.
  */
 struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
                const char *name)
@@ -425,7 +426,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
        else if (!strcmp(name, "rx"))
                index = 1;
        else
-               return NULL;
+               return ERR_PTR(-ENODEV);
 
        return acpi_dma_request_slave_chan_by_index(dev, index);
 }
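
With these hunks the ACPI DMA helpers no longer return NULL; callers now receive either a valid channel or an ERR_PTR such as -ENODEV or -EPROBE_DEFER, matching what dma_request_slave_channel_reason() propagates. A hedged sketch of a consumer handling the new contract (the device and the "rx" channel name are placeholders, not from this series):

/* Illustrative consumer of the error-pointer contract introduced above. */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int mydev_request_rx_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(chan)) {
		/* -EPROBE_DEFER asks the driver core to retry probing later */
		return PTR_ERR(chan);
	}

	*out = chan;
	return 0;
}
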
index e2c04dc81e2a903ea13956e872150c40ed8b00e8..c13a3bb0f5943234fac845d3214309a3c754c63a 100644 (file)
@@ -1569,7 +1569,6 @@ static int at_dma_remove(struct platform_device *pdev)
 
                /* Disable interrupts */
                atc_disable_chan_irq(atdma, chan->chan_id);
-               tasklet_disable(&atchan->tasklet);
 
                tasklet_kill(&atchan->tasklet);
                list_del(&chan->device_node);
index c18aebf7d5aa9a23199b556bc9b54c8b3237253d..d028f36ae655ad56b7afb95a8c4252d096596d5f 100644 (file)
@@ -620,12 +620,15 @@ static int cppi41_stop_chan(struct dma_chan *chan)
        u32 desc_phys;
        int ret;
 
+       desc_phys = lower_32_bits(c->desc_phys);
+       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+       if (!cdd->chan_busy[desc_num])
+               return 0;
+
        ret = cppi41_tear_down_chan(c);
        if (ret)
                return ret;
 
-       desc_phys = lower_32_bits(c->desc_phys);
-       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
        WARN_ON(!cdd->chan_busy[desc_num]);
        cdd->chan_busy[desc_num] = NULL;
 
index ed610b4975186b8e389de7360d4d3c8e6f5785e3..a886713937fd05b38fc10866bbce5266ac0bfae1 100644 (file)
@@ -627,18 +627,13 @@ EXPORT_SYMBOL_GPL(__dma_request_channel);
 struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
                                                  const char *name)
 {
-       struct dma_chan *chan;
-
        /* If device-tree is present get slave info from here */
        if (dev->of_node)
                return of_dma_request_slave_channel(dev->of_node, name);
 
        /* If device was enumerated by ACPI get slave info from here */
-       if (ACPI_HANDLE(dev)) {
-               chan = acpi_dma_request_slave_chan_by_name(dev, name);
-               if (chan)
-                       return chan;
-       }
+       if (ACPI_HANDLE(dev))
+               return acpi_dma_request_slave_chan_by_name(dev, name);
 
        return ERR_PTR(-ENODEV);
 }
index 05b6dea770a407fc94e614b82bdbdf0ef0212593..e27cec25c59e4776de0d74681b696e7b0d4203c6 100644 (file)
@@ -340,7 +340,7 @@ static unsigned int min_odd(unsigned int x, unsigned int y)
 static void result(const char *err, unsigned int n, unsigned int src_off,
                   unsigned int dst_off, unsigned int len, unsigned long data)
 {
-       pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+       pr_info("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
                current->comm, n, err, src_off, dst_off, len, data);
 }
 
@@ -348,7 +348,7 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off,
                       unsigned int dst_off, unsigned int len,
                       unsigned long data)
 {
-       pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)",
+       pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n",
                   current->comm, n, err, src_off, dst_off, len, data);
 }
 
index 13ac3f240e7963127c713f44f296bd421edf8999..cfdbb92aae1dece5512f023afe341db2b086d171 100644 (file)
@@ -33,8 +33,8 @@
  * of which use ARM any more).  See the "Databook" from Synopsys for
  * information beyond what licensees probably provide.
  *
- * The driver has currently been tested only with the Atmel AT32AP7000,
- * which does not support descriptor writeback.
+ * The driver has been tested with the Atmel AT32AP7000, which does not
+ * support descriptor writeback.
  */
 
 static inline bool is_request_line_unset(struct dw_dma_chan *dwc)
@@ -1479,7 +1479,6 @@ static void dw_dma_off(struct dw_dma *dw)
 int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
 {
        struct dw_dma           *dw;
-       size_t                  size;
        bool                    autocfg;
        unsigned int            dw_params;
        unsigned int            nr_channels;
@@ -1487,6 +1486,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        int                     err;
        int                     i;
 
+       dw = devm_kzalloc(chip->dev, sizeof(*dw), GFP_KERNEL);
+       if (!dw)
+               return -ENOMEM;
+
+       dw->regs = chip->regs;
+       chip->dw = dw;
+
        dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
        autocfg = dw_params >> DW_PARAMS_EN & 0x1;
 
@@ -1509,9 +1515,9 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
        else
                nr_channels = pdata->nr_channels;
 
-       size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan);
-       dw = devm_kzalloc(chip->dev, size, GFP_KERNEL);
-       if (!dw)
+       dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan),
+                               GFP_KERNEL);
+       if (!dw->chan)
                return -ENOMEM;
 
        dw->clk = devm_clk_get(chip->dev, "hclk");
@@ -1519,9 +1525,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
                return PTR_ERR(dw->clk);
        clk_prepare_enable(dw->clk);
 
-       dw->regs = chip->regs;
-       chip->dw = dw;
-
        /* Get hardware configuration parameters */
        if (autocfg) {
                max_blk_size = dma_readl(dw, MAX_BLK_SIZE);
index e89fc24b829307b88b5ce022296fb47328127c44..fec59f1a77bb23e555f87f1ec315a3b728abea74 100644 (file)
@@ -75,6 +75,36 @@ static void dw_pci_remove(struct pci_dev *pdev)
                dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
 }
 
+#ifdef CONFIG_PM_SLEEP
+
+static int dw_pci_suspend_late(struct device *dev)
+{
+       struct pci_dev *pci = to_pci_dev(dev);
+       struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+       return dw_dma_suspend(chip);
+}
+
+static int dw_pci_resume_early(struct device *dev)
+{
+       struct pci_dev *pci = to_pci_dev(dev);
+       struct dw_dma_chip *chip = pci_get_drvdata(pci);
+
+       return dw_dma_resume(chip);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define dw_pci_suspend_late    NULL
+#define dw_pci_resume_early    NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dw_pci_dev_pm_ops = {
+       .suspend_late = dw_pci_suspend_late,
+       .resume_early = dw_pci_resume_early,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
        /* Medfield */
        { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata },
@@ -83,6 +113,9 @@ static DEFINE_PCI_DEVICE_TABLE(dw_pci_id_table) = {
        /* BayTrail */
        { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata },
        { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata },
+
+       /* Haswell */
+       { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata },
        { }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
@@ -92,6 +125,9 @@ static struct pci_driver dw_pci_driver = {
        .id_table       = dw_pci_id_table,
        .probe          = dw_pci_probe,
        .remove         = dw_pci_remove,
+       .driver = {
+               .pm     = &dw_pci_dev_pm_ops,
+       },
 };
 
 module_pci_driver(dw_pci_driver);
index deb4274f80f41b9e1a5ed9ba6db9e21bc37d0687..bb98d3e91e8b2a6a656ba58bc31739c620e4c75a 100644 (file)
@@ -252,13 +252,13 @@ struct dw_dma {
        struct tasklet_struct   tasklet;
        struct clk              *clk;
 
+       /* channels */
+       struct dw_dma_chan      *chan;
        u8                      all_chan_mask;
 
        /* hardware configuration */
        unsigned char           nr_masters;
        unsigned char           data_width[4];
-
-       struct dw_dma_chan      chan[0];
 };
 
 static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
index cd8da451d1995fef8b6d076005b17ad1a17b44d1..cd04eb7b182e338994f03f8f1d1f84c28d49fc40 100644 (file)
@@ -539,6 +539,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                                edma_alloc_slot(EDMA_CTLR(echan->ch_num),
                                                EDMA_SLOT_ANY);
                        if (echan->slot[i] < 0) {
+                               kfree(edesc);
                                dev_err(dev, "Failed to allocate slot\n");
                                return NULL;
                        }
@@ -553,8 +554,10 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
                ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
                                       dst_addr, burst, dev_width, period_len,
                                       direction);
-               if (ret < 0)
+               if (ret < 0) {
+                       kfree(edesc);
                        return NULL;
+               }
 
                if (direction == DMA_DEV_TO_MEM)
                        dst_addr += period_len;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
new file mode 100644 (file)
index 0000000..381e793
--- /dev/null
@@ -0,0 +1,975 @@
+/*
+ * drivers/dma/fsl-edma.c
+ *
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ *
+ * Driver for the Freescale eDMA engine with flexible channel multiplexing
+ * capability for DMA request sources. The eDMA block can be found on some
+ * Vybrid and Layerscape SoCs.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+
+#include "virt-dma.h"
+
+#define EDMA_CR                        0x00
+#define EDMA_ES                        0x04
+#define EDMA_ERQ               0x0C
+#define EDMA_EEI               0x14
+#define EDMA_SERQ              0x1B
+#define EDMA_CERQ              0x1A
+#define EDMA_SEEI              0x19
+#define EDMA_CEEI              0x18
+#define EDMA_CINT              0x1F
+#define EDMA_CERR              0x1E
+#define EDMA_SSRT              0x1D
+#define EDMA_CDNE              0x1C
+#define EDMA_INTR              0x24
+#define EDMA_ERR               0x2C
+
+#define EDMA_TCD_SADDR(x)      (0x1000 + 32 * (x))
+#define EDMA_TCD_SOFF(x)       (0x1004 + 32 * (x))
+#define EDMA_TCD_ATTR(x)       (0x1006 + 32 * (x))
+#define EDMA_TCD_NBYTES(x)     (0x1008 + 32 * (x))
+#define EDMA_TCD_SLAST(x)      (0x100C + 32 * (x))
+#define EDMA_TCD_DADDR(x)      (0x1010 + 32 * (x))
+#define EDMA_TCD_DOFF(x)       (0x1014 + 32 * (x))
+#define EDMA_TCD_CITER_ELINK(x)        (0x1016 + 32 * (x))
+#define EDMA_TCD_CITER(x)      (0x1016 + 32 * (x))
+#define EDMA_TCD_DLAST_SGA(x)  (0x1018 + 32 * (x))
+#define EDMA_TCD_CSR(x)                (0x101C + 32 * (x))
+#define EDMA_TCD_BITER_ELINK(x)        (0x101E + 32 * (x))
+#define EDMA_TCD_BITER(x)      (0x101E + 32 * (x))
+
+#define EDMA_CR_EDBG           BIT(1)
+#define EDMA_CR_ERCA           BIT(2)
+#define EDMA_CR_ERGA           BIT(3)
+#define EDMA_CR_HOE            BIT(4)
+#define EDMA_CR_HALT           BIT(5)
+#define EDMA_CR_CLM            BIT(6)
+#define EDMA_CR_EMLM           BIT(7)
+#define EDMA_CR_ECX            BIT(16)
+#define EDMA_CR_CX             BIT(17)
+
+#define EDMA_SEEI_SEEI(x)      ((x) & 0x1F)
+#define EDMA_CEEI_CEEI(x)      ((x) & 0x1F)
+#define EDMA_CINT_CINT(x)      ((x) & 0x1F)
+#define EDMA_CERR_CERR(x)      ((x) & 0x1F)
+
+#define EDMA_TCD_ATTR_DSIZE(x)         (((x) & 0x0007))
+#define EDMA_TCD_ATTR_DMOD(x)          (((x) & 0x001F) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x)         (((x) & 0x0007) << 8)
+#define EDMA_TCD_ATTR_SMOD(x)          (((x) & 0x001F) << 11)
+#define EDMA_TCD_ATTR_SSIZE_8BIT       (0x0000)
+#define EDMA_TCD_ATTR_SSIZE_16BIT      (0x0100)
+#define EDMA_TCD_ATTR_SSIZE_32BIT      (0x0200)
+#define EDMA_TCD_ATTR_SSIZE_64BIT      (0x0300)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE     (0x0500)
+#define EDMA_TCD_ATTR_DSIZE_8BIT       (0x0000)
+#define EDMA_TCD_ATTR_DSIZE_16BIT      (0x0001)
+#define EDMA_TCD_ATTR_DSIZE_32BIT      (0x0002)
+#define EDMA_TCD_ATTR_DSIZE_64BIT      (0x0003)
+#define EDMA_TCD_ATTR_DSIZE_32BYTE     (0x0005)
+
+#define EDMA_TCD_SOFF_SOFF(x)          (x)
+#define EDMA_TCD_NBYTES_NBYTES(x)      (x)
+#define EDMA_TCD_SLAST_SLAST(x)                (x)
+#define EDMA_TCD_DADDR_DADDR(x)                (x)
+#define EDMA_TCD_CITER_CITER(x)                ((x) & 0x7FFF)
+#define EDMA_TCD_DOFF_DOFF(x)          (x)
+#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x)        (x)
+#define EDMA_TCD_BITER_BITER(x)                ((x) & 0x7FFF)
+
+#define EDMA_TCD_CSR_START             BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR         BIT(1)
+#define EDMA_TCD_CSR_INT_HALF          BIT(2)
+#define EDMA_TCD_CSR_D_REQ             BIT(3)
+#define EDMA_TCD_CSR_E_SG              BIT(4)
+#define EDMA_TCD_CSR_E_LINK            BIT(5)
+#define EDMA_TCD_CSR_ACTIVE            BIT(6)
+#define EDMA_TCD_CSR_DONE              BIT(7)
+
+#define EDMAMUX_CHCFG_DIS              0x0
+#define EDMAMUX_CHCFG_ENBL             0x80
+#define EDMAMUX_CHCFG_SOURCE(n)                ((n) & 0x3F)
+
+#define DMAMUX_NR      2
+
+#define FSL_EDMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+                               BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+
+struct fsl_edma_hw_tcd {
+       u32     saddr;
+       u16     soff;
+       u16     attr;
+       u32     nbytes;
+       u32     slast;
+       u32     daddr;
+       u16     doff;
+       u16     citer;
+       u32     dlast_sga;
+       u16     csr;
+       u16     biter;
+};
+
+struct fsl_edma_sw_tcd {
+       dma_addr_t                      ptcd;
+       struct fsl_edma_hw_tcd          *vtcd;
+};
+
+struct fsl_edma_slave_config {
+       enum dma_transfer_direction     dir;
+       enum dma_slave_buswidth         addr_width;
+       u32                             dev_addr;
+       u32                             burst;
+       u32                             attr;
+};
+
+struct fsl_edma_chan {
+       struct virt_dma_chan            vchan;
+       enum dma_status                 status;
+       struct fsl_edma_engine          *edma;
+       struct fsl_edma_desc            *edesc;
+       struct fsl_edma_slave_config    fsc;
+       struct dma_pool                 *tcd_pool;
+};
+
+struct fsl_edma_desc {
+       struct virt_dma_desc            vdesc;
+       struct fsl_edma_chan            *echan;
+       bool                            iscyclic;
+       unsigned int                    n_tcds;
+       struct fsl_edma_sw_tcd          tcd[];
+};
+
+struct fsl_edma_engine {
+       struct dma_device       dma_dev;
+       void __iomem            *membase;
+       void __iomem            *muxbase[DMAMUX_NR];
+       struct clk              *muxclk[DMAMUX_NR];
+       struct mutex            fsl_edma_mutex;
+       u32                     n_chans;
+       int                     txirq;
+       int                     errirq;
+       bool                    big_endian;
+       struct fsl_edma_chan    chans[];
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * the eDMA controller's endianness is independent of the CPU core's.
+ */
+
+static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+       if (edma->big_endian)
+               return ioread16be(addr);
+       else
+               return ioread16(addr);
+}
+
+static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+       if (edma->big_endian)
+               return ioread32be(addr);
+       else
+               return ioread32(addr);
+}
+
+static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
+{
+       iowrite8(val, addr);
+}
+
+static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
+{
+       if (edma->big_endian)
+               iowrite16be(val, addr);
+       else
+               iowrite16(val, addr);
+}
+
+static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
+{
+       if (edma->big_endian)
+               iowrite32be(val, addr);
+       else
+               iowrite32(val, addr);
+}
+
+static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+       return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+       return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+       void __iomem *addr = fsl_chan->edma->membase;
+       u32 ch = fsl_chan->vchan.chan.chan_id;
+
+       edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
+       edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
+}
+
+static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+       void __iomem *addr = fsl_chan->edma->membase;
+       u32 ch = fsl_chan->vchan.chan.chan_id;
+
+       edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
+       edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
+}
+
+static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+                       unsigned int slot, bool enable)
+{
+       u32 ch = fsl_chan->vchan.chan.chan_id;
+       void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR];
+       unsigned chans_per_mux, ch_off;
+
+       chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+       ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+
+       if (enable)
+               edma_writeb(fsl_chan->edma,
+                               EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
+                               muxaddr + ch_off);
+       else
+               edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+       switch (addr_width) {
+       case 1:
+               return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+       case 2:
+               return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+       case 4:
+               return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+       case 8:
+               return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+       default:
+               return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+       }
+}
+
+static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+       struct fsl_edma_desc *fsl_desc;
+       int i;
+
+       fsl_desc = to_fsl_edma_desc(vdesc);
+       for (i = 0; i < fsl_desc->n_tcds; i++)
+               dma_pool_free(fsl_desc->echan->tcd_pool,
+                             fsl_desc->tcd[i].vtcd, fsl_desc->tcd[i].ptcd);
+       kfree(fsl_desc);
+}
+
+static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+               unsigned long arg)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       struct dma_slave_config *cfg = (void *)arg;
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+               fsl_edma_disable_request(fsl_chan);
+               fsl_chan->edesc = NULL;
+               vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+               spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+               vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+               return 0;
+
+       case DMA_SLAVE_CONFIG:
+               fsl_chan->fsc.dir = cfg->direction;
+               if (cfg->direction == DMA_DEV_TO_MEM) {
+                       fsl_chan->fsc.dev_addr = cfg->src_addr;
+                       fsl_chan->fsc.addr_width = cfg->src_addr_width;
+                       fsl_chan->fsc.burst = cfg->src_maxburst;
+                       fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
+               } else if (cfg->direction == DMA_MEM_TO_DEV) {
+                       fsl_chan->fsc.dev_addr = cfg->dst_addr;
+                       fsl_chan->fsc.addr_width = cfg->dst_addr_width;
+                       fsl_chan->fsc.burst = cfg->dst_maxburst;
+                       fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
+               } else {
+                       return -EINVAL;
+               }
+               return 0;
+
+       case DMA_PAUSE:
+               spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+               if (fsl_chan->edesc) {
+                       fsl_edma_disable_request(fsl_chan);
+                       fsl_chan->status = DMA_PAUSED;
+               }
+               spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+               return 0;
+
+       case DMA_RESUME:
+               spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+               if (fsl_chan->edesc) {
+                       fsl_edma_enable_request(fsl_chan);
+                       fsl_chan->status = DMA_IN_PROGRESS;
+               }
+               spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+               return 0;
+
+       default:
+               return -ENXIO;
+       }
+}
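
For context, this is roughly how a peripheral client would reach the DMA_SLAVE_CONFIG case above through the generic dmaengine API. A hedged sketch shown outside the patch; the channel, FIFO address, buffer and burst size are placeholders, not values taken from this driver:

    #include <linux/dmaengine.h>

    /* hypothetical peripheral client; addresses and sizes are placeholders */
    static int example_start_rx(struct dma_chan *chan, dma_addr_t fifo_phys,
    			    dma_addr_t buf_phys, size_t buf_len, size_t period_len)
    {
    	struct dma_slave_config cfg = {
    		.direction	= DMA_DEV_TO_MEM,
    		.src_addr	= fifo_phys,
    		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
    		.src_maxburst	= 4,
    	};
    	struct dma_async_tx_descriptor *desc;
    	int ret;

    	/* lands in the DMA_SLAVE_CONFIG case handled above */
    	ret = dmaengine_slave_config(chan, &cfg);
    	if (ret)
    		return ret;

    	/* built by fsl_edma_prep_dma_cyclic() further down in this file */
    	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
    					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
    	if (!desc)
    		return -ENOMEM;

    	dmaengine_submit(desc);
    	dma_async_issue_pending(chan);	/* ends up in fsl_edma_issue_pending() */
    	return 0;
    }
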
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+               struct virt_dma_desc *vdesc, bool in_progress)
+{
+       struct fsl_edma_desc *edesc = fsl_chan->edesc;
+       void __iomem *addr = fsl_chan->edma->membase;
+       u32 ch = fsl_chan->vchan.chan.chan_id;
+       enum dma_transfer_direction dir = fsl_chan->fsc.dir;
+       dma_addr_t cur_addr, dma_addr;
+       size_t len, size;
+       int i;
+
+       /* calculate the total size in this desc */
+       for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+               len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+                       * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+
+       if (!in_progress)
+               return len;
+
+       if (dir == DMA_MEM_TO_DEV)
+               cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
+       else
+               cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
+
+       /* figure out which TCDs have completed and calculate the residue */
+       for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+               size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
+                       * edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+               if (dir == DMA_MEM_TO_DEV)
+                       dma_addr = edma_readl(fsl_chan->edma,
+                                       &(edesc->tcd[i].vtcd->saddr));
+               else
+                       dma_addr = edma_readl(fsl_chan->edma,
+                                       &(edesc->tcd[i].vtcd->daddr));
+
+               len -= size;
+               if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+                       len += dma_addr + size - cur_addr;
+                       break;
+               }
+       }
+
+       return len;
+}
+
+static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+               dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       struct virt_dma_desc *vdesc;
+       enum dma_status status;
+       unsigned long flags;
+
+       status = dma_cookie_status(chan, cookie, txstate);
+       if (status == DMA_COMPLETE)
+               return status;
+
+       if (!txstate)
+               return fsl_chan->status;
+
+       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+       vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+       if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+               txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
+       else if (vdesc)
+               txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
+       else
+               txstate->residue = 0;
+
+       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+       return fsl_chan->status;
+}
+
+static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
+               u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
+               u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
+               u16 csr)
+{
+       void __iomem *addr = fsl_chan->edma->membase;
+       u32 ch = fsl_chan->vchan.chan.chan_id;
+
+       /*
+        * TCD parameters were already byte-swapped into the eDMA endianness by
+        * fill_tcd_params(), so write them here with plain CPU-endian accessors
+        */
+       writew(0, addr + EDMA_TCD_CSR(ch));
+       writel(src, addr + EDMA_TCD_SADDR(ch));
+       writel(dst, addr + EDMA_TCD_DADDR(ch));
+       writew(attr, addr + EDMA_TCD_ATTR(ch));
+       writew(soff, addr + EDMA_TCD_SOFF(ch));
+       writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
+       writel(slast, addr + EDMA_TCD_SLAST(ch));
+       writew(citer, addr + EDMA_TCD_CITER(ch));
+       writew(biter, addr + EDMA_TCD_BITER(ch));
+       writew(doff, addr + EDMA_TCD_DOFF(ch));
+       writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
+       writew(csr, addr + EDMA_TCD_CSR(ch));
+}
+
+static void fill_tcd_params(struct fsl_edma_engine *edma,
+               struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+               u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+               u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+               bool disable_req, bool enable_sg)
+{
+       u16 csr = 0;
+
+       /*
+        * eDMA hardware SG requires the TCD parameters to be stored in memory
+        * with the same endianness as the eDMA module so that the engine can
+        * load them automatically
+        */
+       edma_writel(edma, src, &(tcd->saddr));
+       edma_writel(edma, dst, &(tcd->daddr));
+       edma_writew(edma, attr, &(tcd->attr));
+       edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
+       edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
+       edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
+       edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
+       edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
+       edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
+       edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+       if (major_int)
+               csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+       if (disable_req)
+               csr |= EDMA_TCD_CSR_D_REQ;
+
+       if (enable_sg)
+               csr |= EDMA_TCD_CSR_E_SG;
+
+       edma_writew(edma, csr, &(tcd->csr));
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+               int sg_len)
+{
+       struct fsl_edma_desc *fsl_desc;
+       int i;
+
+       fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
+                               GFP_NOWAIT);
+       if (!fsl_desc)
+               return NULL;
+
+       fsl_desc->echan = fsl_chan;
+       fsl_desc->n_tcds = sg_len;
+       for (i = 0; i < sg_len; i++) {
+               fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+                                       GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+               if (!fsl_desc->tcd[i].vtcd)
+                       goto err;
+       }
+       return fsl_desc;
+
+err:
+       while (--i >= 0)
+               dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+                               fsl_desc->tcd[i].ptcd);
+       kfree(fsl_desc);
+       return NULL;
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+               struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+               size_t period_len, enum dma_transfer_direction direction,
+               unsigned long flags, void *context)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       struct fsl_edma_desc *fsl_desc;
+       dma_addr_t dma_buf_next;
+       int sg_len, i;
+       u32 src_addr, dst_addr, last_sg, nbytes;
+       u16 soff, doff, iter;
+
+       if (!is_slave_direction(fsl_chan->fsc.dir))
+               return NULL;
+
+       sg_len = buf_len / period_len;
+       fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+       if (!fsl_desc)
+               return NULL;
+       fsl_desc->iscyclic = true;
+
+       dma_buf_next = dma_addr;
+       nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+       iter = period_len / nbytes;
+
+       for (i = 0; i < sg_len; i++) {
+               if (dma_buf_next >= dma_addr + buf_len)
+                       dma_buf_next = dma_addr;
+
+               /* get next sg's physical address */
+               last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+               if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+                       src_addr = dma_buf_next;
+                       dst_addr = fsl_chan->fsc.dev_addr;
+                       soff = fsl_chan->fsc.addr_width;
+                       doff = 0;
+               } else {
+                       src_addr = fsl_chan->fsc.dev_addr;
+                       dst_addr = dma_buf_next;
+                       soff = 0;
+                       doff = fsl_chan->fsc.addr_width;
+               }
+
+               fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
+                               dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
+                               iter, iter, doff, last_sg, true, false, true);
+               dma_buf_next += period_len;
+       }
+
+       return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+               struct dma_chan *chan, struct scatterlist *sgl,
+               unsigned int sg_len, enum dma_transfer_direction direction,
+               unsigned long flags, void *context)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       struct fsl_edma_desc *fsl_desc;
+       struct scatterlist *sg;
+       u32 src_addr, dst_addr, last_sg, nbytes;
+       u16 soff, doff, iter;
+       int i;
+
+       if (!is_slave_direction(fsl_chan->fsc.dir))
+               return NULL;
+
+       fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+       if (!fsl_desc)
+               return NULL;
+       fsl_desc->iscyclic = false;
+
+       nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
+       for_each_sg(sgl, sg, sg_len, i) {
+               /* get next sg's physical address */
+               last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+               if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
+                       src_addr = sg_dma_address(sg);
+                       dst_addr = fsl_chan->fsc.dev_addr;
+                       soff = fsl_chan->fsc.addr_width;
+                       doff = 0;
+               } else {
+                       src_addr = fsl_chan->fsc.dev_addr;
+                       dst_addr = sg_dma_address(sg);
+                       soff = 0;
+                       doff = fsl_chan->fsc.addr_width;
+               }
+
+               iter = sg_dma_len(sg) / nbytes;
+               if (i < sg_len - 1) {
+                       last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+                       fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+                                       src_addr, dst_addr, fsl_chan->fsc.attr,
+                                       soff, nbytes, 0, iter, iter, doff, last_sg,
+                                       false, false, true);
+               } else {
+                       last_sg = 0;
+                       fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
+                                       src_addr, dst_addr, fsl_chan->fsc.attr,
+                                       soff, nbytes, 0, iter, iter, doff, last_sg,
+                                       true, true, false);
+               }
+       }
+
+       return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+
+static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+       struct fsl_edma_hw_tcd *tcd;
+       struct virt_dma_desc *vdesc;
+
+       vdesc = vchan_next_desc(&fsl_chan->vchan);
+       if (!vdesc)
+               return;
+       fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+       tcd = fsl_chan->edesc->tcd[0].vtcd;
+       fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
+                       tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
+                       tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+       fsl_edma_enable_request(fsl_chan);
+       fsl_chan->status = DMA_IN_PROGRESS;
+}
+
+static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
+{
+       struct fsl_edma_engine *fsl_edma = dev_id;
+       unsigned int intr, ch;
+       void __iomem *base_addr;
+       struct fsl_edma_chan *fsl_chan;
+
+       base_addr = fsl_edma->membase;
+
+       intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+       if (!intr)
+               return IRQ_NONE;
+
+       for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+               if (intr & (0x1 << ch)) {
+                       edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
+                               base_addr + EDMA_CINT);
+
+                       fsl_chan = &fsl_edma->chans[ch];
+
+                       spin_lock(&fsl_chan->vchan.lock);
+                       if (!fsl_chan->edesc->iscyclic) {
+                               list_del(&fsl_chan->edesc->vdesc.node);
+                               vchan_cookie_complete(&fsl_chan->edesc->vdesc);
+                               fsl_chan->edesc = NULL;
+                               fsl_chan->status = DMA_COMPLETE;
+                       } else {
+                               vchan_cyclic_callback(&fsl_chan->edesc->vdesc);
+                       }
+
+                       if (!fsl_chan->edesc)
+                               fsl_edma_xfer_desc(fsl_chan);
+
+                       spin_unlock(&fsl_chan->vchan.lock);
+               }
+       }
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
+{
+       struct fsl_edma_engine *fsl_edma = dev_id;
+       unsigned int err, ch;
+
+       err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+       if (!err)
+               return IRQ_NONE;
+
+       for (ch = 0; ch < fsl_edma->n_chans; ch++) {
+               if (err & (0x1 << ch)) {
+                       fsl_edma_disable_request(&fsl_edma->chans[ch]);
+                       edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
+                               fsl_edma->membase + EDMA_CERR);
+                       fsl_edma->chans[ch].status = DMA_ERROR;
+               }
+       }
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
+{
+       if (fsl_edma_tx_handler(irq, dev_id) == IRQ_HANDLED)
+               return IRQ_HANDLED;
+
+       return fsl_edma_err_handler(irq, dev_id);
+}
+
+static void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+       if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+               fsl_edma_xfer_desc(fsl_chan);
+
+       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+
+static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
+               struct of_dma *ofdma)
+{
+       struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
+       struct dma_chan *chan, *_chan;
+
+       if (dma_spec->args_count != 2)
+               return NULL;
+
+       mutex_lock(&fsl_edma->fsl_edma_mutex);
+       list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
+               if (chan->client_count)
+                       continue;
+               if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) {
+                       chan = dma_get_slave_channel(chan);
+                       if (chan) {
+                               chan->device->privatecnt++;
+                               fsl_edma_chan_mux(to_fsl_edma_chan(chan),
+                                       dma_spec->args[1], true);
+                               mutex_unlock(&fsl_edma->fsl_edma_mutex);
+                               return chan;
+                       }
+               }
+       }
+       mutex_unlock(&fsl_edma->fsl_edma_mutex);
+       return NULL;
+}
+
+static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+       fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+                               sizeof(struct fsl_edma_hw_tcd),
+                               32, 0);
+       return 0;
+}
+
+static void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+       struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+       unsigned long flags;
+       LIST_HEAD(head);
+
+       spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+       fsl_edma_disable_request(fsl_chan);
+       fsl_edma_chan_mux(fsl_chan, 0, false);
+       fsl_chan->edesc = NULL;
+       vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+       spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+       vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+       dma_pool_destroy(fsl_chan->tcd_pool);
+       fsl_chan->tcd_pool = NULL;
+}
+
+static int fsl_dma_device_slave_caps(struct dma_chan *dchan,
+               struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = FSL_EDMA_BUSWIDTHS;
+       caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = true;
+       caps->cmd_terminate = true;
+
+       return 0;
+}
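
A client can query these capabilities through the generic dma_get_slave_caps() helper before relying on pause or terminate support. A small illustrative check, not part of the patch; the particular conditions tested are arbitrary:

    #include <linux/bitops.h>
    #include <linux/dmaengine.h>

    /* hypothetical client-side capability check */
    static bool example_chan_can_pause(struct dma_chan *chan)
    {
    	struct dma_slave_caps caps;

    	if (dma_get_slave_caps(chan, &caps))
    		return false;

    	return caps.cmd_pause &&
    	       (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
    	       (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES));
    }
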
+
+static int
+fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
+{
+       int ret;
+
+       fsl_edma->txirq = platform_get_irq_byname(pdev, "edma-tx");
+       if (fsl_edma->txirq < 0) {
+               dev_err(&pdev->dev, "Can't get edma-tx irq.\n");
+               return fsl_edma->txirq;
+       }
+
+       fsl_edma->errirq = platform_get_irq_byname(pdev, "edma-err");
+       if (fsl_edma->errirq < 0) {
+               dev_err(&pdev->dev, "Can't get edma-err irq.\n");
+               return fsl_edma->errirq;
+       }
+
+       if (fsl_edma->txirq == fsl_edma->errirq) {
+               ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+                               fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
+               if (ret) {
+                       dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
+                       return ret;
+               }
+       } else {
+               ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
+                               fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
+               if (ret) {
+                       dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
+                       return ret;
+               }
+
+               ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
+                               fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
+               if (ret) {
+                       dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int fsl_edma_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct fsl_edma_engine *fsl_edma;
+       struct fsl_edma_chan *fsl_chan;
+       struct resource *res;
+       int len, chans;
+       int ret, i;
+
+       ret = of_property_read_u32(np, "dma-channels", &chans);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't get dma-channels.\n");
+               return ret;
+       }
+
+       len = sizeof(*fsl_edma) + sizeof(*fsl_chan) * chans;
+       fsl_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+       if (!fsl_edma)
+               return -ENOMEM;
+
+       fsl_edma->n_chans = chans;
+       mutex_init(&fsl_edma->fsl_edma_mutex);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(fsl_edma->membase))
+               return PTR_ERR(fsl_edma->membase);
+
+       for (i = 0; i < DMAMUX_NR; i++) {
+               char clkname[32];
+
+               res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
+               fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(fsl_edma->muxbase[i]))
+                       return PTR_ERR(fsl_edma->muxbase[i]);
+
+               sprintf(clkname, "dmamux%d", i);
+               fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname);
+               if (IS_ERR(fsl_edma->muxclk[i])) {
+                       dev_err(&pdev->dev, "Missing DMAMUX block clock.\n");
+                       return PTR_ERR(fsl_edma->muxclk[i]);
+               }
+
+               ret = clk_prepare_enable(fsl_edma->muxclk[i]);
+               if (ret) {
+                       dev_err(&pdev->dev, "Failed to enable DMAMUX block clock.\n");
+                       return ret;
+               }
+       }
+
+       ret = fsl_edma_irq_init(pdev, fsl_edma);
+       if (ret)
+               return ret;
+
+       fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
+
+       INIT_LIST_HEAD(&fsl_edma->dma_dev.channels);
+       for (i = 0; i < fsl_edma->n_chans; i++) {
+               struct fsl_edma_chan *fsl_chan = &fsl_edma->chans[i];
+
+               fsl_chan->edma = fsl_edma;
+
+               fsl_chan->vchan.desc_free = fsl_edma_free_desc;
+               vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
+
+               edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+               fsl_edma_chan_mux(fsl_chan, 0, false);
+       }
+
+       dma_cap_set(DMA_PRIVATE, fsl_edma->dma_dev.cap_mask);
+       dma_cap_set(DMA_SLAVE, fsl_edma->dma_dev.cap_mask);
+       dma_cap_set(DMA_CYCLIC, fsl_edma->dma_dev.cap_mask);
+
+       fsl_edma->dma_dev.dev = &pdev->dev;
+       fsl_edma->dma_dev.device_alloc_chan_resources
+               = fsl_edma_alloc_chan_resources;
+       fsl_edma->dma_dev.device_free_chan_resources
+               = fsl_edma_free_chan_resources;
+       fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+       fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+       fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic;
+       fsl_edma->dma_dev.device_control = fsl_edma_control;
+       fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+       fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps;
+
+       platform_set_drvdata(pdev, fsl_edma);
+
+       ret = dma_async_device_register(&fsl_edma->dma_dev);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't register Freescale eDMA engine.\n");
+               return ret;
+       }
+
+       ret = of_dma_controller_register(np, fsl_edma_xlate, fsl_edma);
+       if (ret) {
+               dev_err(&pdev->dev, "Can't register Freescale eDMA of_dma.\n");
+               dma_async_device_unregister(&fsl_edma->dma_dev);
+               return ret;
+       }
+
+       /* enable round robin arbitration */
+       edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+
+       return 0;
+}
+
+static int fsl_edma_remove(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct fsl_edma_engine *fsl_edma = platform_get_drvdata(pdev);
+       int i;
+
+       of_dma_controller_free(np);
+       dma_async_device_unregister(&fsl_edma->dma_dev);
+
+       for (i = 0; i < DMAMUX_NR; i++)
+               clk_disable_unprepare(fsl_edma->muxclk[i]);
+
+       return 0;
+}
+
+static const struct of_device_id fsl_edma_dt_ids[] = {
+       { .compatible = "fsl,vf610-edma", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
+
+static struct platform_driver fsl_edma_driver = {
+       .driver         = {
+               .name   = "fsl-edma",
+               .owner  = THIS_MODULE,
+               .of_match_table = fsl_edma_dt_ids,
+       },
+       .probe          = fsl_edma_probe,
+       .remove         = fsl_edma_remove,
+};
+
+module_platform_driver(fsl_edma_driver);
+
+MODULE_ALIAS("platform:fsl-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver");
+MODULE_LICENSE("GPL v2");
index 6f9ac2022abd8b3d23c739bc7face1cf662a39f3..286660a12cc695b699b1e319968f89e148431dad 100644 (file)
@@ -422,12 +422,12 @@ static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
                /* Tasklet error handler */
                tasklet_schedule(&imxdma->channel[i].dma_tasklet);
 
-               printk(KERN_WARNING
-                      "DMA timeout on channel %d -%s%s%s%s\n", i,
-                      errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
-                      errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
-                      errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
-                      errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
+               dev_warn(imxdma->dev,
+                        "DMA timeout on channel %d -%s%s%s%s\n", i,
+                        errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
+                        errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
+                        errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
+                        errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
        }
        return IRQ_HANDLED;
 }
@@ -1236,6 +1236,7 @@ static int imxdma_remove(struct platform_device *pdev)
 static struct platform_driver imxdma_driver = {
        .driver         = {
                .name   = "imx-dma",
+               .owner  = THIS_MODULE,
                .of_match_table = imx_dma_of_dev_id,
        },
        .id_table       = imx_dma_devtype,
index b439679f4126e98dcd6484607971276b2baf512f..bf02e7beb51ad5c206cd65dd684a3a3ce83a0f77 100644 (file)
@@ -867,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
        phy->base = pdev->base;
 
        if (irq) {
-               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0,
-                                      "pdma", phy);
+               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
+                                      IRQF_SHARED, "pdma", phy);
                if (ret) {
                        dev_err(pdev->dev, "channel request irq fail!\n");
                        return ret;
@@ -957,8 +957,8 @@ static int mmp_pdma_probe(struct platform_device *op)
        if (irq_num != dma_channels) {
                /* all chan share one irq, demux inside */
                irq = platform_get_irq(op, 0);
-               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0,
-                                      "pdma", pdev);
+               ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
+                                      IRQF_SHARED, "pdma", pdev);
                if (ret)
                        return ret;
        }
index 33f96aaa80c759aff2f8098e2135dd6f1b67b90a..724f7f4c9720dba720691911a3eb96d68fbe76a0 100644 (file)
@@ -22,6 +22,7 @@
 #include <mach/regs-icu.h>
 #include <linux/platform_data/dma-mmp_tdma.h>
 #include <linux/of_device.h>
+#include <linux/of_dma.h>
 
 #include "dmaengine.h"
 
@@ -541,6 +542,45 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
        return 0;
 }
 
+struct mmp_tdma_filter_param {
+       struct device_node *of_node;
+       unsigned int chan_id;
+};
+
+static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
+{
+       struct mmp_tdma_filter_param *param = fn_param;
+       struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+       struct dma_device *pdma_device = tdmac->chan.device;
+
+       if (pdma_device->dev->of_node != param->of_node)
+               return false;
+
+       if (chan->chan_id != param->chan_id)
+               return false;
+
+       return true;
+}
+
+struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
+                              struct of_dma *ofdma)
+{
+       struct mmp_tdma_device *tdev = ofdma->of_dma_data;
+       dma_cap_mask_t mask = tdev->device.cap_mask;
+       struct mmp_tdma_filter_param param;
+
+       if (dma_spec->args_count != 1)
+               return NULL;
+
+       param.of_node = ofdma->of_node;
+       param.chan_id = dma_spec->args[0];
+
+       if (param.chan_id >= TDMA_CHANNEL_NUM)
+               return NULL;
+
+       return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
+}
+
 static struct of_device_id mmp_tdma_dt_ids[] = {
        { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA},
        { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU},
@@ -631,6 +671,16 @@ static int mmp_tdma_probe(struct platform_device *pdev)
                return ret;
        }
 
+       if (pdev->dev.of_node) {
+               ret = of_dma_controller_register(pdev->dev.of_node,
+                                                       mmp_tdma_xlate, tdev);
+               if (ret) {
+                       dev_err(tdev->device.dev,
+                               "failed to register controller\n");
+                       dma_async_device_unregister(&tdev->device);
+               }
+       }
+
        dev_info(tdev->device.dev, "initialized\n");
        return 0;
 }
index 64ceca2920b87402230159d8111021248d1d2b60..b19f04f4390bc1907c7e4a181e2bf53cc967bfa3 100644 (file)
@@ -1088,6 +1088,23 @@ static void omap_dma_free(struct omap_dmadev *od)
        }
 }
 
+#define OMAP_DMA_BUSWIDTHS     (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+                                BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+static int omap_dma_device_slave_caps(struct dma_chan *dchan,
+                                     struct dma_slave_caps *caps)
+{
+       caps->src_addr_widths = OMAP_DMA_BUSWIDTHS;
+       caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS;
+       caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+       caps->cmd_pause = true;
+       caps->cmd_terminate = true;
+       caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+
+       return 0;
+}
+
 static int omap_dma_probe(struct platform_device *pdev)
 {
        struct omap_dmadev *od;
@@ -1118,6 +1135,7 @@ static int omap_dma_probe(struct platform_device *pdev)
        od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
        od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
        od->ddev.device_control = omap_dma_control;
+       od->ddev.device_slave_caps = omap_dma_device_slave_caps;
        od->ddev.dev = &pdev->dev;
        INIT_LIST_HEAD(&od->ddev.channels);
        INIT_LIST_HEAD(&od->pending);
index 61fdc54a3c889d133058e046356d5b2f96bfeede..05fa548bd6595ac9fd63deb1200ddc0c595ef0d7 100644 (file)
@@ -964,16 +964,16 @@ static void pch_dma_remove(struct pci_dev *pdev)
        if (pd) {
                dma_async_device_unregister(&pd->dma);
 
+               free_irq(pdev->irq, pd);
+
                list_for_each_entry_safe(chan, _c, &pd->dma.channels,
                                         device_node) {
                        pd_chan = to_pd_chan(chan);
 
-                       tasklet_disable(&pd_chan->tasklet);
                        tasklet_kill(&pd_chan->tasklet);
                }
 
                pci_pool_destroy(pd->pool);
-               free_irq(pdev->irq, pd);
                pci_iounmap(pdev, pd->membase);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
new file mode 100644 (file)
index 0000000..82c9231
--- /dev/null
@@ -0,0 +1,1111 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * QCOM BAM DMA engine driver
+ *
+ * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
+ * peripherals on the MSM 8x74.  The configuration of the channels is dependent
+ * on the way they are hard-wired to that specific peripheral.  The peripheral
+ * device tree entries specify the configuration of each channel.
+ *
+ * The DMA controller requires the use of external memory for storage of the
+ * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
+ * circular buffer and operations are managed according to the offset within the
+ * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
+ * are back to defaults.
+ *
+ * During DMA operations, we write descriptors to the FIFO, being careful to
+ * handle wrapping and then write the last FIFO offset to that channel's
+ * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
+ * indicates the current FIFO offset that is being processed, so there is some
+ * indication of where the hardware is currently working.
+ */
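
To make the scheme described above concrete, here is a hedged, illustrative sketch of kicking off a transfer: copy descriptors into the circular FIFO, wrap at the end, then write the new tail offset to P_EVNT_REG. It is not the driver's actual submit path; 'fifo', 'tail' and 'fifo_size' are assumed bookkeeping values, and the sketch relies on struct bam_desc_hw, BAM_P_EVNT_REG and the <linux/io.h> include that follow in this file:

    /* illustrative only, not part of this patch */
    static void example_bam_kick(void __iomem *regs, unsigned int pipe,
    			     struct bam_desc_hw *fifo, unsigned int *tail,
    			     unsigned int fifo_size,
    			     const struct bam_desc_hw *desc, unsigned int n)
    {
    	unsigned int i;

    	/* copy descriptors into the circular FIFO, wrapping at the end */
    	for (i = 0; i < n; i++) {
    		fifo[*tail] = desc[i];
    		*tail = (*tail + 1) % fifo_size;
    	}

    	/* make sure the descriptor writes are visible before the kick */
    	wmb();

    	/* writing the new tail offset to P_EVNT_REG starts the hardware */
    	writel(*tail * sizeof(struct bam_desc_hw), regs + BAM_P_EVNT_REG(pipe));
    }
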
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+struct bam_desc_hw {
+       u32 addr;               /* Buffer physical address */
+       u16 size;               /* Buffer size in bytes */
+       u16 flags;
+};
+
+#define DESC_FLAG_INT BIT(15)
+#define DESC_FLAG_EOT BIT(14)
+#define DESC_FLAG_EOB BIT(13)
+
+struct bam_async_desc {
+       struct virt_dma_desc vd;
+
+       u32 num_desc;
+       u32 xfer_len;
+       struct bam_desc_hw *curr_desc;
+
+       enum dma_transfer_direction dir;
+       size_t length;
+       struct bam_desc_hw desc[0];
+};
+
+#define BAM_CTRL                       0x0000
+#define BAM_REVISION                   0x0004
+#define BAM_SW_REVISION                        0x0080
+#define BAM_NUM_PIPES                  0x003C
+#define BAM_TIMER                      0x0040
+#define BAM_TIMER_CTRL                 0x0044
+#define BAM_DESC_CNT_TRSHLD            0x0008
+#define BAM_IRQ_SRCS                   0x000C
+#define BAM_IRQ_SRCS_MSK               0x0010
+#define BAM_IRQ_SRCS_UNMASKED          0x0030
+#define BAM_IRQ_STTS                   0x0014
+#define BAM_IRQ_CLR                    0x0018
+#define BAM_IRQ_EN                     0x001C
+#define BAM_CNFG_BITS                  0x007C
+#define BAM_IRQ_SRCS_EE(ee)            (0x0800 + ((ee) * 0x80))
+#define BAM_IRQ_SRCS_MSK_EE(ee)                (0x0804 + ((ee) * 0x80))
+#define BAM_P_CTRL(pipe)               (0x1000 + ((pipe) * 0x1000))
+#define BAM_P_RST(pipe)                        (0x1004 + ((pipe) * 0x1000))
+#define BAM_P_HALT(pipe)               (0x1008 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_STTS(pipe)           (0x1010 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_CLR(pipe)            (0x1014 + ((pipe) * 0x1000))
+#define BAM_P_IRQ_EN(pipe)             (0x1018 + ((pipe) * 0x1000))
+#define BAM_P_EVNT_DEST_ADDR(pipe)     (0x182C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_REG(pipe)           (0x1818 + ((pipe) * 0x1000))
+#define BAM_P_SW_OFSTS(pipe)           (0x1800 + ((pipe) * 0x1000))
+#define BAM_P_DATA_FIFO_ADDR(pipe)     (0x1824 + ((pipe) * 0x1000))
+#define BAM_P_DESC_FIFO_ADDR(pipe)     (0x181C + ((pipe) * 0x1000))
+#define BAM_P_EVNT_TRSHLD(pipe)                (0x1828 + ((pipe) * 0x1000))
+#define BAM_P_FIFO_SIZES(pipe)         (0x1820 + ((pipe) * 0x1000))
+
+/* BAM CTRL */
+#define BAM_SW_RST                     BIT(0)
+#define BAM_EN                         BIT(1)
+#define BAM_EN_ACCUM                   BIT(4)
+#define BAM_TESTBUS_SEL_SHIFT          5
+#define BAM_TESTBUS_SEL_MASK           0x3F
+#define BAM_DESC_CACHE_SEL_SHIFT       13
+#define BAM_DESC_CACHE_SEL_MASK                0x3
+#define BAM_CACHED_DESC_STORE          BIT(15)
+#define IBC_DISABLE                    BIT(16)
+
+/* BAM REVISION */
+#define REVISION_SHIFT         0
+#define REVISION_MASK          0xFF
+#define NUM_EES_SHIFT          8
+#define NUM_EES_MASK           0xF
+#define CE_BUFFER_SIZE         BIT(13)
+#define AXI_ACTIVE             BIT(14)
+#define USE_VMIDMT             BIT(15)
+#define SECURED                        BIT(16)
+#define BAM_HAS_NO_BYPASS      BIT(17)
+#define HIGH_FREQUENCY_BAM     BIT(18)
+#define INACTIV_TMRS_EXST      BIT(19)
+#define NUM_INACTIV_TMRS       BIT(20)
+#define DESC_CACHE_DEPTH_SHIFT 21
+#define DESC_CACHE_DEPTH_1     (0 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_2     (1 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_3     (2 << DESC_CACHE_DEPTH_SHIFT)
+#define DESC_CACHE_DEPTH_4     (3 << DESC_CACHE_DEPTH_SHIFT)
+#define CMD_DESC_EN            BIT(23)
+#define INACTIV_TMR_BASE_SHIFT 24
+#define INACTIV_TMR_BASE_MASK  0xFF
+
+/* BAM NUM PIPES */
+#define BAM_NUM_PIPES_SHIFT            0
+#define BAM_NUM_PIPES_MASK             0xFF
+#define PERIPH_NON_PIPE_GRP_SHIFT      16
+#define PERIPH_NON_PIP_GRP_MASK                0xFF
+#define BAM_NON_PIPE_GRP_SHIFT         24
+#define BAM_NON_PIPE_GRP_MASK          0xFF
+
+/* BAM CNFG BITS */
+#define BAM_PIPE_CNFG          BIT(2)
+#define BAM_FULL_PIPE          BIT(11)
+#define BAM_NO_EXT_P_RST       BIT(12)
+#define BAM_IBC_DISABLE                BIT(13)
+#define BAM_SB_CLK_REQ         BIT(14)
+#define BAM_PSM_CSW_REQ                BIT(15)
+#define BAM_PSM_P_RES          BIT(16)
+#define BAM_AU_P_RES           BIT(17)
+#define BAM_SI_P_RES           BIT(18)
+#define BAM_WB_P_RES           BIT(19)
+#define BAM_WB_BLK_CSW         BIT(20)
+#define BAM_WB_CSW_ACK_IDL     BIT(21)
+#define BAM_WB_RETR_SVPNT      BIT(22)
+#define BAM_WB_DSC_AVL_P_RST   BIT(23)
+#define BAM_REG_P_EN           BIT(24)
+#define BAM_PSM_P_HD_DATA      BIT(25)
+#define BAM_AU_ACCUMED         BIT(26)
+#define BAM_CMD_ENABLE         BIT(27)
+
+#define BAM_CNFG_BITS_DEFAULT  (BAM_PIPE_CNFG |        \
+                                BAM_NO_EXT_P_RST |     \
+                                BAM_IBC_DISABLE |      \
+                                BAM_SB_CLK_REQ |       \
+                                BAM_PSM_CSW_REQ |      \
+                                BAM_PSM_P_RES |        \
+                                BAM_AU_P_RES |         \
+                                BAM_SI_P_RES |         \
+                                BAM_WB_P_RES |         \
+                                BAM_WB_BLK_CSW |       \
+                                BAM_WB_CSW_ACK_IDL |   \
+                                BAM_WB_RETR_SVPNT |    \
+                                BAM_WB_DSC_AVL_P_RST | \
+                                BAM_REG_P_EN |         \
+                                BAM_PSM_P_HD_DATA |    \
+                                BAM_AU_ACCUMED |       \
+                                BAM_CMD_ENABLE)
+
+/* PIPE CTRL */
+#define P_EN                   BIT(1)
+#define P_DIRECTION            BIT(3)
+#define P_SYS_STRM             BIT(4)
+#define P_SYS_MODE             BIT(5)
+#define P_AUTO_EOB             BIT(6)
+#define P_AUTO_EOB_SEL_SHIFT   7
+#define P_AUTO_EOB_SEL_512     (0 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_256     (1 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_128     (2 << P_AUTO_EOB_SEL_SHIFT)
+#define P_AUTO_EOB_SEL_64      (3 << P_AUTO_EOB_SEL_SHIFT)
+#define P_PREFETCH_LIMIT_SHIFT 9
+#define P_PREFETCH_LIMIT_32    (0 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_16    (1 << P_PREFETCH_LIMIT_SHIFT)
+#define P_PREFETCH_LIMIT_4     (2 << P_PREFETCH_LIMIT_SHIFT)
+#define P_WRITE_NWD            BIT(11)
+#define P_LOCK_GROUP_SHIFT     16
+#define P_LOCK_GROUP_MASK      0x1F
+
+/* BAM_DESC_CNT_TRSHLD */
+#define CNT_TRSHLD             0xffff
+#define DEFAULT_CNT_THRSHLD    0x4
+
+/* BAM_IRQ_SRCS */
+#define BAM_IRQ                        BIT(31)
+#define P_IRQ                  0x7fffffff
+
+/* BAM_IRQ_SRCS_MSK */
+#define BAM_IRQ_MSK            BAM_IRQ
+#define P_IRQ_MSK              P_IRQ
+
+/* BAM_IRQ_STTS */
+#define BAM_TIMER_IRQ          BIT(4)
+#define BAM_EMPTY_IRQ          BIT(3)
+#define BAM_ERROR_IRQ          BIT(2)
+#define BAM_HRESP_ERR_IRQ      BIT(1)
+
+/* BAM_IRQ_CLR */
+#define BAM_TIMER_CLR          BIT(4)
+#define BAM_EMPTY_CLR          BIT(3)
+#define BAM_ERROR_CLR          BIT(2)
+#define BAM_HRESP_ERR_CLR      BIT(1)
+
+/* BAM_IRQ_EN */
+#define BAM_TIMER_EN           BIT(4)
+#define BAM_EMPTY_EN           BIT(3)
+#define BAM_ERROR_EN           BIT(2)
+#define BAM_HRESP_ERR_EN       BIT(1)
+
+/* BAM_P_IRQ_EN */
+#define P_PRCSD_DESC_EN                BIT(0)
+#define P_TIMER_EN             BIT(1)
+#define P_WAKE_EN              BIT(2)
+#define P_OUT_OF_DESC_EN       BIT(3)
+#define P_ERR_EN               BIT(4)
+#define P_TRNSFR_END_EN                BIT(5)
+#define P_DEFAULT_IRQS_EN      (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
+
+/* BAM_P_SW_OFSTS */
+#define P_SW_OFSTS_MASK                0xffff
+
+#define BAM_DESC_FIFO_SIZE     SZ_32K
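+/* one descriptor slot is kept spare so the FIFO start can be realigned to 8 bytes */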
+#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
+#define BAM_MAX_DATA_SIZE      (SZ_32K - 8)
+
+struct bam_chan {
+       struct virt_dma_chan vc;
+
+       struct bam_device *bdev;
+
+       /* configuration from device tree */
+       u32 id;
+
+       struct bam_async_desc *curr_txd;        /* current running dma */
+
+       /* runtime configuration */
+       struct dma_slave_config slave;
+
+       /* fifo storage */
+       struct bam_desc_hw *fifo_virt;
+       dma_addr_t fifo_phys;
+
+       /* fifo markers */
+       unsigned short head;            /* start of active descriptor entries */
+       unsigned short tail;            /* end of active descriptor entries */
+
+       unsigned int initialized;       /* is the channel hw initialized? */
+       unsigned int paused;            /* is the channel paused? */
+       unsigned int reconfigure;       /* new slave config? */
+
+       struct list_head node;
+};
+
+static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
+{
+       return container_of(common, struct bam_chan, vc.chan);
+}
+
+struct bam_device {
+       void __iomem *regs;
+       struct device *dev;
+       struct dma_device common;
+       struct device_dma_parameters dma_parms;
+       struct bam_chan *channels;
+       u32 num_channels;
+
+       /* execution environment ID, from DT */
+       u32 ee;
+
+       struct clk *bamclk;
+       int irq;
+
+       /* dma start transaction tasklet */
+       struct tasklet_struct task;
+};
+
+/**
+ * bam_reset_channel - Reset individual BAM DMA channel
+ * @bchan: bam channel
+ *
+ * This function resets a specific BAM channel
+ */
+static void bam_reset_channel(struct bam_chan *bchan)
+{
+       struct bam_device *bdev = bchan->bdev;
+
+       lockdep_assert_held(&bchan->vc.lock);
+
+       /* reset channel */
+       writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id));
+       writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id));
+
+       /* don't allow cpu to reorder BAM register accesses done after this */
+       wmb();
+
+       /* make sure hw is initialized when channel is used the first time  */
+       bchan->initialized = 0;
+}
+
+/**
+ * bam_chan_init_hw - Initialize channel hardware
+ * @bchan: bam channel
+ * @dir: DMA transfer direction
+ *
+ * This function resets and initializes the BAM channel
+ */
+static void bam_chan_init_hw(struct bam_chan *bchan,
+       enum dma_transfer_direction dir)
+{
+       struct bam_device *bdev = bchan->bdev;
+       u32 val;
+
+       /* Reset the channel to clear internal state of the FIFO */
+       bam_reset_channel(bchan);
+
+       /*
+        * write out 8 byte aligned address.  We have enough space for this
+        * because we allocated 1 more descriptor (8 bytes) than we can use
+        */
+       writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
+                       bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id));
+       writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs +
+                       BAM_P_FIFO_SIZES(bchan->id));
+
+       /* enable the per pipe interrupts: processed descriptor, error and transfer end */
+       writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+
+       /* unmask the specific pipe and EE combo */
+       val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+       val |= BIT(bchan->id);
+       writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+       /* don't allow cpu to reorder the channel enable done below */
+       wmb();
+
+       /* set fixed direction and mode, then enable channel */
+       val = P_EN | P_SYS_MODE;
+       if (dir == DMA_DEV_TO_MEM)
+               val |= P_DIRECTION;
+
+       writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id));
+
+       bchan->initialized = 1;
+
+       /* init FIFO pointers */
+       bchan->head = 0;
+       bchan->tail = 0;
+}
+
+/**
+ * bam_alloc_chan - Allocate channel resources for DMA channel.
+ * @chan: specified channel
+ *
+ * This function allocates the FIFO descriptor memory
+ */
+static int bam_alloc_chan(struct dma_chan *chan)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       struct bam_device *bdev = bchan->bdev;
+
+       if (bchan->fifo_virt)
+               return 0;
+
+       /* allocate FIFO descriptor space, but only if necessary */
+       bchan->fifo_virt = dma_alloc_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+                               &bchan->fifo_phys, GFP_KERNEL);
+
+       if (!bchan->fifo_virt) {
+               dev_err(bdev->dev, "Failed to allocate desc fifo\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/**
+ * bam_free_chan - Frees dma resources associated with a specific channel
+ * @chan: specified channel
+ *
+ * Free the allocated fifo descriptor memory and channel resources
+ *
+ */
+static void bam_free_chan(struct dma_chan *chan)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       struct bam_device *bdev = bchan->bdev;
+       u32 val;
+       unsigned long flags;
+
+       vchan_free_chan_resources(to_virt_chan(chan));
+
+       if (bchan->curr_txd) {
+               dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
+               return;
+       }
+
+       spin_lock_irqsave(&bchan->vc.lock, flags);
+       bam_reset_channel(bchan);
+       spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+       dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
+                               bchan->fifo_phys);
+       bchan->fifo_virt = NULL;
+
+       /* mask irq for pipe/channel */
+       val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+       val &= ~BIT(bchan->id);
+       writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+       /* disable irq */
+       writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id));
+}
+
+/**
+ * bam_slave_config - set slave configuration for channel
+ * @bchan: bam dma channel
+ * @cfg: slave configuration
+ *
+ * Sets slave configuration for channel
+ *
+ */
+static void bam_slave_config(struct bam_chan *bchan,
+               struct dma_slave_config *cfg)
+{
+       memcpy(&bchan->slave, cfg, sizeof(*cfg));
+       bchan->reconfigure = 1;
+}
+
+/**
+ * bam_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
+       struct scatterlist *sgl, unsigned int sg_len,
+       enum dma_transfer_direction direction, unsigned long flags,
+       void *context)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       struct bam_device *bdev = bchan->bdev;
+       struct bam_async_desc *async_desc;
+       struct scatterlist *sg;
+       u32 i;
+       struct bam_desc_hw *desc;
+       unsigned int num_alloc = 0;
+
+       if (!is_slave_direction(direction)) {
+               dev_err(bdev->dev, "invalid dma direction\n");
+               return NULL;
+       }
+
+       /* calculate number of required entries */
+       for_each_sg(sgl, sg, sg_len, i)
+               num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_MAX_DATA_SIZE);
+
+       /* allocate enough room to accommodate the number of entries */
+       async_desc = kzalloc(sizeof(*async_desc) +
+                       (num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);
+
+       if (!async_desc)
+               goto err_out;
+
+       async_desc->num_desc = num_alloc;
+       async_desc->curr_desc = async_desc->desc;
+       async_desc->dir = direction;
+
+       /* fill in temporary descriptors */
+       desc = async_desc->desc;
+       for_each_sg(sgl, sg, sg_len, i) {
+               unsigned int remainder = sg_dma_len(sg);
+               unsigned int curr_offset = 0;
+
+               do {
+                       desc->addr = sg_dma_address(sg) + curr_offset;
+
+                       if (remainder > BAM_MAX_DATA_SIZE) {
+                               desc->size = BAM_MAX_DATA_SIZE;
+                               remainder -= BAM_MAX_DATA_SIZE;
+                               curr_offset += BAM_MAX_DATA_SIZE;
+                       } else {
+                               desc->size = remainder;
+                               remainder = 0;
+                       }
+
+                       async_desc->length += desc->size;
+                       desc++;
+               } while (remainder > 0);
+       }
+
+       return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
+
+err_out:
+       kfree(async_desc);
+       return NULL;
+}
+
+/**
+ * bam_dma_terminate_all - terminate all transactions on a channel
+ * @bchan: bam dma channel
+ *
+ * Dequeues and frees all transactions
+ * No callbacks are done
+ *
+ */
+static void bam_dma_terminate_all(struct bam_chan *bchan)
+{
+       unsigned long flag;
+       LIST_HEAD(head);
+
+       /* remove all transactions, including active transaction */
+       spin_lock_irqsave(&bchan->vc.lock, flag);
+       if (bchan->curr_txd) {
+               list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
+               bchan->curr_txd = NULL;
+       }
+
+       vchan_get_all_descriptors(&bchan->vc, &head);
+       spin_unlock_irqrestore(&bchan->vc.lock, flag);
+
+       vchan_dma_desc_free_list(&bchan->vc, &head);
+}
+
+/**
+ * bam_control - DMA device control
+ * @chan: dma channel
+ * @cmd: control cmd
+ * @arg: cmd argument
+ *
+ * Perform DMA control command
+ *
+ */
+static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       struct bam_device *bdev = bchan->bdev;
+       int ret = 0;
+       unsigned long flag;
+
+       switch (cmd) {
+       case DMA_PAUSE:
+               spin_lock_irqsave(&bchan->vc.lock, flag);
+               writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id));
+               bchan->paused = 1;
+               spin_unlock_irqrestore(&bchan->vc.lock, flag);
+               break;
+
+       case DMA_RESUME:
+               spin_lock_irqsave(&bchan->vc.lock, flag);
+               writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id));
+               bchan->paused = 0;
+               spin_unlock_irqrestore(&bchan->vc.lock, flag);
+               break;
+
+       case DMA_TERMINATE_ALL:
+               bam_dma_terminate_all(bchan);
+               break;
+
+       case DMA_SLAVE_CONFIG:
+               spin_lock_irqsave(&bchan->vc.lock, flag);
+               bam_slave_config(bchan, (struct dma_slave_config *)arg);
+               spin_unlock_irqrestore(&bchan->vc.lock, flag);
+               break;
+
+       default:
+               ret = -ENXIO;
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * process_channel_irqs - processes the channel interrupts
+ * @bdev: bam controller
+ *
+ * Clears the per-pipe interrupt status and completes or requeues the affected transactions
+ *
+ */
+static u32 process_channel_irqs(struct bam_device *bdev)
+{
+       u32 i, srcs, pipe_stts;
+       unsigned long flags;
+       struct bam_async_desc *async_desc;
+
+       srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee));
+
+       /* return early if no pipe/channel interrupts are present */
+       if (!(srcs & P_IRQ))
+               return srcs;
+
+       for (i = 0; i < bdev->num_channels; i++) {
+               struct bam_chan *bchan = &bdev->channels[i];
+
+               if (!(srcs & BIT(i)))
+                       continue;
+
+               /* clear pipe irq */
+               pipe_stts = readl_relaxed(bdev->regs +
+                       BAM_P_IRQ_STTS(i));
+
+               writel_relaxed(pipe_stts, bdev->regs +
+                               BAM_P_IRQ_CLR(i));
+
+               spin_lock_irqsave(&bchan->vc.lock, flags);
+               async_desc = bchan->curr_txd;
+
+               if (async_desc) {
+                       async_desc->num_desc -= async_desc->xfer_len;
+                       async_desc->curr_desc += async_desc->xfer_len;
+                       bchan->curr_txd = NULL;
+
+                       /* manage FIFO */
+                       bchan->head += async_desc->xfer_len;
+                       bchan->head %= MAX_DESCRIPTORS;
+
+                       /*
+                        * if complete, process cookie.  Otherwise
+                        * push back to front of desc_issued so that
+                        * it gets restarted by the tasklet
+                        */
+                       if (!async_desc->num_desc)
+                               vchan_cookie_complete(&async_desc->vd);
+                       else
+                               list_add(&async_desc->vd.node,
+                                       &bchan->vc.desc_issued);
+               }
+
+               spin_unlock_irqrestore(&bchan->vc.lock, flags);
+       }
+
+       return srcs;
+}
+
+/**
+ * bam_dma_irq - irq handler for bam controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the bam controller
+ */
+static irqreturn_t bam_dma_irq(int irq, void *data)
+{
+       struct bam_device *bdev = data;
+       u32 clr_mask = 0, srcs = 0;
+
+       srcs |= process_channel_irqs(bdev);
+
+       /* kick off tasklet to start next dma transfer */
+       if (srcs & P_IRQ)
+               tasklet_schedule(&bdev->task);
+
+       if (srcs & BAM_IRQ)
+               clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS);
+
+       /* don't allow reorder of the various accesses to the BAM registers */
+       mb();
+
+       writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR);
+
+       return IRQ_HANDLED;
+}
+
+/**
+ * bam_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+               struct dma_tx_state *txstate)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       struct virt_dma_desc *vd;
+       int ret;
+       size_t residue = 0;
+       unsigned int i;
+       unsigned long flags;
+
+       ret = dma_cookie_status(chan, cookie, txstate);
+       if (ret == DMA_COMPLETE)
+               return ret;
+
+       if (!txstate)
+               return bchan->paused ? DMA_PAUSED : ret;
+
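+       /* residue is the total size of the descriptors that have not completed yet */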
+       spin_lock_irqsave(&bchan->vc.lock, flags);
+       vd = vchan_find_desc(&bchan->vc, cookie);
+       if (vd)
+               residue = container_of(vd, struct bam_async_desc, vd)->length;
+       else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
+               for (i = 0; i < bchan->curr_txd->num_desc; i++)
+                       residue += bchan->curr_txd->curr_desc[i].size;
+
+       spin_unlock_irqrestore(&bchan->vc.lock, flags);
+
+       dma_set_residue(txstate, residue);
+
+       if (ret == DMA_IN_PROGRESS && bchan->paused)
+               ret = DMA_PAUSED;
+
+       return ret;
+}
+
+/**
+ * bam_apply_new_config - apply the updated slave configuration to the channel
+ * @bchan: bam dma channel
+ * @dir: DMA direction
+ */
+static void bam_apply_new_config(struct bam_chan *bchan,
+       enum dma_transfer_direction dir)
+{
+       struct bam_device *bdev = bchan->bdev;
+       u32 maxburst;
+
+       if (dir == DMA_DEV_TO_MEM)
+               maxburst = bchan->slave.src_maxburst;
+       else
+               maxburst = bchan->slave.dst_maxburst;
+
+       writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+       bchan->reconfigure = 0;
+}
+
+/**
+ * bam_start_dma - start next transaction
+ * @bchan: bam dma channel
+ */
+static void bam_start_dma(struct bam_chan *bchan)
+{
+       struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
+       struct bam_device *bdev = bchan->bdev;
+       struct bam_async_desc *async_desc;
+       struct bam_desc_hw *desc;
+       struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
+                                       sizeof(struct bam_desc_hw));
+
+       lockdep_assert_held(&bchan->vc.lock);
+
+       if (!vd)
+               return;
+
+       list_del(&vd->node);
+
+       async_desc = container_of(vd, struct bam_async_desc, vd);
+       bchan->curr_txd = async_desc;
+
+       /* on first use, initialize the channel hardware */
+       if (!bchan->initialized)
+               bam_chan_init_hw(bchan, async_desc->dir);
+
+       /* apply new slave config changes, if necessary */
+       if (bchan->reconfigure)
+               bam_apply_new_config(bchan, async_desc->dir);
+
+       desc = bchan->curr_txd->curr_desc;
+
+       if (async_desc->num_desc > MAX_DESCRIPTORS)
+               async_desc->xfer_len = MAX_DESCRIPTORS;
+       else
+               async_desc->xfer_len = async_desc->num_desc;
+
+       /* set INT on last descriptor */
+       desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT;
+
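+       /* the descriptor FIFO is circular; split the copy when it wraps past the end */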
+       if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
+               u32 partial = MAX_DESCRIPTORS - bchan->tail;
+
+               memcpy(&fifo[bchan->tail], desc,
+                               partial * sizeof(struct bam_desc_hw));
+               memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
+                               sizeof(struct bam_desc_hw));
+       } else {
+               memcpy(&fifo[bchan->tail], desc,
+                       async_desc->xfer_len * sizeof(struct bam_desc_hw));
+       }
+
+       bchan->tail += async_desc->xfer_len;
+       bchan->tail %= MAX_DESCRIPTORS;
+
+       /* ensure descriptor writes and dma start not reordered */
+       wmb();
+       writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
+                       bdev->regs + BAM_P_EVNT_REG(bchan->id));
+}
+
+/**
+ * dma_tasklet - DMA IRQ tasklet
+ * @data: tasklet argument (bam controller structure)
+ *
+ * Sets up next DMA operation and then processes all completed transactions
+ */
+static void dma_tasklet(unsigned long data)
+{
+       struct bam_device *bdev = (struct bam_device *)data;
+       struct bam_chan *bchan;
+       unsigned long flags;
+       unsigned int i;
+
+       /* go through the channels and kick off transactions */
+       for (i = 0; i < bdev->num_channels; i++) {
+               bchan = &bdev->channels[i];
+               spin_lock_irqsave(&bchan->vc.lock, flags);
+
+               if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
+                       bam_start_dma(bchan);
+               spin_unlock_irqrestore(&bchan->vc.lock, flags);
+       }
+}
+
+/**
+ * bam_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Starts the next queued transaction immediately if the channel is idle
+ */
+static void bam_issue_pending(struct dma_chan *chan)
+{
+       struct bam_chan *bchan = to_bam_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&bchan->vc.lock, flags);
+
+       /* if work pending and idle, start a transaction */
+       if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
+               bam_start_dma(bchan);
+
+       spin_unlock_irqrestore(&bchan->vc.lock, flags);
+}
+
+/**
+ * bam_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void bam_dma_free_desc(struct virt_dma_desc *vd)
+{
+       struct bam_async_desc *async_desc = container_of(vd,
+                       struct bam_async_desc, vd);
+
+       kfree(async_desc);
+}
+
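+/*
+ * of_dma translation: the single DT cell selects the BAM channel (pipe)
+ * number, e.g. dmas = <&dma 4>; in a client node (illustrative example).
+ */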
+static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
+               struct of_dma *of)
+{
+       struct bam_device *bdev = container_of(of->of_dma_data,
+                                       struct bam_device, common);
+       unsigned int request;
+
+       if (dma_spec->args_count != 1)
+               return NULL;
+
+       request = dma_spec->args[0];
+       if (request >= bdev->num_channels)
+               return NULL;
+
+       return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
+}
+
+/**
+ * bam_init - initialization helper for global bam registers
+ * @bdev: bam device
+ *
+ * Resets and enables the BAM, reads its hardware configuration and sets up
+ * the global error interrupts
+ */
+static int bam_init(struct bam_device *bdev)
+{
+       u32 val;
+
+       /* read revision and configuration information */
+       val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT;
+       val &= NUM_EES_MASK;
+
+       /* check that configured EE is within range */
+       if (bdev->ee >= val)
+               return -EINVAL;
+
+       val = readl_relaxed(bdev->regs + BAM_NUM_PIPES);
+       bdev->num_channels = val & BAM_NUM_PIPES_MASK;
+
+       /* s/w reset bam */
+       /* after reset all pipes are disabled and idle */
+       val = readl_relaxed(bdev->regs + BAM_CTRL);
+       val |= BAM_SW_RST;
+       writel_relaxed(val, bdev->regs + BAM_CTRL);
+       val &= ~BAM_SW_RST;
+       writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+       /* make sure previous stores are visible before enabling BAM */
+       wmb();
+
+       /* enable bam */
+       val |= BAM_EN;
+       writel_relaxed(val, bdev->regs + BAM_CTRL);
+
+       /* set descriptor threshold, start with 4 bytes */
+       writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD);
+
+       /* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
+       writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS);
+
+       /* enable irqs for errors */
+       writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
+                               bdev->regs + BAM_IRQ_EN);
+
+       /* unmask global bam interrupt */
+       writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+       return 0;
+}
+
+static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
+       u32 index)
+{
+       bchan->id = index;
+       bchan->bdev = bdev;
+
+       vchan_init(&bchan->vc, &bdev->common);
+       bchan->vc.desc_free = bam_dma_free_desc;
+}
+
+static int bam_dma_probe(struct platform_device *pdev)
+{
+       struct bam_device *bdev;
+       struct resource *iores;
+       int ret, i;
+
+       bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
+       if (!bdev)
+               return -ENOMEM;
+
+       bdev->dev = &pdev->dev;
+
+       iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
+       if (IS_ERR(bdev->regs))
+               return PTR_ERR(bdev->regs);
+
+       bdev->irq = platform_get_irq(pdev, 0);
+       if (bdev->irq < 0)
+               return bdev->irq;
+
+       ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
+       if (ret) {
+               dev_err(bdev->dev, "Execution environment unspecified\n");
+               return ret;
+       }
+
+       bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
+       if (IS_ERR(bdev->bamclk))
+               return PTR_ERR(bdev->bamclk);
+
+       ret = clk_prepare_enable(bdev->bamclk);
+       if (ret) {
+               dev_err(bdev->dev, "failed to prepare/enable clock\n");
+               return ret;
+       }
+
+       ret = bam_init(bdev);
+       if (ret)
+               goto err_disable_clk;
+
+       tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
+
+       bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
+                               sizeof(*bdev->channels), GFP_KERNEL);
+
+       if (!bdev->channels) {
+               ret = -ENOMEM;
+               goto err_disable_clk;
+       }
+
+       /* allocate and initialize channels */
+       INIT_LIST_HEAD(&bdev->common.channels);
+
+       for (i = 0; i < bdev->num_channels; i++)
+               bam_channel_init(bdev, &bdev->channels[i], i);
+
+       ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
+                       IRQF_TRIGGER_HIGH, "bam_dma", bdev);
+       if (ret)
+               goto err_disable_clk;
+
+       /* set max dma segment size */
+       bdev->common.dev = bdev->dev;
+       bdev->common.dev->dma_parms = &bdev->dma_parms;
+       ret = dma_set_max_seg_size(bdev->common.dev, BAM_MAX_DATA_SIZE);
+       if (ret) {
+               dev_err(bdev->dev, "cannot set maximum segment size\n");
+               goto err_disable_clk;
+       }
+
+       platform_set_drvdata(pdev, bdev);
+
+       /* set capabilities */
+       dma_cap_zero(bdev->common.cap_mask);
+       dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
+
+       /* initialize dmaengine apis */
+       bdev->common.device_alloc_chan_resources = bam_alloc_chan;
+       bdev->common.device_free_chan_resources = bam_free_chan;
+       bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
+       bdev->common.device_control = bam_control;
+       bdev->common.device_issue_pending = bam_issue_pending;
+       bdev->common.device_tx_status = bam_tx_status;
+       bdev->common.dev = bdev->dev;
+
+       ret = dma_async_device_register(&bdev->common);
+       if (ret) {
+               dev_err(bdev->dev, "failed to register dma async device\n");
+               goto err_disable_clk;
+       }
+
+       ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
+                                       &bdev->common);
+       if (ret)
+               goto err_unregister_dma;
+
+       return 0;
+
+err_unregister_dma:
+       dma_async_device_unregister(&bdev->common);
+err_disable_clk:
+       clk_disable_unprepare(bdev->bamclk);
+       return ret;
+}
+
+static int bam_dma_remove(struct platform_device *pdev)
+{
+       struct bam_device *bdev = platform_get_drvdata(pdev);
+       u32 i;
+
+       of_dma_controller_free(pdev->dev.of_node);
+       dma_async_device_unregister(&bdev->common);
+
+       /* mask all interrupts for this execution environment */
+       writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee));
+
+       devm_free_irq(bdev->dev, bdev->irq, bdev);
+
+       for (i = 0; i < bdev->num_channels; i++) {
+               bam_dma_terminate_all(&bdev->channels[i]);
+               tasklet_kill(&bdev->channels[i].vc.task);
+
+               dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE,
+                       bdev->channels[i].fifo_virt,
+                       bdev->channels[i].fifo_phys);
+       }
+
+       tasklet_kill(&bdev->task);
+
+       clk_disable_unprepare(bdev->bamclk);
+
+       return 0;
+}
+
+static const struct of_device_id bam_of_match[] = {
+       { .compatible = "qcom,bam-v1.4.0", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, bam_of_match);
+
+static struct platform_driver bam_dma_driver = {
+       .probe = bam_dma_probe,
+       .remove = bam_dma_remove,
+       .driver = {
+               .name = "bam-dma-engine",
+               .owner = THIS_MODULE,
+               .of_match_table = bam_of_match,
+       },
+};
+
+module_platform_driver(bam_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
+MODULE_LICENSE("GPL v2");
index 4eddedb6eb7dd3deb68550c1b8065e8b9d882278..b209a0f173444f4df6431744b56f7815665e6dff 100644 (file)
@@ -192,7 +192,7 @@ struct s3c24xx_dma_phy {
        unsigned int                    id;
        bool                            valid;
        void __iomem                    *base;
-       unsigned int                    irq;
+       int                             irq;
        struct clk                      *clk;
        spinlock_t                      lock;
        struct s3c24xx_dma_chan         *serving;
index dadd9e010c0b0979f15a5a195109af1e1e1e13de..b4c813831006ed1ad665c35c7d7e48a4d88ff589 100644 (file)
@@ -29,6 +29,12 @@ config RCAR_HPB_DMAE
        help
          Enable support for the Renesas R-Car series DMA controllers.
 
+config RCAR_AUDMAC_PP
+       tristate "Renesas R-Car Audio DMAC Peripheral Peripheral support"
+       depends on SH_DMAE_BASE
+       help
+         Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers.
+
 config SHDMA_R8A73A4
        def_bool y
        depends on ARCH_R8A73A4 && SH_DMAE != n
index e856af23b789567da9986ad2bc487d3f26487991..1ce88b28cfc62595240b7d54c79d731d4ec4fc22 100644 (file)
@@ -7,3 +7,4 @@ endif
 shdma-objs := $(shdma-y)
 obj-$(CONFIG_SUDMAC) += sudmac.o
 obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
+obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
new file mode 100644 (file)
index 0000000..2de7728
--- /dev/null
@@ -0,0 +1,320 @@
+/*
+ * Renesas R-Car Audio-DMAC-peri-peri (Audio DMAC peripheral-to-peripheral) driver.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * based on the drivers/dma/sh/shdma.c
+ *
+ * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
+ * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-rcar-audmapp.h>
+#include <linux/platform_device.h>
+#include <linux/shdma-base.h>
+
+/*
+ * DMA register
+ */
+#define PDMASAR                0x00
+#define PDMADAR                0x04
+#define PDMACHCR       0x0c
+
+/* PDMACHCR */
+#define PDMACHCR_DE            (1 << 0)
+
+#define AUDMAPP_MAX_CHANNELS   29
+
+/* Default MEMCPY transfer size = 2^2 = 4 bytes */
+#define LOG2_DEFAULT_XFER_SIZE 2
+#define AUDMAPP_SLAVE_NUMBER   256
+#define AUDMAPP_LEN_MAX                (16 * 1024 * 1024)
+
+struct audmapp_chan {
+       struct shdma_chan shdma_chan;
+       struct audmapp_slave_config *config;
+       void __iomem *base;
+};
+
+struct audmapp_device {
+       struct shdma_dev shdma_dev;
+       struct audmapp_pdata *pdata;
+       struct device *dev;
+       void __iomem *chan_reg;
+};
+
+#define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan)
+#define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device,    \
+                                 struct audmapp_device, shdma_dev.dma_dev)
+
+static void audmapp_write(struct audmapp_chan *auchan, u32 data, u32 reg)
+{
+       struct audmapp_device *audev = to_dev(auchan);
+       struct device *dev = audev->dev;
+
+       dev_dbg(dev, "w %p : %08x\n", auchan->base + reg, data);
+
+       iowrite32(data, auchan->base + reg);
+}
+
+static u32 audmapp_read(struct audmapp_chan *auchan, u32 reg)
+{
+       return ioread32(auchan->base + reg);
+}
+
+static void audmapp_halt(struct shdma_chan *schan)
+{
+       struct audmapp_chan *auchan = to_chan(schan);
+       int i;
+
+       audmapp_write(auchan, 0, PDMACHCR);
+
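+       /* poll until the channel reports idle; give up after roughly 1ms */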
+       for (i = 0; i < 1024; i++) {
+               if (0 == audmapp_read(auchan, PDMACHCR))
+                       return;
+               udelay(1);
+       }
+}
+
+static void audmapp_start_xfer(struct shdma_chan *schan,
+                              struct shdma_desc *sdesc)
+{
+       struct audmapp_chan *auchan = to_chan(schan);
+       struct audmapp_device *audev = to_dev(auchan);
+       struct audmapp_slave_config *cfg = auchan->config;
+       struct device *dev = audev->dev;
+       u32 chcr = cfg->chcr | PDMACHCR_DE;
+
+       dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n",
+               &cfg->src, &cfg->dst, cfg->chcr);
+
+       audmapp_write(auchan, cfg->src, PDMASAR);
+       audmapp_write(auchan, cfg->dst, PDMADAR);
+       audmapp_write(auchan, chcr,     PDMACHCR);
+}
+
+static struct audmapp_slave_config *
+audmapp_find_slave(struct audmapp_chan *auchan, int slave_id)
+{
+       struct audmapp_device *audev = to_dev(auchan);
+       struct audmapp_pdata *pdata = audev->pdata;
+       struct audmapp_slave_config *cfg;
+       int i;
+
+       if (slave_id >= AUDMAPP_SLAVE_NUMBER)
+               return NULL;
+
+       for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++)
+               if (cfg->slave_id == slave_id)
+                       return cfg;
+
+       return NULL;
+}
+
+static int audmapp_set_slave(struct shdma_chan *schan, int slave_id,
+                            dma_addr_t slave_addr, bool try)
+{
+       struct audmapp_chan *auchan = to_chan(schan);
+       struct audmapp_slave_config *cfg =
+               audmapp_find_slave(auchan, slave_id);
+
+       if (!cfg)
+               return -ENODEV;
+       if (try)
+               return 0;
+
+       auchan->config  = cfg;
+
+       return 0;
+}
+
+static int audmapp_desc_setup(struct shdma_chan *schan,
+                             struct shdma_desc *sdesc,
+                             dma_addr_t src, dma_addr_t dst, size_t *len)
+{
+       struct audmapp_chan *auchan = to_chan(schan);
+       struct audmapp_slave_config *cfg = auchan->config;
+
+       if (!cfg)
+               return -ENODEV;
+
+       if (*len > (size_t)AUDMAPP_LEN_MAX)
+               *len = (size_t)AUDMAPP_LEN_MAX;
+
+       return 0;
+}
+
+static void audmapp_setup_xfer(struct shdma_chan *schan,
+                              int slave_id)
+{
+}
+
+static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan)
+{
+       return 0; /* always fixed address */
+}
+
+static bool audmapp_channel_busy(struct shdma_chan *schan)
+{
+       struct audmapp_chan *auchan = to_chan(schan);
+       u32 chcr = audmapp_read(auchan, PDMACHCR);
+
+       return chcr & ~PDMACHCR_DE;
+}
+
+static bool audmapp_desc_completed(struct shdma_chan *schan,
+                                  struct shdma_desc *sdesc)
+{
+       return true;
+}
+
+static struct shdma_desc *audmapp_embedded_desc(void *buf, int i)
+{
+       return &((struct shdma_desc *)buf)[i];
+}
+
+static const struct shdma_ops audmapp_shdma_ops = {
+       .halt_channel   = audmapp_halt,
+       .desc_setup     = audmapp_desc_setup,
+       .set_slave      = audmapp_set_slave,
+       .start_xfer     = audmapp_start_xfer,
+       .embedded_desc  = audmapp_embedded_desc,
+       .setup_xfer     = audmapp_setup_xfer,
+       .slave_addr     = audmapp_slave_addr,
+       .channel_busy   = audmapp_channel_busy,
+       .desc_completed = audmapp_desc_completed,
+};
+
+static int audmapp_chan_probe(struct platform_device *pdev,
+                             struct audmapp_device *audev, int id)
+{
+       struct shdma_dev *sdev = &audev->shdma_dev;
+       struct audmapp_chan *auchan;
+       struct shdma_chan *schan;
+       struct device *dev = audev->dev;
+
+       auchan = devm_kzalloc(dev, sizeof(*auchan), GFP_KERNEL);
+       if (!auchan)
+               return -ENOMEM;
+
+       schan = &auchan->shdma_chan;
+       schan->max_xfer_len = AUDMAPP_LEN_MAX;
+
+       shdma_chan_probe(sdev, schan, id);
+
+       auchan->base = audev->chan_reg + 0x20 + (0x10 * id);
+       dev_dbg(dev, "%02d : %p / %p\n", id, auchan->base, audev->chan_reg);
+
+       return 0;
+}
+
+static void audmapp_chan_remove(struct audmapp_device *audev)
+{
+       struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+       struct shdma_chan *schan;
+       int i;
+
+       shdma_for_each_chan(schan, &audev->shdma_dev, i) {
+               BUG_ON(!schan);
+               shdma_chan_remove(schan);
+       }
+       dma_dev->chancnt = 0;
+}
+
+static int audmapp_probe(struct platform_device *pdev)
+{
+       struct audmapp_pdata *pdata = pdev->dev.platform_data;
+       struct audmapp_device *audev;
+       struct shdma_dev *sdev;
+       struct dma_device *dma_dev;
+       struct resource *res;
+       int err, i;
+
+       if (!pdata)
+               return -ENODEV;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+       audev = devm_kzalloc(&pdev->dev, sizeof(*audev), GFP_KERNEL);
+       if (!audev)
+               return -ENOMEM;
+
+       audev->dev      = &pdev->dev;
+       audev->pdata    = pdata;
+       audev->chan_reg = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(audev->chan_reg))
+               return PTR_ERR(audev->chan_reg);
+
+       sdev            = &audev->shdma_dev;
+       sdev->ops       = &audmapp_shdma_ops;
+       sdev->desc_size = sizeof(struct shdma_desc);
+
+       dma_dev                 = &sdev->dma_dev;
+       dma_dev->copy_align     = LOG2_DEFAULT_XFER_SIZE;
+       dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+
+       err = shdma_init(&pdev->dev, sdev, AUDMAPP_MAX_CHANNELS);
+       if (err < 0)
+               return err;
+
+       platform_set_drvdata(pdev, audev);
+
+       /* Create DMA Channel */
+       for (i = 0; i < AUDMAPP_MAX_CHANNELS; i++) {
+               err = audmapp_chan_probe(pdev, audev, i);
+               if (err)
+                       goto chan_probe_err;
+       }
+
+       err = dma_async_device_register(dma_dev);
+       if (err < 0)
+               goto chan_probe_err;
+
+       return err;
+
+chan_probe_err:
+       audmapp_chan_remove(audev);
+       shdma_cleanup(sdev);
+
+       return err;
+}
+
+static int audmapp_remove(struct platform_device *pdev)
+{
+       struct audmapp_device *audev = platform_get_drvdata(pdev);
+       struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
+
+       dma_async_device_unregister(dma_dev);
+
+       audmapp_chan_remove(audev);
+       shdma_cleanup(&audev->shdma_dev);
+
+       return 0;
+}
+
+static struct platform_driver audmapp_driver = {
+       .probe          = audmapp_probe,
+       .remove         = audmapp_remove,
+       .driver         = {
+               .owner  = THIS_MODULE,
+               .name   = "rcar-audmapp-engine",
+       },
+};
+module_platform_driver(audmapp_driver);
+
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_DESCRIPTION("Renesas R-Car Audio DMAC peri-peri driver");
+MODULE_LICENSE("GPL");
index 2e7b394def8058e4d1216ad8a07c785db4d87048..52396771acbe53951c3d080660621355b6500e3d 100644 (file)
@@ -227,7 +227,7 @@ bool shdma_chan_filter(struct dma_chan *chan, void *arg)
        struct shdma_chan *schan = to_shdma_chan(chan);
        struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
        const struct shdma_ops *ops = sdev->ops;
-       int match = (int)arg;
+       int match = (long)arg;
        int ret;
 
        if (match < 0)
@@ -491,8 +491,8 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
        }
 
        dev_dbg(schan->dev,
-               "chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
-               copy_size, *len, *src, *dst, &new->async_tx,
+               "chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
+               copy_size, *len, src, dst, &new->async_tx,
                new->async_tx.cookie);
 
        new->mark = DESC_PREPARED;
@@ -555,8 +555,8 @@ static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
                        goto err_get_desc;
 
                do {
-                       dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
-                               i, sg, len, (unsigned long long)sg_addr);
+                       dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
+                               i, sg, len, &sg_addr);
 
                        if (direction == DMA_DEV_TO_MEM)
                                new = shdma_add_desc(schan, flags,
index 06473a05fe4ebc97a7b925772604987dd476c3a4..b4ff9d3e56d18561893f31522de0648273f72ace 100644 (file)
@@ -33,7 +33,8 @@ static struct dma_chan *shdma_of_xlate(struct of_phandle_args *dma_spec,
        /* Only slave DMA channels can be allocated via DT */
        dma_cap_set(DMA_SLAVE, mask);
 
-       chan = dma_request_channel(mask, shdma_chan_filter, (void *)id);
+       chan = dma_request_channel(mask, shdma_chan_filter,
+                                  (void *)(uintptr_t)id);
        if (chan)
                to_shdma_chan(chan)->hw_req = id;
 
index 0d765c0e21ec9de8cb4e25ff66d5fd53ee78d95d..dda7e7563f5dce28fd85b9617f0fc6bb16821382 100644 (file)
@@ -443,6 +443,7 @@ static bool sh_dmae_reset(struct sh_dmae_device *shdev)
        return ret;
 }
 
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
 static irqreturn_t sh_dmae_err(int irq, void *data)
 {
        struct sh_dmae_device *shdev = data;
@@ -453,6 +454,7 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
        sh_dmae_reset(shdev);
        return IRQ_HANDLED;
 }
+#endif
 
 static bool sh_dmae_desc_completed(struct shdma_chan *schan,
                                   struct shdma_desc *sdesc)
@@ -637,7 +639,7 @@ static int sh_dmae_resume(struct device *dev)
 #define sh_dmae_resume NULL
 #endif
 
-const struct dev_pm_ops sh_dmae_pm = {
+static const struct dev_pm_ops sh_dmae_pm = {
        .suspend                = sh_dmae_suspend,
        .resume                 = sh_dmae_resume,
        .runtime_suspend        = sh_dmae_runtime_suspend,
@@ -685,9 +687,12 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
 static int sh_dmae_probe(struct platform_device *pdev)
 {
        const struct sh_dmae_pdata *pdata;
-       unsigned long irqflags = 0,
-               chan_flag[SH_DMAE_MAX_CHANNELS] = {};
-       int errirq, chan_irq[SH_DMAE_MAX_CHANNELS];
+       unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {};
+       int chan_irq[SH_DMAE_MAX_CHANNELS];
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARM)
+       unsigned long irqflags = 0;
+       int errirq;
+#endif
        int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
        struct sh_dmae_device *shdev;
        struct dma_device *dma_dev;
index c7e9cdff0708125d885e301324f4f5d52c5269d5..4e7df43b50d6c5a49026779687930aa254942773 100644 (file)
@@ -178,8 +178,8 @@ static int sudmac_desc_setup(struct shdma_chan *schan,
        struct sudmac_chan *sc = to_chan(schan);
        struct sudmac_desc *sd = to_desc(sdesc);
 
-       dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n",
-               __func__, src, dst, *len);
+       dev_dbg(sc->shdma_chan.dev, "%s: src=%pad, dst=%pad, len=%zu\n",
+               __func__, &src, &dst, *len);
 
        if (*len > schan->max_xfer_len)
                *len = schan->max_xfer_len;
index d4d3a3109b163f3c3a4a471cfdbc82c838024437..a1bd8298d55f1973313b969bf560c76967212dc4 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/clk.h>
+#include <linux/of_dma.h>
 #include <linux/sirfsoc_dma.h>
 
 #include "dmaengine.h"
@@ -659,6 +660,18 @@ static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan,
        return 0;
 }
 
+static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
+       struct of_dma *ofdma)
+{
+       struct sirfsoc_dma *sdma = ofdma->of_dma_data;
+       unsigned int request = dma_spec->args[0];
+
+       if (request >= SIRFSOC_DMA_CHANNELS)
+               return NULL;
+
+       return dma_get_slave_channel(&sdma->channels[request].chan);
+}
+
 static int sirfsoc_dma_probe(struct platform_device *op)
 {
        struct device_node *dn = op->dev.of_node;
@@ -764,11 +777,20 @@ static int sirfsoc_dma_probe(struct platform_device *op)
        if (ret)
                goto free_irq;
 
+       /* Device-tree DMA controller registration */
+       ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
+       if (ret) {
+               dev_err(dev, "failed to register DMA controller\n");
+               goto unreg_dma_dev;
+       }
+
        pm_runtime_enable(&op->dev);
        dev_info(dev, "initialized SIRFSOC DMAC driver\n");
 
        return 0;
 
+unreg_dma_dev:
+       dma_async_device_unregister(dma);
 free_irq:
        free_irq(sdma->irq, sdma);
 irq_dispose:
@@ -781,6 +803,7 @@ static int sirfsoc_dma_remove(struct platform_device *op)
        struct device *dev = &op->dev;
        struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
 
+       of_dma_controller_free(op->dev.of_node);
        dma_async_device_unregister(&sdma->dma);
        free_irq(sdma->irq, sdma);
        irq_dispose_mapping(sdma->irq);
index ff50aeebf0d98828072eea4b40944ebfa7594c77..2c41eaece2c17370008efeffdbe4986050e55adf 100644 (file)
@@ -397,7 +397,7 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                                else
                                        chunksize = size;
 
-                               status = efi_file_read(fh, files[j].handle,
+                               status = efi_file_read(files[j].handle,
                                                       &chunksize,
                                                       (void *)addr);
                                if (status != EFI_SUCCESS) {
@@ -408,7 +408,7 @@ static efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                                size -= chunksize;
                        }
 
-                       efi_file_close(fh, files[j].handle);
+                       efi_file_close(files[j].handle);
                }
 
        }
@@ -425,7 +425,7 @@ free_file_total:
 
 close_handles:
        for (k = j; k < i; k++)
-               efi_file_close(fh, files[k].handle);
+               efi_file_close(files[k].handle);
 free_files:
        efi_call_early(free_pool, files);
 fail:
index 51493ed4643b019a1cd80e423485a75f363b8dbe..a43220c2e3d943a3437e29df1352aca30593e4cf 100644 (file)
@@ -196,6 +196,53 @@ static struct cpuidle_state snb_cstates[] = {
                .enter = NULL }
 };
 
+static struct cpuidle_state byt_cstates[] = {
+       {
+               .name = "C1-BYT",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       {
+               .name = "C1E-BYT",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 15,
+               .target_residency = 30,
+               .enter = &intel_idle },
+       {
+               .name = "C6N-BYT",
+               .desc = "MWAIT 0x58",
+               .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 40,
+               .target_residency = 275,
+               .enter = &intel_idle },
+       {
+               .name = "C6S-BYT",
+               .desc = "MWAIT 0x52",
+               .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 140,
+               .target_residency = 560,
+               .enter = &intel_idle },
+       {
+               .name = "C7-BYT",
+               .desc = "MWAIT 0x60",
+               .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 1200,
+               .target_residency = 1500,
+               .enter = &intel_idle },
+       {
+               .name = "C7S-BYT",
+               .desc = "MWAIT 0x64",
+               .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 10000,
+               .target_residency = 20000,
+               .enter = &intel_idle },
+       {
+               .enter = NULL }
+};
+
 static struct cpuidle_state ivb_cstates[] = {
        {
                .name = "C1-IVB",
@@ -236,6 +283,105 @@ static struct cpuidle_state ivb_cstates[] = {
                .enter = NULL }
 };
 
+static struct cpuidle_state ivt_cstates[] = {
+       {
+               .name = "C1-IVT",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       {
+               .name = "C1E-IVT",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 10,
+               .target_residency = 80,
+               .enter = &intel_idle },
+       {
+               .name = "C3-IVT",
+               .desc = "MWAIT 0x10",
+               .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 59,
+               .target_residency = 156,
+               .enter = &intel_idle },
+       {
+               .name = "C6-IVT",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 82,
+               .target_residency = 300,
+               .enter = &intel_idle },
+       {
+               .enter = NULL }
+};
+
+static struct cpuidle_state ivt_cstates_4s[] = {
+       {
+               .name = "C1-IVT-4S",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       {
+               .name = "C1E-IVT-4S",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 10,
+               .target_residency = 250,
+               .enter = &intel_idle },
+       {
+               .name = "C3-IVT-4S",
+               .desc = "MWAIT 0x10",
+               .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 59,
+               .target_residency = 300,
+               .enter = &intel_idle },
+       {
+               .name = "C6-IVT-4S",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 84,
+               .target_residency = 400,
+               .enter = &intel_idle },
+       {
+               .enter = NULL }
+};
+
+static struct cpuidle_state ivt_cstates_8s[] = {
+       {
+               .name = "C1-IVT-8S",
+               .desc = "MWAIT 0x00",
+               .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 1,
+               .target_residency = 1,
+               .enter = &intel_idle },
+       {
+               .name = "C1E-IVT-8S",
+               .desc = "MWAIT 0x01",
+               .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_TIME_VALID,
+               .exit_latency = 10,
+               .target_residency = 500,
+               .enter = &intel_idle },
+       {
+               .name = "C3-IVT-8S",
+               .desc = "MWAIT 0x10",
+               .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 59,
+               .target_residency = 600,
+               .enter = &intel_idle },
+       {
+               .name = "C6-IVT-8S",
+               .desc = "MWAIT 0x20",
+               .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
+               .exit_latency = 88,
+               .target_residency = 700,
+               .enter = &intel_idle },
+       {
+               .enter = NULL }
+};
+
 static struct cpuidle_state hsw_cstates[] = {
        {
                .name = "C1-HSW",
@@ -464,11 +610,21 @@ static const struct idle_cpu idle_cpu_snb = {
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_byt = {
+       .state_table = byt_cstates,
+       .disable_promotion_to_c1e = true,
+};
+
 static const struct idle_cpu idle_cpu_ivb = {
        .state_table = ivb_cstates,
        .disable_promotion_to_c1e = true,
 };
 
+static const struct idle_cpu idle_cpu_ivt = {
+       .state_table = ivt_cstates,
+       .disable_promotion_to_c1e = true,
+};
+
 static const struct idle_cpu idle_cpu_hsw = {
        .state_table = hsw_cstates,
        .disable_promotion_to_c1e = true,
@@ -494,8 +650,10 @@ static const struct x86_cpu_id intel_idle_ids[] = {
        ICPU(0x2f, idle_cpu_nehalem),
        ICPU(0x2a, idle_cpu_snb),
        ICPU(0x2d, idle_cpu_snb),
+       ICPU(0x36, idle_cpu_atom),
+       ICPU(0x37, idle_cpu_byt),
        ICPU(0x3a, idle_cpu_ivb),
-       ICPU(0x3e, idle_cpu_ivb),
+       ICPU(0x3e, idle_cpu_ivt),
        ICPU(0x3c, idle_cpu_hsw),
        ICPU(0x3f, idle_cpu_hsw),
        ICPU(0x45, idle_cpu_hsw),
@@ -572,6 +730,39 @@ static void intel_idle_cpuidle_devices_uninit(void)
        free_percpu(intel_idle_cpuidle_devices);
        return;
 }
+
+/*
+ * intel_idle_state_table_update()
+ *
+ * Update the default state_table for this CPU-id
+ *
+ * Currently used to pick the IVT state table tuned for the socket count
+ * Assumption: num_sockets == (max_package_num + 1)
+ */
+static void intel_idle_state_table_update(void)
+{
+       /* IVT uses a different table for 1-2, 3-4, and > 4 sockets */
+       if (boot_cpu_data.x86_model == 0x3e) { /* IVT */
+               int cpu, package_num, num_sockets = 1;
+
+               for_each_online_cpu(cpu) {
+                       package_num = topology_physical_package_id(cpu);
+                       if (package_num + 1 > num_sockets) {
+                               num_sockets = package_num + 1;
+
+                               if (num_sockets > 4) {
+                                       cpuidle_state_table = ivt_cstates_8s;
+                                       return;
+                               }
+                       }
+               }
+
+               if (num_sockets > 2)
+                       cpuidle_state_table = ivt_cstates_4s;
+               /* else, 1 and 2 socket systems use default ivt_cstates */
+       }
+       return;
+}
+
 /*
  * intel_idle_cpuidle_driver_init()
  * allocate, initialize cpuidle_states
@@ -581,10 +772,12 @@ static int __init intel_idle_cpuidle_driver_init(void)
        int cstate;
        struct cpuidle_driver *drv = &intel_idle_driver;
 
+       intel_idle_state_table_update();
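+       /* may switch cpuidle_state_table to a socket-count specific variant */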
+
        drv->state_count = 1;
 
        for (cstate = 0; cstate < CPUIDLE_STATE_MAX; ++cstate) {
-               int num_substates, mwait_hint, mwait_cstate, mwait_substate;
+               int num_substates, mwait_hint, mwait_cstate;
 
                if (cpuidle_state_table[cstate].enter == NULL)
                        break;
@@ -597,14 +790,13 @@ static int __init intel_idle_cpuidle_driver_init(void)
 
                mwait_hint = flg2MWAIT(cpuidle_state_table[cstate].flags);
                mwait_cstate = MWAIT_HINT2CSTATE(mwait_hint);
-               mwait_substate = MWAIT_HINT2SUBSTATE(mwait_hint);
 
-               /* does the state exist in CPUID.MWAIT? */
+               /* number of sub-states for this state in CPUID.MWAIT */
                num_substates = (mwait_substates >> ((mwait_cstate + 1) * 4))
                                        & MWAIT_SUBSTATE_MASK;
 
-               /* if sub-state in table is not enumerated by CPUID */
-               if ((mwait_substate + 1) > num_substates)
+               /* if NO sub-states for this state in CPUID, skip it */
+               if (num_substates == 0)
                        continue;
 
                if (((mwait_cstate + 1) > 2) &&
index 8ee228e9ab5aa28b85ee2893a92b3ab243c35388..c98fdb185931644bd3ed92d337f85aa0d2517149 100644 (file)
@@ -51,6 +51,8 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
+static int
+isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
@@ -87,7 +89,8 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
+                   u8 protection)
 {
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
@@ -119,6 +122,8 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
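+       /* a signature-enabled QP is required for T10-PI offload */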
+       if (protection)
+               attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
        pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
                 cma_id->device);
@@ -226,7 +231,8 @@ isert_create_device_ib_res(struct isert_device *device)
                return ret;
 
        /* assign function handlers */
-       if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+       if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
+           dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
                device->use_fastreg = 1;
                device->reg_rdma_mem = isert_reg_rdma;
                device->unreg_rdma_mem = isert_unreg_rdma;
@@ -236,13 +242,18 @@ isert_create_device_ib_res(struct isert_device *device)
                device->unreg_rdma_mem = isert_unmap_cmd;
        }
 
+       /* Check signature cap */
+       device->pi_capable = dev_attr->device_cap_flags &
+                            IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
+
        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
        pr_debug("Using %d CQs, device %s supports %d vectors support "
-                "Fast registration %d\n",
+                "Fast registration %d pi_capable %d\n",
                 device->cqs_used, device->ib_device->name,
-                device->ib_device->num_comp_vectors, device->use_fastreg);
+                device->ib_device->num_comp_vectors, device->use_fastreg,
+                device->pi_capable);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
@@ -395,6 +406,12 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
+               if (fr_desc->pi_ctx) {
+                       ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+                       ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+                       ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
+                       kfree(fr_desc->pi_ctx);
+               }
                kfree(fr_desc);
                ++i;
        }
@@ -406,8 +423,10 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
 
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-                    struct fast_reg_descriptor *fr_desc)
+                    struct fast_reg_descriptor *fr_desc, u8 protection)
 {
+       int ret;
+
        fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                         ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_frpl)) {
@@ -420,27 +439,88 @@ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
        if (IS_ERR(fr_desc->data_mr)) {
                pr_err("Failed to allocate data frmr err=%ld\n",
                       PTR_ERR(fr_desc->data_mr));
-               ib_free_fast_reg_page_list(fr_desc->data_frpl);
-               return PTR_ERR(fr_desc->data_mr);
+               ret = PTR_ERR(fr_desc->data_mr);
+               goto err_data_frpl;
        }
        pr_debug("Create fr_desc %p page_list %p\n",
                 fr_desc, fr_desc->data_frpl->page_list);
+       fr_desc->ind |= ISERT_DATA_KEY_VALID;
+
+       if (protection) {
+               struct ib_mr_init_attr mr_init_attr = {0};
+               struct pi_context *pi_ctx;
+
+               fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
+               if (!fr_desc->pi_ctx) {
+                       pr_err("Failed to allocate pi context\n");
+                       ret = -ENOMEM;
+                       goto err_data_mr;
+               }
+               pi_ctx = fr_desc->pi_ctx;
+
+               pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
+                                                   ISCSI_ISER_SG_TABLESIZE);
+               if (IS_ERR(pi_ctx->prot_frpl)) {
+                       pr_err("Failed to allocate prot frpl err=%ld\n",
+                              PTR_ERR(pi_ctx->prot_frpl));
+                       ret = PTR_ERR(pi_ctx->prot_frpl);
+                       goto err_pi_ctx;
+               }
 
-       fr_desc->valid = true;
+               pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+               if (IS_ERR(pi_ctx->prot_mr)) {
+                       pr_err("Failed to allocate prot frmr err=%ld\n",
+                              PTR_ERR(pi_ctx->prot_mr));
+                       ret = PTR_ERR(pi_ctx->prot_mr);
+                       goto err_prot_frpl;
+               }
+               fr_desc->ind |= ISERT_PROT_KEY_VALID;
+
+               mr_init_attr.max_reg_descriptors = 2;
+               mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+               pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+               if (IS_ERR(pi_ctx->sig_mr)) {
+                       pr_err("Failed to allocate signature enabled mr err=%ld\n",
+                              PTR_ERR(pi_ctx->sig_mr));
+                       ret = PTR_ERR(pi_ctx->sig_mr);
+                       goto err_prot_mr;
+               }
+               fr_desc->ind |= ISERT_SIG_KEY_VALID;
+       }
+       fr_desc->ind &= ~ISERT_PROTECTED;
 
        return 0;
+err_prot_mr:
+       ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
+err_prot_frpl:
+       ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+       kfree(fr_desc->pi_ctx);
+err_data_mr:
+       ib_dereg_mr(fr_desc->data_mr);
+err_data_frpl:
+       ib_free_fast_reg_page_list(fr_desc->data_frpl);
+
+       return ret;
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
 {
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
-       int i, ret;
+       struct se_session *se_sess = isert_conn->conn->sess->se_sess;
+       struct se_node_acl *se_nacl = se_sess->se_node_acl;
+       int i, ret, tag_num;
+       /*
+        * Setup the number of FRMRs based upon the number of tags
+        * available to session in iscsi_target_locate_portal().
+        */
+       tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
+       tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;
 
-       INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
        isert_conn->conn_fr_pool_size = 0;
-       for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
+       for (i = 0; i < tag_num; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        pr_err("Failed to allocate fast_reg descriptor\n");
@@ -449,7 +529,8 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
                }
 
                ret = isert_create_fr_desc(device->ib_device,
-                                          isert_conn->conn_pd, fr_desc);
+                                          isert_conn->conn_pd, fr_desc,
+                                          pi_support);
                if (ret) {
                        pr_err("Failed to create fastreg descriptor err=%d\n",
                               ret);
@@ -480,6 +561,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;
+       u8 pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
 
        pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
                 cma_id, cma_id->context);
@@ -498,6 +580,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        kref_get(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
+       INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
        cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
@@ -569,16 +652,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                goto out_mr;
        }
 
-       if (device->use_fastreg) {
-               ret = isert_conn_create_fastreg_pool(isert_conn);
-               if (ret) {
-                       pr_err("Conn: %p failed to create fastreg pool\n",
-                              isert_conn);
-                       goto out_fastreg;
-               }
+       if (pi_support && !device->pi_capable) {
+               pr_err("Protection information requested but not supported\n");
+               ret = -EINVAL;
+               goto out_mr;
        }
 
-       ret = isert_conn_setup_qp(isert_conn, cma_id);
+       ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
        if (ret)
                goto out_conn_dev;
 
@@ -591,9 +671,6 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        return 0;
 
 out_conn_dev:
-       if (device->use_fastreg)
-               isert_conn_free_fastreg_pool(isert_conn);
-out_fastreg:
        ib_dereg_mr(isert_conn->conn_mr);
 out_mr:
        ib_dealloc_pd(isert_conn->conn_pd);
@@ -967,6 +1044,18 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
        }
        if (!login->login_failed) {
                if (login->login_complete) {
+                       if (isert_conn->conn_device->use_fastreg) {
+                               u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
+
+                               ret = isert_conn_create_fastreg_pool(isert_conn,
+                                                                    pi_support);
+                               if (ret) {
+                                       pr_err("Conn: %p failed to create"
+                                              " fastreg pool\n", isert_conn);
+                                       return ret;
+                               }
+                       }
+
                        ret = isert_alloc_rx_descriptors(isert_conn);
                        if (ret)
                                return ret;
@@ -1392,19 +1481,60 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
        }
 }
 
+static int
+isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
+                  struct scatterlist *sg, u32 nents, u32 length, u32 offset,
+                  enum iser_ib_op_code op, struct isert_data_buf *data)
+{
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+       data->dma_dir = op == ISER_IB_RDMA_WRITE ?
+                             DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+       data->len = length - offset;
+       data->offset = offset;
+       data->sg_off = data->offset / PAGE_SIZE;
+
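+       /* clamp to what a single fast-reg page list can describe */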
+       data->sg = &sg[data->sg_off];
+       data->nents = min_t(unsigned int, nents - data->sg_off,
+                                         ISCSI_ISER_SG_TABLESIZE);
+       data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
+                                       PAGE_SIZE);
+
+       data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
+                                       data->dma_dir);
+       if (unlikely(!data->dma_nents)) {
+               pr_err("Cmd: unable to dma map SGs %p\n", sg);
+               return -EINVAL;
+       }
+
+       pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+                isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+
+       return 0;
+}
+
+static void
+isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
+{
+       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+
+       ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
+       memset(data, 0, sizeof(*data));
+}
+
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
 
        pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
-       if (wr->sge) {
+
+       if (wr->data.sg) {
                pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
-               ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               wr->sge = NULL;
+               isert_unmap_data_buf(isert_conn, &wr->data);
        }
 
        if (wr->send_wr) {
@@ -1424,7 +1554,6 @@ static void
 isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
-       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        LIST_HEAD(unmap_list);
 
        pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
@@ -1432,18 +1561,19 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
        if (wr->fr_desc) {
                pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
                         isert_cmd, wr->fr_desc);
+               if (wr->fr_desc->ind & ISERT_PROTECTED) {
+                       isert_unmap_data_buf(isert_conn, &wr->prot);
+                       wr->fr_desc->ind &= ~ISERT_PROTECTED;
+               }
                spin_lock_bh(&isert_conn->conn_lock);
                list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
                spin_unlock_bh(&isert_conn->conn_lock);
                wr->fr_desc = NULL;
        }
 
-       if (wr->sge) {
+       if (wr->data.sg) {
                pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
-               ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
-                               (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                               DMA_TO_DEVICE : DMA_FROM_DEVICE);
-               wr->sge = NULL;
+               isert_unmap_data_buf(isert_conn, &wr->data);
        }
 
        wr->ib_sge = NULL;
@@ -1451,7 +1581,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 }
 
 static void
-isert_put_cmd(struct isert_cmd *isert_cmd)
+isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
 {
        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
@@ -1467,8 +1597,21 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
                        list_del_init(&cmd->i_conn_node);
                spin_unlock_bh(&conn->cmd_lock);
 
-               if (cmd->data_direction == DMA_TO_DEVICE)
+               if (cmd->data_direction == DMA_TO_DEVICE) {
                        iscsit_stop_dataout_timer(cmd);
+                       /*
+                        * Check for special case during comp_err where
+                        * WRITE_PENDING has been handed off from core,
+                        * but requires an extra target_put_sess_cmd()
+                        * before transport_generic_free_cmd() below.
+                        */
+                       if (comp_err &&
+                           cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
+                               struct se_cmd *se_cmd = &cmd->se_cmd;
+
+                               target_put_sess_cmd(se_cmd->se_sess, se_cmd);
+                       }
+               }
 
                device->unreg_rdma_mem(isert_cmd, isert_conn);
                transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1523,7 +1666,7 @@ isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
 
 static void
 isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
-                    struct ib_device *ib_dev)
+                    struct ib_device *ib_dev, bool comp_err)
 {
        if (isert_cmd->pdu_buf_dma != 0) {
                pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
@@ -1533,7 +1676,77 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
        }
 
        isert_unmap_tx_desc(tx_desc, ib_dev);
-       isert_put_cmd(isert_cmd);
+       isert_put_cmd(isert_cmd, comp_err);
+}
+
+static int
+isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
+{
+       struct ib_mr_status mr_status;
+       int ret;
+
+       ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
+       if (ret) {
+               pr_err("ib_check_mr_status failed, ret %d\n", ret);
+               goto fail_mr_status;
+       }
+
+       if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
+               u64 sec_offset_err;
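+               /* block size plus the 8-byte T10 DIF tuple carried per block */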
+               u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
+
+               switch (mr_status.sig_err.err_type) {
+               case IB_SIG_BAD_GUARD:
+                       se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+                       break;
+               case IB_SIG_BAD_REFTAG:
+                       se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+                       break;
+               case IB_SIG_BAD_APPTAG:
+                       se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+                       break;
+               }
+               sec_offset_err = mr_status.sig_err.sig_err_offset;
+               do_div(sec_offset_err, block_size);
+               se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
+
+               pr_err("isert: PI error found type %d at sector 0x%llx "
+                      "expected 0x%x vs actual 0x%x\n",
+                      mr_status.sig_err.err_type,
+                      (unsigned long long)se_cmd->bad_sector,
+                      mr_status.sig_err.expected,
+                      mr_status.sig_err.actual);
+               ret = 1;
+       }
+
+fail_mr_status:
+       return ret;
+}
+
+static void
+isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
+                           struct isert_cmd *isert_cmd)
+{
+       struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
+       struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+       struct se_cmd *se_cmd = &cmd->se_cmd;
+       struct isert_conn *isert_conn = isert_cmd->conn;
+       struct isert_device *device = isert_conn->conn_device;
+       int ret = 0;
+
+       if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+               ret = isert_check_pi_status(se_cmd,
+                                           wr->fr_desc->pi_ctx->sig_mr);
+               wr->fr_desc->ind &= ~ISERT_PROTECTED;
+       }
+
+       device->unreg_rdma_mem(isert_cmd, isert_conn);
+       wr->send_wr_num = 0;
+       if (ret)
+               transport_send_check_condition_and_sense(se_cmd,
+                                                        se_cmd->pi_err, 0);
+       else
+               isert_put_response(isert_conn->conn, cmd);
 }
 
 static void
@@ -1545,10 +1758,17 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct isert_device *device = isert_conn->conn_device;
+       int ret = 0;
+
+       if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
+               ret = isert_check_pi_status(se_cmd,
+                                           wr->fr_desc->pi_ctx->sig_mr);
+               wr->fr_desc->ind &= ~ISERT_PROTECTED;
+       }
 
        iscsit_stop_dataout_timer(cmd);
        device->unreg_rdma_mem(isert_cmd, isert_conn);
-       cmd->write_data_done = wr->cur_rdma_length;
+       cmd->write_data_done = wr->data.len;
        wr->send_wr_num = 0;
 
        pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
@@ -1557,7 +1777,11 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
        spin_unlock_bh(&cmd->istate_lock);
 
-       target_execute_cmd(se_cmd);
+       if (ret)
+               transport_send_check_condition_and_sense(se_cmd,
+                                                        se_cmd->pi_err, 0);
+       else
+               target_execute_cmd(se_cmd);
 }
 
 static void
@@ -1577,14 +1801,14 @@ isert_do_control_comp(struct work_struct *work)
                iscsit_tmr_post_handler(cmd, cmd->conn);
 
                cmd->i_state = ISTATE_SENT_STATUS;
-               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                break;
        case ISTATE_SEND_REJECT:
                pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
                atomic_dec(&isert_conn->post_send_buf_count);
 
                cmd->i_state = ISTATE_SENT_STATUS;
-               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                break;
        case ISTATE_SEND_LOGOUTRSP:
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
@@ -1598,7 +1822,7 @@ isert_do_control_comp(struct work_struct *work)
        case ISTATE_SEND_TEXTRSP:
                atomic_dec(&isert_conn->post_send_buf_count);
                cmd->i_state = ISTATE_SENT_STATUS;
-               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev);
+               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
                break;
        default:
                pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
@@ -1626,10 +1850,21 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
                queue_work(isert_comp_wq, &isert_cmd->comp_work);
                return;
        }
-       atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+
+       /*
+        * If send_wr_num is 0, the RDMA completion has already been
+        * received and cleared, so only the response post needs to be
+        * decremented.  Otherwise the response is accounted for in
+        * send_wr_num, so subtract the whole amount.
+        */
+       if (wr->send_wr_num)
+               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+       else
+               atomic_dec(&isert_conn->post_send_buf_count);
 
        cmd->i_state = ISTATE_SENT_STATUS;
-       isert_completion_put(tx_desc, isert_cmd, ib_dev);
+       isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
@@ -1658,8 +1893,9 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
                                          isert_conn, ib_dev);
                break;
        case ISER_IB_RDMA_WRITE:
-               pr_err("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-               dump_stack();
+               pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
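+               /* RDMA_WRITEs are posted signaled only for PI commands */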
+               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
+               isert_completion_rdma_write(tx_desc, isert_cmd);
                break;
        case ISER_IB_RDMA_READ:
                pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
@@ -1709,8 +1945,20 @@ isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_de
                llnode = llist_next(llnode);
                wr = &t->isert_cmd->rdma_wr;
 
-               atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-               isert_completion_put(t, t->isert_cmd, ib_dev);
+               /*
+                * If send_wr_num is 0, the RDMA completion has already
+                * been received and cleared, so only the response post
+                * needs to be decremented.  Otherwise the response is
+                * accounted for in send_wr_num, so subtract the whole
+                * amount.
+                */
+               if (wr->send_wr_num)
+                       atomic_sub(wr->send_wr_num,
+                                  &isert_conn->post_send_buf_count);
+               else
+                       atomic_dec(&isert_conn->post_send_buf_count);
+
+               isert_completion_put(t, t->isert_cmd, ib_dev, true);
        }
 }
 
@@ -1728,15 +1976,27 @@ isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn
                llnode = llist_next(llnode);
                wr = &t->isert_cmd->rdma_wr;
 
-               atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
-               isert_completion_put(t, t->isert_cmd, ib_dev);
+               /*
+                * If send_wr_num is 0, the RDMA completion has already
+                * been received and cleared, so only the response post
+                * needs to be decremented.  Otherwise the response is
+                * accounted for in send_wr_num, so subtract the whole
+                * amount.
+                */
+               if (wr->send_wr_num)
+                       atomic_sub(wr->send_wr_num,
+                                  &isert_conn->post_send_buf_count);
+               else
+                       atomic_dec(&isert_conn->post_send_buf_count);
+
+               isert_completion_put(t, t->isert_cmd, ib_dev, true);
        }
        tx_desc->comp_llnode_batch = NULL;
 
        if (!isert_cmd)
                isert_unmap_tx_desc(tx_desc, ib_dev);
        else
-               isert_completion_put(tx_desc, isert_cmd, ib_dev);
+               isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
 }
 
 static void
@@ -1918,6 +2178,36 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
        return isert_post_response(isert_conn, isert_cmd);
 }
 
+static void
+isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+       struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       struct isert_device *device = isert_conn->conn_device;
+
+       spin_lock_bh(&conn->cmd_lock);
+       if (!list_empty(&cmd->i_conn_node))
+               list_del_init(&cmd->i_conn_node);
+       spin_unlock_bh(&conn->cmd_lock);
+
+       if (cmd->data_direction == DMA_TO_DEVICE)
+               iscsit_stop_dataout_timer(cmd);
+
+       device->unreg_rdma_mem(isert_cmd, isert_conn);
+}
+
+static enum target_prot_op
+isert_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
+       struct isert_device *device = isert_conn->conn_device;
+
+       if (device->pi_capable)
+               return TARGET_PROT_ALL;
+
+       return TARGET_PROT_NORMAL;
+}
+
 static int
 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                bool nopout_response)
@@ -2099,54 +2389,39 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct isert_data_buf *data = &wr->data;
        struct ib_send_wr *send_wr;
        struct ib_sge *ib_sge;
-       struct scatterlist *sg_start;
-       u32 sg_off = 0, sg_nents;
-       u32 offset = 0, data_len, data_left, rdma_write_max, va_offset = 0;
-       int ret = 0, count, i, ib_sge_cnt;
+       u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
+       int ret = 0, i, ib_sge_cnt;
 
-       if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-               data_left = se_cmd->data_length;
-       } else {
-               sg_off = cmd->write_data_done / PAGE_SIZE;
-               data_left = se_cmd->data_length - cmd->write_data_done;
-               offset = cmd->write_data_done;
-               isert_cmd->tx_desc.isert_cmd = isert_cmd;
-       }
+       isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-       sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-       sg_nents = se_cmd->t_data_nents - sg_off;
+       offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+       ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+                                se_cmd->t_data_nents, se_cmd->data_length,
+                                offset, wr->iser_ib_op, &wr->data);
+       if (ret)
+               return ret;
 
-       count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-                             (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                             DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       if (unlikely(!count)) {
-               pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-               return -EINVAL;
-       }
-       wr->sge = sg_start;
-       wr->num_sge = sg_nents;
-       wr->cur_rdma_length = data_left;
-       pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-                isert_cmd, count, sg_start, sg_nents, data_left);
+       data_left = data->len;
+       offset = data->offset;
 
-       ib_sge = kzalloc(sizeof(struct ib_sge) * sg_nents, GFP_KERNEL);
+       ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
        if (!ib_sge) {
                pr_warn("Unable to allocate ib_sge\n");
                ret = -ENOMEM;
-               goto unmap_sg;
+               goto unmap_cmd;
        }
        wr->ib_sge = ib_sge;
 
-       wr->send_wr_num = DIV_ROUND_UP(sg_nents, isert_conn->max_sge);
+       wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
        wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
                                GFP_KERNEL);
        if (!wr->send_wr) {
                pr_debug("Unable to allocate wr->send_wr\n");
                ret = -ENOMEM;
-               goto unmap_sg;
+               goto unmap_cmd;
        }
 
        wr->isert_cmd = isert_cmd;
@@ -2185,10 +2460,9 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        }
 
        return 0;
-unmap_sg:
-       ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-                       (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+unmap_cmd:
+       isert_unmap_data_buf(isert_conn, data);
+
        return ret;
 }
 
@@ -2232,49 +2506,70 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
 }
 
 static int
-isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-                 struct isert_conn *isert_conn, struct scatterlist *sg_start,
-                 struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
-                 unsigned int data_len)
+isert_fast_reg_mr(struct isert_conn *isert_conn,
+                 struct fast_reg_descriptor *fr_desc,
+                 struct isert_data_buf *mem,
+                 enum isert_indicator ind,
+                 struct ib_sge *sge)
 {
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct ib_mr *mr;
+       struct ib_fast_reg_page_list *frpl;
        struct ib_send_wr fr_wr, inv_wr;
        struct ib_send_wr *bad_wr, *wr = NULL;
        int ret, pagelist_len;
        u32 page_off;
        u8 key;
 
-       sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
-       page_off = offset % PAGE_SIZE;
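+       /*
+        * A single DMA entry can be described directly through the
+        * local DMA MR; no fast registration is needed.
+        */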
+       if (mem->dma_nents == 1) {
+               sge->lkey = isert_conn->conn_mr->lkey;
+               sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
+               sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
+               pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
+                        __func__, __LINE__, sge->addr, sge->length,
+                        sge->lkey);
+               return 0;
+       }
+
+       if (ind == ISERT_DATA_KEY_VALID) {
+               /* Registering data buffer */
+               mr = fr_desc->data_mr;
+               frpl = fr_desc->data_frpl;
+       } else {
+               /* Registering protection buffer */
+               mr = fr_desc->pi_ctx->prot_mr;
+               frpl = fr_desc->pi_ctx->prot_frpl;
+       }
+
+       page_off = mem->offset % PAGE_SIZE;
 
        pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-                fr_desc, sg_nents, offset);
+                fr_desc, mem->nents, mem->offset);
 
-       pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
-                                            &fr_desc->data_frpl->page_list[0]);
+       pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
+                                            &frpl->page_list[0]);
 
-       if (!fr_desc->valid) {
+       if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
                memset(&inv_wr, 0, sizeof(inv_wr));
                inv_wr.wr_id = ISER_FASTREG_LI_WRID;
                inv_wr.opcode = IB_WR_LOCAL_INV;
-               inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
+               inv_wr.ex.invalidate_rkey = mr->rkey;
                wr = &inv_wr;
                /* Bump the key */
-               key = (u8)(fr_desc->data_mr->rkey & 0x000000FF);
-               ib_update_fast_reg_key(fr_desc->data_mr, ++key);
+               key = (u8)(mr->rkey & 0x000000FF);
+               ib_update_fast_reg_key(mr, ++key);
        }
 
        /* Prepare FASTREG WR */
        memset(&fr_wr, 0, sizeof(fr_wr));
        fr_wr.wr_id = ISER_FASTREG_LI_WRID;
        fr_wr.opcode = IB_WR_FAST_REG_MR;
-       fr_wr.wr.fast_reg.iova_start =
-               fr_desc->data_frpl->page_list[0] + page_off;
-       fr_wr.wr.fast_reg.page_list = fr_desc->data_frpl;
+       fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
+       fr_wr.wr.fast_reg.page_list = frpl;
        fr_wr.wr.fast_reg.page_list_len = pagelist_len;
        fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
-       fr_wr.wr.fast_reg.length = data_len;
-       fr_wr.wr.fast_reg.rkey = fr_desc->data_mr->rkey;
+       fr_wr.wr.fast_reg.length = mem->len;
+       fr_wr.wr.fast_reg.rkey = mr->rkey;
        fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;
 
        if (!wr)
@@ -2287,15 +2582,157 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
                pr_err("fast registration failed, ret:%d\n", ret);
                return ret;
        }
-       fr_desc->valid = false;
+       fr_desc->ind &= ~ind;
+
+       sge->lkey = mr->lkey;
+       sge->addr = frpl->page_list[0] + page_off;
+       sge->length = mem->len;
+
+       pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
+                __func__, __LINE__, sge->addr, sge->length,
+                sge->lkey);
+
+       return ret;
+}
+
+static inline enum ib_t10_dif_type
+se2ib_prot_type(enum target_prot_type prot_type)
+{
+       switch (prot_type) {
+       case TARGET_DIF_TYPE0_PROT:
+               return IB_T10DIF_NONE;
+       case TARGET_DIF_TYPE1_PROT:
+               return IB_T10DIF_TYPE1;
+       case TARGET_DIF_TYPE2_PROT:
+               return IB_T10DIF_TYPE2;
+       case TARGET_DIF_TYPE3_PROT:
+               return IB_T10DIF_TYPE3;
+       default:
+               return IB_T10DIF_NONE;
+       }
+}
 
-       ib_sge->lkey = fr_desc->data_mr->lkey;
-       ib_sge->addr = fr_desc->data_frpl->page_list[0] + page_off;
-       ib_sge->length = data_len;
+static int
+isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+{
+       enum ib_t10_dif_type ib_prot_type = se2ib_prot_type(se_cmd->prot_type);
+
+       sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
+       sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
+       sig_attrs->mem.sig.dif.pi_interval =
+                               se_cmd->se_dev->dev_attrib.block_size;
+       sig_attrs->wire.sig.dif.pi_interval =
+                               se_cmd->se_dev->dev_attrib.block_size;
+
+       switch (se_cmd->prot_op) {
+       case TARGET_PROT_DIN_INSERT:
+       case TARGET_PROT_DOUT_STRIP:
+               sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
+               sig_attrs->wire.sig.dif.type = ib_prot_type;
+               sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+               sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+               break;
+       case TARGET_PROT_DOUT_INSERT:
+       case TARGET_PROT_DIN_STRIP:
+               sig_attrs->mem.sig.dif.type = ib_prot_type;
+               sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+               sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+               sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
+               break;
+       case TARGET_PROT_DIN_PASS:
+       case TARGET_PROT_DOUT_PASS:
+               sig_attrs->mem.sig.dif.type = ib_prot_type;
+               sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
+               sig_attrs->mem.sig.dif.ref_tag = se_cmd->reftag_seed;
+               sig_attrs->wire.sig.dif.type = ib_prot_type;
+               sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
+               sig_attrs->wire.sig.dif.ref_tag = se_cmd->reftag_seed;
+               break;
+       default:
+               pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+               return -EINVAL;
+       }
 
-       pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
-                ib_sge->addr, ib_sge->length, ib_sge->lkey);
+       return 0;
+}
+
+static inline u8
+isert_set_prot_checks(u8 prot_checks)
+{
+       return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
+              (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
+              (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+}
+
+static int
+isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
+                struct fast_reg_descriptor *fr_desc,
+                struct ib_sge *data_sge, struct ib_sge *prot_sge,
+                struct ib_sge *sig_sge)
+{
+       struct ib_send_wr sig_wr, inv_wr;
+       struct ib_send_wr *bad_wr, *wr = NULL;
+       struct pi_context *pi_ctx = fr_desc->pi_ctx;
+       struct ib_sig_attrs sig_attrs;
+       int ret;
+       u32 key;
+
+       memset(&sig_attrs, 0, sizeof(sig_attrs));
+       ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
+       if (ret)
+               goto err;
+
+       sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
+
+       if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
+               memset(&inv_wr, 0, sizeof(inv_wr));
+               inv_wr.opcode = IB_WR_LOCAL_INV;
+               inv_wr.wr_id = ISER_FASTREG_LI_WRID;
+               inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+               wr = &inv_wr;
+               /* Bump the key */
+               key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
+               ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
+       }
+
+       memset(&sig_wr, 0, sizeof(sig_wr));
+       sig_wr.opcode = IB_WR_REG_SIG_MR;
+       sig_wr.wr_id = ISER_FASTREG_LI_WRID;
+       sig_wr.sg_list = data_sge;
+       sig_wr.num_sge = 1;
+       sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
+       sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
+       sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
+       if (se_cmd->t_prot_sg)
+               sig_wr.wr.sig_handover.prot = prot_sge;
+
+       if (!wr)
+               wr = &sig_wr;
+       else
+               wr->next = &sig_wr;
+
+       ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
+       if (ret) {
+               pr_err("fast registration failed, ret:%d\n", ret);
+               goto err;
+       }
+       fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
+
+       sig_sge->lkey = pi_ctx->sig_mr->lkey;
+       sig_sge->addr = 0;
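+       /* the signature MR is addressed from offset 0 */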
+       sig_sge->length = se_cmd->data_length;
+       if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
+           se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
+               /*
+                * We have protection guards on the wire
+                * so we need to set a larger transfer
+                */
+               sig_sge->length += se_cmd->prot_length;
 
+       pr_debug("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
+                sig_sge->addr, sig_sge->length,
+                sig_sge->lkey);
+err:
        return ret;
 }
 
@@ -2305,62 +2742,82 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
-       struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
-       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+       struct isert_conn *isert_conn = conn->context;
+       struct ib_sge data_sge;
        struct ib_send_wr *send_wr;
-       struct ib_sge *ib_sge;
-       struct scatterlist *sg_start;
-       struct fast_reg_descriptor *fr_desc;
-       u32 sg_off = 0, sg_nents;
-       u32 offset = 0, data_len, data_left, rdma_write_max;
-       int ret = 0, count;
+       struct fast_reg_descriptor *fr_desc = NULL;
+       u32 offset;
+       int ret = 0;
        unsigned long flags;
 
-       if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
-               data_left = se_cmd->data_length;
-       } else {
-               offset = cmd->write_data_done;
-               sg_off = offset / PAGE_SIZE;
-               data_left = se_cmd->data_length - cmd->write_data_done;
-               isert_cmd->tx_desc.isert_cmd = isert_cmd;
-       }
+       isert_cmd->tx_desc.isert_cmd = isert_cmd;
 
-       sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-       sg_nents = se_cmd->t_data_nents - sg_off;
+       offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
+       ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
+                                se_cmd->t_data_nents, se_cmd->data_length,
+                                offset, wr->iser_ib_op, &wr->data);
+       if (ret)
+               return ret;
 
-       count = ib_dma_map_sg(ib_dev, sg_start, sg_nents,
-                             (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                             DMA_TO_DEVICE : DMA_FROM_DEVICE);
-       if (unlikely(!count)) {
-               pr_err("Cmd: %p unrable to map SGs\n", isert_cmd);
-               return -EINVAL;
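+       /*
+        * An FRMR is needed when the data spans multiple DMA entries
+        * or protection information is in use.
+        */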
+       if (wr->data.dma_nents != 1 ||
+           se_cmd->prot_op != TARGET_PROT_NORMAL) {
+               spin_lock_irqsave(&isert_conn->conn_lock, flags);
+               fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
+                                          struct fast_reg_descriptor, list);
+               list_del(&fr_desc->list);
+               spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
+               wr->fr_desc = fr_desc;
        }
-       wr->sge = sg_start;
-       wr->num_sge = sg_nents;
-       pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-                isert_cmd, count, sg_start, sg_nents, data_left);
 
-       memset(&wr->s_ib_sge, 0, sizeof(*ib_sge));
-       ib_sge = &wr->s_ib_sge;
-       wr->ib_sge = ib_sge;
+       ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
+                               ISERT_DATA_KEY_VALID, &data_sge);
+       if (ret)
+               goto unmap_cmd;
+
+       if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
+               struct ib_sge prot_sge, sig_sge;
+
+               if (se_cmd->t_prot_sg) {
+                       ret = isert_map_data_buf(isert_conn, isert_cmd,
+                                                se_cmd->t_prot_sg,
+                                                se_cmd->t_prot_nents,
+                                                se_cmd->prot_length,
+                                                0, wr->iser_ib_op, &wr->prot);
+                       if (ret)
+                               goto unmap_cmd;
+
+                       ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
+                                               ISERT_PROT_KEY_VALID, &prot_sge);
+                       if (ret)
+                               goto unmap_prot_cmd;
+               }
+
+               ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
+                                      &data_sge, &prot_sge, &sig_sge);
+               if (ret)
+                       goto unmap_prot_cmd;
 
+               fr_desc->ind |= ISERT_PROTECTED;
+               memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
+       } else
+               memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+
+       wr->ib_sge = &wr->s_ib_sge;
        wr->send_wr_num = 1;
        memset(&wr->s_send_wr, 0, sizeof(*send_wr));
        wr->send_wr = &wr->s_send_wr;
-
        wr->isert_cmd = isert_cmd;
-       rdma_write_max = ISCSI_ISER_SG_TABLESIZE * PAGE_SIZE;
 
        send_wr = &isert_cmd->rdma_wr.s_send_wr;
-       send_wr->sg_list = ib_sge;
+       send_wr->sg_list = &wr->s_ib_sge;
        send_wr->num_sge = 1;
        send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
                send_wr->opcode = IB_WR_RDMA_WRITE;
                send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
                send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-               send_wr->send_flags = 0;
-               send_wr->next = &isert_cmd->tx_desc.send_wr;
+               send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+                                     0 : IB_SEND_SIGNALED;
        } else {
                send_wr->opcode = IB_WR_RDMA_READ;
                send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
@@ -2368,37 +2825,18 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                send_wr->send_flags = IB_SEND_SIGNALED;
        }
 
-       data_len = min(data_left, rdma_write_max);
-       wr->cur_rdma_length = data_len;
-
-       /* if there is a single dma entry, dma mr is sufficient */
-       if (count == 1) {
-               ib_sge->addr = ib_sg_dma_address(ib_dev, &sg_start[0]);
-               ib_sge->length = ib_sg_dma_len(ib_dev, &sg_start[0]);
-               ib_sge->lkey = isert_conn->conn_mr->lkey;
-               wr->fr_desc = NULL;
-       } else {
+       return 0;
+unmap_prot_cmd:
+       if (se_cmd->t_prot_sg)
+               isert_unmap_data_buf(isert_conn, &wr->prot);
+unmap_cmd:
+       if (fr_desc) {
                spin_lock_irqsave(&isert_conn->conn_lock, flags);
-               fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
-                                          struct fast_reg_descriptor, list);
-               list_del(&fr_desc->list);
+               list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
-               wr->fr_desc = fr_desc;
-
-               ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
-                                       ib_sge, sg_nents, offset, data_len);
-               if (ret) {
-                       list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
-                       goto unmap_sg;
-               }
        }
+       isert_unmap_data_buf(isert_conn, &wr->data);
 
-       return 0;
-
-unmap_sg:
-       ib_dma_unmap_sg(ib_dev, sg_start, sg_nents,
-                       (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
-                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
        return ret;
 }
 
@@ -2422,25 +2860,35 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                return rc;
        }
 
-       /*
-        * Build isert_conn->tx_desc for iSCSI response PDU and attach
-        */
-       isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
-       iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
-                            &isert_cmd->tx_desc.iscsi_header);
-       isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_conn, isert_cmd,
-                          &isert_cmd->tx_desc.send_wr, true);
+       if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+               /*
+                * Build isert_conn->tx_desc for iSCSI response PDU and attach
+                */
+               isert_create_send_desc(isert_conn, isert_cmd,
+                                      &isert_cmd->tx_desc);
+               iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+                                    &isert_cmd->tx_desc.iscsi_header);
+               isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+               isert_init_send_wr(isert_conn, isert_cmd,
+                                  &isert_cmd->tx_desc.send_wr, true);
+               isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
+               wr->send_wr_num += 1;
+       }
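+       /*
+        * For PI commands the response is sent later, from
+        * isert_completion_rdma_write() after the signature check.
+        */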
 
-       atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+       atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
 
        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
        if (rc) {
                pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-               atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
        }
-       pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
-                isert_cmd);
+
+       if (se_cmd->prot_op == TARGET_PROT_NORMAL)
+               pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+                        "READ\n", isert_cmd);
+       else
+               pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+                        isert_cmd);
 
        return 1;
 }
@@ -2815,6 +3263,8 @@ static struct iscsit_transport iser_target_transport = {
        .iscsit_get_dataout     = isert_get_dataout,
        .iscsit_queue_data_in   = isert_put_datain,
        .iscsit_queue_status    = isert_put_response,
+       .iscsit_aborted_task    = isert_aborted_task,
+       .iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
 };
 
 static int __init isert_init(void)
index f6ae7f5dd4082768f3b6f0dfb36c1d45506b5eb6..4c072ae34c01a3021e57cb5378a3949b97d0876c 100644 (file)
@@ -50,11 +50,35 @@ struct iser_tx_desc {
        struct ib_send_wr send_wr;
 } __packed;
 
+enum isert_indicator {
+       ISERT_PROTECTED         = 1 << 0,
+       ISERT_DATA_KEY_VALID    = 1 << 1,
+       ISERT_PROT_KEY_VALID    = 1 << 2,
+       ISERT_SIG_KEY_VALID     = 1 << 3,
+};
+
+struct pi_context {
+       struct ib_mr                   *prot_mr;
+       struct ib_fast_reg_page_list   *prot_frpl;
+       struct ib_mr                   *sig_mr;
+};
+
 struct fast_reg_descriptor {
-       struct list_head        list;
-       struct ib_mr            *data_mr;
-       struct ib_fast_reg_page_list    *data_frpl;
-       bool                    valid;
+       struct list_head                list;
+       struct ib_mr                   *data_mr;
+       struct ib_fast_reg_page_list   *data_frpl;
+       u8                              ind;
+       struct pi_context              *pi_ctx;
+};
+
+struct isert_data_buf {
+       struct scatterlist     *sg;
+       int                     nents;
+       u32                     sg_off;
+       u32                     len; /* cur_rdma_length */
+       u32                     offset;
+       unsigned int            dma_nents;
+       enum dma_data_direction dma_dir;
 };
 
 struct isert_rdma_wr {
@@ -63,12 +87,11 @@ struct isert_rdma_wr {
        enum iser_ib_op_code    iser_ib_op;
        struct ib_sge           *ib_sge;
        struct ib_sge           s_ib_sge;
-       int                     num_sge;
-       struct scatterlist      *sge;
        int                     send_wr_num;
        struct ib_send_wr       *send_wr;
        struct ib_send_wr       s_send_wr;
-       u32                     cur_rdma_length;
+       struct isert_data_buf   data;
+       struct isert_data_buf   prot;
        struct fast_reg_descriptor *fr_desc;
 };
 
@@ -141,6 +164,7 @@ struct isert_cq_desc {
 
 struct isert_device {
        int                     use_fastreg;
+       bool                    pi_capable;
        int                     cqs_used;
        int                     refcount;
        int                     cq_active_qps[ISERT_MAX_CQ];
index 0e537d8d0e4774312d632f68f0cedd0aaa406b7d..fe09f2788b15e7e42b1c076f67e17b3b2ca2f268 100644 (file)
@@ -1078,6 +1078,7 @@ static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                                 struct srpt_send_ioctx *ioctx)
 {
+       struct ib_device *dev = ch->sport->sdev->device;
        struct se_cmd *cmd;
        struct scatterlist *sg, *sg_orig;
        int sg_cnt;
@@ -1124,7 +1125,7 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
 
        db = ioctx->rbufs;
        tsize = cmd->data_length;
-       dma_len = sg_dma_len(&sg[0]);
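+       /* use ib_sg_dma_*() so device-specific DMA ops are honoured */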
+       dma_len = ib_sg_dma_len(dev, &sg[0]);
        riu = ioctx->rdma_ius;
 
        /*
@@ -1155,7 +1156,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                                        ++j;
                                        if (j < count) {
                                                sg = sg_next(sg);
-                                               dma_len = sg_dma_len(sg);
+                                               dma_len = ib_sg_dma_len(
+                                                               dev, sg);
                                        }
                                }
                        } else {
@@ -1192,8 +1194,8 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
        tsize = cmd->data_length;
        riu = ioctx->rdma_ius;
        sg = sg_orig;
-       dma_len = sg_dma_len(&sg[0]);
-       dma_addr = sg_dma_address(&sg[0]);
+       dma_len = ib_sg_dma_len(dev, &sg[0]);
+       dma_addr = ib_sg_dma_address(dev, &sg[0]);
 
        /* this second loop really maps sg_address to rdma_iu->ib_sge */
        for (i = 0, j = 0;
@@ -1216,8 +1218,10 @@ static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
                                        ++j;
                                        if (j < count) {
                                                sg = sg_next(sg);
-                                               dma_len = sg_dma_len(sg);
-                                               dma_addr = sg_dma_address(sg);
+                                               dma_len = ib_sg_dma_len(
+                                                               dev, sg);
+                                               dma_addr = ib_sg_dma_address(
+                                                               dev, sg);
                                        }
                                }
                        } else {
@@ -2580,7 +2584,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
                goto destroy_ib;
        }
 
-       ch->sess = transport_init_session();
+       ch->sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(ch->sess)) {
                rej->reason = __constant_cpu_to_be32(
                                SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
@@ -3081,6 +3085,14 @@ static void srpt_queue_tm_rsp(struct se_cmd *cmd)
        srpt_queue_response(cmd);
 }
 
+static void srpt_aborted_task(struct se_cmd *cmd)
+{
+       struct srpt_send_ioctx *ioctx = container_of(cmd,
+                               struct srpt_send_ioctx, cmd);
+
+       srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+}
+
 static int srpt_queue_status(struct se_cmd *cmd)
 {
        struct srpt_send_ioctx *ioctx;
@@ -3928,6 +3940,7 @@ static struct target_core_fabric_ops srpt_template = {
        .queue_data_in                  = srpt_queue_data_in,
        .queue_status                   = srpt_queue_status,
        .queue_tm_rsp                   = srpt_queue_tm_rsp,
+       .aborted_task                   = srpt_aborted_task,
        /*
         * Setup function pointers for generic logic in
         * target_core_fabric_configfs.c
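
The srpt hunks switch from the generic sg_dma_len()/sg_dma_address() helpers to ib_sg_dma_len()/ib_sg_dma_address(), which take the ib_device so adapters that install their own DMA mapping hooks report correct segment attributes. A rough standalone sketch of that per-device accessor indirection (the structures and names below are invented for illustration, not the verbs API):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct seg {
        uint64_t addr;
        unsigned int len;
};

/* A device may override how mapped segment attributes are read. */
struct dev_dma_ops {
        unsigned int (*seg_len)(const struct seg *s);
};

struct device {
        const struct dev_dma_ops *dma_ops;      /* NULL means "use generic" */
};

static unsigned int generic_seg_len(const struct seg *s)
{
        return s->len;
}

/* Analogue of ib_sg_dma_len(): dispatch through the device if it hooks in. */
static unsigned int dev_seg_len(const struct device *dev, const struct seg *s)
{
        if (dev->dma_ops && dev->dma_ops->seg_len)
                return dev->dma_ops->seg_len(s);
        return generic_seg_len(s);
}

static unsigned int quirky_seg_len(const struct seg *s)
{
        return s->len / 2;      /* pretend this adapter maps differently */
}

static const struct dev_dma_ops quirky_ops = { .seg_len = quirky_seg_len };

int main(void)
{
        struct seg s = { .addr = 0x1000, .len = 4096 };
        struct device plain  = { .dma_ops = NULL };
        struct device quirky = { .dma_ops = &quirky_ops };

        printf("plain: %u, quirky: %u\n",
               dev_seg_len(&plain, &s), dev_seg_len(&quirky, &s));
        return 0;
}
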
index 44c358ecf5a10ac4d681bbe0365d3b65784b884c..6de9dfbf61c197fe6078a633aabc47cd47805bcf 100644 (file)
@@ -416,7 +416,7 @@ config LEDS_MC13783
        depends on MFD_MC13XXX
        help
          This option enables support for on-chip LED drivers found
-         on Freescale Semiconductor MC13783/MC13892 PMIC.
+         on Freescale Semiconductor MC13783/MC13892/MC34708 PMIC.
 
 config LEDS_NS2
        tristate "LED support for Network Space v2 GPIO LEDs"
@@ -474,7 +474,7 @@ config LEDS_LM355x
 
 config LEDS_OT200
        tristate "LED support for the Bachmann OT200"
-       depends on LEDS_CLASS && HAS_IOMEM
+       depends on LEDS_CLASS && HAS_IOMEM && (X86_32 || COMPILE_TEST)
        help
          This option enables support for the LEDs on the Bachmann OT200.
          Say Y to enable LEDs on the Bachmann OT200.
index ce8921a753a32107b30915f8c2ae2b837310d33d..71b40d3bf77604e32829f391b6e804bbeefb0cf1 100644 (file)
@@ -39,9 +39,11 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
        led_cdev->blink_delay_on = delay_on;
        led_cdev->blink_delay_off = delay_off;
 
-       /* never on - don't blink */
-       if (!delay_on)
+       /* never on - just set to off */
+       if (!delay_on) {
+               __led_set_brightness(led_cdev, LED_OFF);
                return;
+       }
 
        /* never off - just set to brightness */
        if (!delay_off) {
index e387f41a9cb7667f3542f510b4a954d299e1d316..c3734f10fdd55143da14b4bbd9f296db5e7ca43f 100644 (file)
@@ -13,7 +13,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/device.h>
@@ -220,9 +219,12 @@ void led_trigger_unregister(struct led_trigger *trig)
 {
        struct led_classdev *led_cdev;
 
+       if (list_empty_careful(&trig->next_trig))
+               return;
+
        /* Remove from the list of led triggers */
        down_write(&triggers_list_lock);
-       list_del(&trig->next_trig);
+       list_del_init(&trig->next_trig);
        up_write(&triggers_list_lock);
 
        /* Remove anyone actively using this trigger */
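
The change above lets led_trigger_unregister() be called on a trigger that was never registered or was already removed: list_empty_careful() detects the self-pointing list head, and list_del_init() restores it on removal. A compact userspace sketch of the same idempotent-unregister idea with a hand-rolled circular list (locking omitted; these are not the kernel's list.h helpers):

#include <stdio.h>

struct node {
        struct node *prev, *next;
};

static void node_init(struct node *n)           { n->prev = n->next = n; }
static int  node_unlinked(const struct node *n) { return n->next == n; }

static void node_add(struct node *head, struct node *n)
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

/* Like list_del_init(): unlink and re-point the entry at itself. */
static void node_del_init(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        node_init(n);
}

static void unregister(struct node *n)
{
        if (node_unlinked(n))   /* never registered, or already removed */
                return;
        node_del_init(n);
}

int main(void)
{
        struct node head, trig;

        node_init(&head);
        node_init(&trig);

        unregister(&trig);      /* harmless: was never added */
        node_add(&head, &trig);
        unregister(&trig);      /* real removal */
        unregister(&trig);      /* harmless again thanks to del_init */

        printf("list empty: %d\n", node_unlinked(&head));
        return 0;
}
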
index 5f588c0a376eb0eaec12843fea7022c65f9a1418..d1e1bca90d11e74599a54b89a358ebd0162c3934 100644 (file)
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
index 7e311a120b11abe9f457550ffc85df2c4cbf1748..86b5bdb0c77303665f7bc705e5bd43dca39e7df4 100644 (file)
@@ -15,7 +15,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
index 6de216a89a0c1d3dc766f45f1e19466b80fe316f..70c74a7f0dfe13584466e384cba29e33b6c10462 100644 (file)
@@ -7,7 +7,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/slab.h>
index 66d0a57db2210b01ffe2fa0ceb9e5a8f449b6c74..d0452b099aee1512bd70ed152de1085227ad9c8e 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/jiffies.h>
 #include <linux/i2c.h>
@@ -444,7 +443,7 @@ static void led_work(struct work_struct *work)
 {
        int ret;
        struct blinkm_led *led;
-       struct blinkm_data *data ;
+       struct blinkm_data *data;
        struct blinkm_work *blm_work = work_to_blmwork(work);
 
        led = blm_work->blinkm_led;
index d93e2455da5c43af5fbdf59659cf9e61d5c9ffd1..f58a354428e3c1e25f0baa65b80398c95a1f9d75 100644 (file)
@@ -19,7 +19,7 @@ MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>");
 MODULE_DESCRIPTION("Clevo mail LED driver");
 MODULE_LICENSE("GPL");
 
-static bool __initdata nodetect;
+static bool nodetect;
 module_param_named(nodetect, nodetect, bool, 0);
 MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
 
@@ -153,7 +153,7 @@ static struct led_classdev clevo_mail_led = {
        .flags                  = LED_CORE_SUSPENDRESUME,
 };
 
-static int clevo_mail_led_probe(struct platform_device *pdev)
+static int __init clevo_mail_led_probe(struct platform_device *pdev)
 {
        return led_classdev_register(&pdev->dev, &clevo_mail_led);
 }
@@ -165,7 +165,6 @@ static int clevo_mail_led_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver clevo_mail_led_driver = {
-       .probe          = clevo_mail_led_probe,
        .remove         = clevo_mail_led_remove,
        .driver         = {
                .name           = KBUILD_MODNAME,
index 8abcb66db01c2c587f7e85c91e296e324947027e..910339d86edf237c00f7bb22c908cbcd13cb098b 100644 (file)
@@ -3,7 +3,6 @@
  *
  * Control the Cobalt Qube/RaQ front LED
  */
-#include <linux/init.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/leds.h>
index 2a4b87f8091ab699706577e1442efe68de645049..35dffb100388c10e7568202c9fc73cd4c8738be5 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
index 865d4faf874a627424611314eb454e8ca2ba9d6e..01486adc7f8ba969502a3ee3e13004da439d6c01 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
index b4d5a44cc41b11e1223c28e77a002753c8f5ba91..2b4dc738dcd657092176822fc2613349e8066246 100644 (file)
@@ -16,7 +16,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/module.h>
index 78b0e273a903eec5e64e8607e89418b82a8ff0da..57ff20fecf57e16e9963db1692206b923593258f 100644 (file)
@@ -11,7 +11,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/gpio.h>
 #include <linux/leds.h>
@@ -204,6 +203,9 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
                                led.default_state = LEDS_GPIO_DEFSTATE_OFF;
                }
 
+               if (of_get_property(child, "retain-state-suspended", NULL))
+                       led.retain_state_suspended = 1;
+
                ret = create_gpio_led(&led, &priv->leds[priv->num_leds++],
                                      &pdev->dev, NULL);
                if (ret < 0) {
@@ -224,6 +226,8 @@ static const struct of_device_id of_gpio_leds_match[] = {
        { .compatible = "gpio-leds", },
        {},
 };
+
+MODULE_DEVICE_TABLE(of, of_gpio_leds_match);
 #else /* CONFIG_OF_GPIO */
 static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
 {
index 366b6055e33063e5461d5fdc2bdac3ba23fff611..d61a98896c71bdddd74924ebb040ac42edac590d 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <asm/hd64461.h>
index 027ede73b80da01d716aacd6dbd2d07e20b90092..e2c642c1169b93aef41014fc0a2100bb8a7f8057 100644 (file)
@@ -12,7 +12,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/mfd/core.h>
 #include <linux/mutex.h>
index 2ec34cfcedcee613aebd97bae8c06966b826f404..8ca197af2864841391edb94aaee13c85c60854ea 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
index 4ade66a2d9d4758b314c45ed52a222ff0cb0aeca..cb5ed82994baed92157bcebc27c27c6fb1d173a1 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
index bf006f4e44a05c971a429dfbf1ac984dc2f4076f..ca85724ab138b30997f3afc643dbeb6fe590d37b 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/init.h>
 #include <linux/leds.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -347,9 +346,9 @@ static void lp5562_write_program_memory(struct lp55xx_chip *chip,
 /* check the size of program count */
 static inline bool _is_pc_overflow(struct lp55xx_predef_pattern *ptn)
 {
-       return (ptn->size_r >= LP5562_PROGRAM_LENGTH ||
-               ptn->size_g >= LP5562_PROGRAM_LENGTH ||
-               ptn->size_b >= LP5562_PROGRAM_LENGTH);
+       return ptn->size_r >= LP5562_PROGRAM_LENGTH ||
+              ptn->size_g >= LP5562_PROGRAM_LENGTH ||
+              ptn->size_b >= LP5562_PROGRAM_LENGTH;
 }
 
 static int lp5562_run_predef_led_pattern(struct lp55xx_chip *chip, int mode)
index 3417e5be7b575fa8b44185a2f9fc9bfa9bd154a3..059f5b1f3553402e38eba247b64184564f827be6 100644 (file)
@@ -17,7 +17,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/workqueue.h>
index ca87a1b4a0db228896ca19dc410885eaa53eb7f3..f1db88e25138e7e993a91e1185715eff1219b4fe 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * LEDs driver for Freescale MC13783/MC13892
+ * LEDs driver for Freescale MC13783/MC13892/MC34708
  *
  * Copyright (C) 2010 Philippe Rétornaz
  *
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
+#include <linux/of.h>
 #include <linux/workqueue.h>
 #include <linux/mfd/mc13xxx.h>
 
-#define MC13XXX_REG_LED_CONTROL(x)     (51 + (x))
-
 struct mc13xxx_led_devtype {
        int     led_min;
        int     led_max;
        int     num_regs;
+       u32     ledctrl_base;
 };
 
 struct mc13xxx_led {
        struct led_classdev     cdev;
        struct work_struct      work;
-       struct mc13xxx          *master;
        enum led_brightness     new_brightness;
        int                     id;
+       struct mc13xxx_leds     *leds;
 };
 
 struct mc13xxx_leds {
+       struct mc13xxx                  *master;
        struct mc13xxx_led_devtype      *devtype;
        int                             num_leds;
-       struct mc13xxx_led              led[0];
+       struct mc13xxx_led              *led;
 };
 
+static unsigned int mc13xxx_max_brightness(int id)
+{
+       if (id >= MC13783_LED_MD && id <= MC13783_LED_KP)
+               return 0x0f;
+       else if (id >= MC13783_LED_R1 && id <= MC13783_LED_B3)
+               return 0x1f;
+
+       return 0x3f;
+}
+
 static void mc13xxx_led_work(struct work_struct *work)
 {
        struct mc13xxx_led *led = container_of(work, struct mc13xxx_led, work);
-       int reg, mask, value, bank, off, shift;
+       struct mc13xxx_leds *leds = led->leds;
+       unsigned int reg, bank, off, shift;
 
        switch (led->id) {
        case MC13783_LED_MD:
-               reg = MC13XXX_REG_LED_CONTROL(2);
-               shift = 9;
-               mask = 0x0f;
-               value = led->new_brightness >> 4;
-               break;
        case MC13783_LED_AD:
-               reg = MC13XXX_REG_LED_CONTROL(2);
-               shift = 13;
-               mask = 0x0f;
-               value = led->new_brightness >> 4;
-               break;
        case MC13783_LED_KP:
-               reg = MC13XXX_REG_LED_CONTROL(2);
-               shift = 17;
-               mask = 0x0f;
-               value = led->new_brightness >> 4;
+               reg = 2;
+               shift = 9 + (led->id - MC13783_LED_MD) * 4;
                break;
        case MC13783_LED_R1:
        case MC13783_LED_G1:
@@ -80,44 +79,35 @@ static void mc13xxx_led_work(struct work_struct *work)
        case MC13783_LED_B3:
                off = led->id - MC13783_LED_R1;
                bank = off / 3;
-               reg = MC13XXX_REG_LED_CONTROL(3) + bank;
+               reg = 3 + bank;
                shift = (off - bank * 3) * 5 + 6;
-               value = led->new_brightness >> 3;
-               mask = 0x1f;
                break;
        case MC13892_LED_MD:
-               reg = MC13XXX_REG_LED_CONTROL(0);
-               shift = 3;
-               mask = 0x3f;
-               value = led->new_brightness >> 2;
-               break;
        case MC13892_LED_AD:
-               reg = MC13XXX_REG_LED_CONTROL(0);
-               shift = 15;
-               mask = 0x3f;
-               value = led->new_brightness >> 2;
-               break;
        case MC13892_LED_KP:
-               reg = MC13XXX_REG_LED_CONTROL(1);
-               shift = 3;
-               mask = 0x3f;
-               value = led->new_brightness >> 2;
+               reg = (led->id - MC13892_LED_MD) / 2;
+               shift = 3 + (led->id - MC13892_LED_MD) * 12;
                break;
        case MC13892_LED_R:
        case MC13892_LED_G:
        case MC13892_LED_B:
                off = led->id - MC13892_LED_R;
                bank = off / 2;
-               reg = MC13XXX_REG_LED_CONTROL(2) + bank;
+               reg = 2 + bank;
                shift = (off - bank * 2) * 12 + 3;
-               value = led->new_brightness >> 2;
-               mask = 0x3f;
+               break;
+       case MC34708_LED_R:
+       case MC34708_LED_G:
+               reg = 0;
+               shift = 3 + (led->id - MC34708_LED_R) * 12;
                break;
        default:
                BUG();
        }
 
-       mc13xxx_reg_rmw(led->master, reg, mask << shift, value << shift);
+       mc13xxx_reg_rmw(leds->master, leds->devtype->ledctrl_base + reg,
+                       mc13xxx_max_brightness(led->id) << shift,
+                       led->new_brightness << shift);
 }
 
 static void mc13xxx_led_set(struct led_classdev *led_cdev,
@@ -130,47 +120,121 @@ static void mc13xxx_led_set(struct led_classdev *led_cdev,
        schedule_work(&led->work);
 }
 
-static int __init mc13xxx_led_probe(struct platform_device *pdev)
+#ifdef CONFIG_OF
+static struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
+       struct platform_device *pdev)
 {
-       struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
-       struct mc13xxx *mcdev = dev_get_drvdata(pdev->dev.parent);
-       struct mc13xxx_led_devtype *devtype =
-               (struct mc13xxx_led_devtype *)pdev->id_entry->driver_data;
-       struct mc13xxx_leds *leds;
-       int i, id, num_leds, ret = -ENODATA;
-       u32 reg, init_led = 0;
+       struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
+       struct mc13xxx_leds_platform_data *pdata;
+       struct device_node *parent, *child;
+       struct device *dev = &pdev->dev;
+       int i = 0, ret = -ENODATA;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       of_node_get(dev->parent->of_node);
+
+       parent = of_find_node_by_name(dev->parent->of_node, "leds");
+       if (!parent)
+               goto out_node_put;
 
-       if (!pdata) {
-               dev_err(&pdev->dev, "Missing platform data\n");
-               return -ENODEV;
+       ret = of_property_read_u32_array(parent, "led-control",
+                                        pdata->led_control,
+                                        leds->devtype->num_regs);
+       if (ret)
+               goto out_node_put;
+
+       pdata->num_leds = of_get_child_count(parent);
+
+       pdata->led = devm_kzalloc(dev, pdata->num_leds * sizeof(*pdata->led),
+                                 GFP_KERNEL);
+       if (!pdata->led) {
+               ret = -ENOMEM;
+               goto out_node_put;
        }
 
-       num_leds = pdata->num_leds;
+       for_each_child_of_node(parent, child) {
+               const char *str;
+               u32 tmp;
 
-       if ((num_leds < 1) ||
-           (num_leds > (devtype->led_max - devtype->led_min + 1))) {
-               dev_err(&pdev->dev, "Invalid LED count %d\n", num_leds);
-               return -EINVAL;
+               if (of_property_read_u32(child, "reg", &tmp))
+                       continue;
+               pdata->led[i].id = leds->devtype->led_min + tmp;
+
+               if (!of_property_read_string(child, "label", &str))
+                       pdata->led[i].name = str;
+               if (!of_property_read_string(child, "linux,default-trigger",
+                                            &str))
+                       pdata->led[i].default_trigger = str;
+
+               i++;
        }
 
-       leds = devm_kzalloc(&pdev->dev, num_leds * sizeof(struct mc13xxx_led) +
-                           sizeof(struct mc13xxx_leds), GFP_KERNEL);
+       pdata->num_leds = i;
+       ret = i > 0 ? 0 : -ENODATA;
+
+out_node_put:
+       of_node_put(parent);
+
+       return ret ? ERR_PTR(ret) : pdata;
+}
+#else
+static inline struct mc13xxx_leds_platform_data __init *mc13xxx_led_probe_dt(
+       struct platform_device *pdev)
+{
+       return ERR_PTR(-ENOSYS);
+}
+#endif
+
+static int __init mc13xxx_led_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mc13xxx_leds_platform_data *pdata = dev_get_platdata(dev);
+       struct mc13xxx *mcdev = dev_get_drvdata(dev->parent);
+       struct mc13xxx_led_devtype *devtype =
+               (struct mc13xxx_led_devtype *)pdev->id_entry->driver_data;
+       struct mc13xxx_leds *leds;
+       int i, id, ret = -ENODATA;
+       u32 init_led = 0;
+
+       leds = devm_kzalloc(dev, sizeof(*leds), GFP_KERNEL);
        if (!leds)
                return -ENOMEM;
 
        leds->devtype = devtype;
-       leds->num_leds = num_leds;
+       leds->master = mcdev;
        platform_set_drvdata(pdev, leds);
 
+       if (dev->parent->of_node) {
+               pdata = mc13xxx_led_probe_dt(pdev);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+       } else if (!pdata)
+               return -ENODATA;
+
+       leds->num_leds = pdata->num_leds;
+
+       if ((leds->num_leds < 1) ||
+           (leds->num_leds > (devtype->led_max - devtype->led_min + 1))) {
+               dev_err(dev, "Invalid LED count %d\n", leds->num_leds);
+               return -EINVAL;
+       }
+
+       leds->led = devm_kzalloc(dev, leds->num_leds * sizeof(*leds->led),
+                                GFP_KERNEL);
+       if (!leds->led)
+               return -ENOMEM;
+
        for (i = 0; i < devtype->num_regs; i++) {
-               reg = pdata->led_control[i];
-               WARN_ON(reg >= (1 << 24));
-               ret = mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), reg);
+               ret = mc13xxx_reg_write(mcdev, leds->devtype->ledctrl_base + i,
+                                       pdata->led_control[i]);
                if (ret)
                        return ret;
        }
 
-       for (i = 0; i < num_leds; i++) {
+       for (i = 0; i < leds->num_leds; i++) {
                const char *name, *trig;
 
                ret = -EINVAL;
@@ -180,30 +244,29 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
                trig = pdata->led[i].default_trigger;
 
                if ((id > devtype->led_max) || (id < devtype->led_min)) {
-                       dev_err(&pdev->dev, "Invalid ID %i\n", id);
+                       dev_err(dev, "Invalid ID %i\n", id);
                        break;
                }
 
                if (init_led & (1 << id)) {
-                       dev_warn(&pdev->dev,
-                                "LED %i already initialized\n", id);
+                       dev_warn(dev, "LED %i already initialized\n", id);
                        break;
                }
 
                init_led |= 1 << id;
                leds->led[i].id = id;
-               leds->led[i].master = mcdev;
+               leds->led[i].leds = leds;
                leds->led[i].cdev.name = name;
                leds->led[i].cdev.default_trigger = trig;
+               leds->led[i].cdev.flags = LED_CORE_SUSPENDRESUME;
                leds->led[i].cdev.brightness_set = mc13xxx_led_set;
-               leds->led[i].cdev.brightness = LED_OFF;
+               leds->led[i].cdev.max_brightness = mc13xxx_max_brightness(id);
 
                INIT_WORK(&leds->led[i].work, mc13xxx_led_work);
 
-               ret = led_classdev_register(pdev->dev.parent,
-                                           &leds->led[i].cdev);
+               ret = led_classdev_register(dev->parent, &leds->led[i].cdev);
                if (ret) {
-                       dev_err(&pdev->dev, "Failed to register LED %i\n", id);
+                       dev_err(dev, "Failed to register LED %i\n", id);
                        break;
                }
        }
@@ -219,7 +282,6 @@ static int __init mc13xxx_led_probe(struct platform_device *pdev)
 
 static int mc13xxx_led_remove(struct platform_device *pdev)
 {
-       struct mc13xxx *mcdev = dev_get_drvdata(pdev->dev.parent);
        struct mc13xxx_leds *leds = platform_get_drvdata(pdev);
        int i;
 
@@ -228,9 +290,6 @@ static int mc13xxx_led_remove(struct platform_device *pdev)
                cancel_work_sync(&leds->led[i].work);
        }
 
-       for (i = 0; i < leds->devtype->num_regs; i++)
-               mc13xxx_reg_write(mcdev, MC13XXX_REG_LED_CONTROL(i), 0);
-
        return 0;
 }
 
@@ -238,17 +297,27 @@ static const struct mc13xxx_led_devtype mc13783_led_devtype = {
        .led_min        = MC13783_LED_MD,
        .led_max        = MC13783_LED_B3,
        .num_regs       = 6,
+       .ledctrl_base   = 51,
 };
 
 static const struct mc13xxx_led_devtype mc13892_led_devtype = {
        .led_min        = MC13892_LED_MD,
        .led_max        = MC13892_LED_B,
        .num_regs       = 4,
+       .ledctrl_base   = 51,
+};
+
+static const struct mc13xxx_led_devtype mc34708_led_devtype = {
+       .led_min        = MC34708_LED_R,
+       .led_max        = MC34708_LED_G,
+       .num_regs       = 1,
+       .ledctrl_base   = 54,
 };
 
 static const struct platform_device_id mc13xxx_led_id_table[] = {
        { "mc13783-led", (kernel_ulong_t)&mc13783_led_devtype, },
        { "mc13892-led", (kernel_ulong_t)&mc13892_led_devtype, },
+       { "mc34708-led", (kernel_ulong_t)&mc34708_led_devtype, },
        { }
 };
 MODULE_DEVICE_TABLE(platform, mc13xxx_led_id_table);
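
The leds-mc13783 rework drops the per-LED register tables and derives the register and bit position arithmetically from the LED id plus the per-variant ledctrl_base, with the field mask coming from mc13xxx_max_brightness(). A standalone sketch of that calculation for the MC13892-style tri-colour channels only (register base and field width are taken from the hunk above; the enum is illustrative):

#include <stdio.h>

/* Illustrative ids for the MC13892 tri-colour channels: R, G, B. */
enum led_id { LED_R, LED_G, LED_B };

#define LEDCTRL_BASE    51      /* devtype->ledctrl_base for MC13892 */
#define MAX_BRIGHTNESS  0x3f    /* 6-bit duty-cycle field */

struct led_field {
        unsigned int reg;       /* absolute register number */
        unsigned int shift;     /* bit position of the duty-cycle field */
};

static struct led_field led_lookup(enum led_id id)
{
        unsigned int off = id - LED_R;
        unsigned int bank = off / 2;            /* two channels per register */
        struct led_field f = {
                .reg   = LEDCTRL_BASE + 2 + bank,
                .shift = (off - bank * 2) * 12 + 3,
        };

        return f;
}

int main(void)
{
        for (int id = LED_R; id <= LED_B; id++) {
                struct led_field f = led_lookup(id);

                /* The driver then does: rmw(reg, max << shift, value << shift) */
                printf("led %d -> reg %u, mask 0x%08x\n",
                       id, f.reg, MAX_BRIGHTNESS << f.shift);
        }
        return 0;
}
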
index 2f9f141084baa2c2743105fa06c30170b84c89ef..e97f443a6e07720f9652a1fae1dc0e25e8f36aa4 100644 (file)
@@ -21,7 +21,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
index c7a4230233ea991e660dabf6f55ceedff9c5c381..efa625883c836359e42638285cdbc2d52cdb7220 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/gpio.h>
index 98cae529373f44aa202a1a5c0f9d2f2259ab0e98..c9d906098466becfbbbb7d1fb6c90e7996d790fd 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/leds.h>
index 605047428b5ad75e2d67add0edaecb912d35581a..7d0aaed1e23a881ab28997b09df13e372d538c36 100644 (file)
@@ -14,7 +14,6 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/of_platform.h>
 #include <linux/fb.h>
@@ -84,6 +83,15 @@ static inline size_t sizeof_pwm_leds_priv(int num_leds)
                      (sizeof(struct led_pwm_data) * num_leds);
 }
 
+static void led_pwm_cleanup(struct led_pwm_priv *priv)
+{
+       while (priv->num_leds--) {
+               led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
+               if (priv->leds[priv->num_leds].can_sleep)
+                       cancel_work_sync(&priv->leds[priv->num_leds].work);
+       }
+}
+
 static int led_pwm_create_of(struct platform_device *pdev,
                             struct led_pwm_priv *priv)
 {
@@ -131,8 +139,7 @@ static int led_pwm_create_of(struct platform_device *pdev,
 
        return 0;
 err:
-       while (priv->num_leds--)
-               led_classdev_unregister(&priv->leds[priv->num_leds].cdev);
+       led_pwm_cleanup(priv);
 
        return ret;
 }
@@ -200,8 +207,8 @@ static int led_pwm_probe(struct platform_device *pdev)
        return 0;
 
 err:
-       while (i--)
-               led_classdev_unregister(&priv->leds[i].cdev);
+       priv->num_leds = i;
+       led_pwm_cleanup(priv);
 
        return ret;
 }
@@ -209,13 +216,8 @@ err:
 static int led_pwm_remove(struct platform_device *pdev)
 {
        struct led_pwm_priv *priv = platform_get_drvdata(pdev);
-       int i;
 
-       for (i = 0; i < priv->num_leds; i++) {
-               led_classdev_unregister(&priv->leds[i].cdev);
-               if (priv->leds[i].can_sleep)
-                       cancel_work_sync(&priv->leds[i].work);
-       }
+       led_pwm_cleanup(priv);
 
        return 0;
 }
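
led_pwm_cleanup() gives the probe error paths and the remove path one shared teardown routine, driven by priv->num_leds counting down over whatever was actually set up. A small sketch of that shared-unwind pattern (malloc stands in for the per-LED registration):

#include <stdio.h>
#include <stdlib.h>

struct item {
        int *buf;
};

struct ctx {
        int num;                /* how many items are currently set up */
        struct item items[8];
};

/* One teardown routine shared by the error path and the remove path. */
static void ctx_cleanup(struct ctx *c)
{
        while (c->num--)
                free(c->items[c->num].buf);
}

static int ctx_setup(struct ctx *c, int want)
{
        int i;

        for (i = 0; i < want; i++) {
                c->items[i].buf = malloc(64);
                if (!c->items[i].buf) {
                        /* record how far we got, then unwind exactly that much */
                        c->num = i;
                        ctx_cleanup(c);
                        return -1;
                }
        }
        c->num = want;
        return 0;
}

int main(void)
{
        struct ctx c = { 0 };

        if (ctx_setup(&c, 4) == 0)
                printf("set up %d items\n", c.num);

        ctx_cleanup(&c);        /* the "remove" path reuses the same helper */
        return 0;
}
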
index 98174e7240ee9f41b8a4d0cddae494ebff99da65..28988b7b4faba371970027eb6459105bb3ea712d 100644 (file)
@@ -12,7 +12,6 @@
 */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/gpio.h>
index 5b8f938a8d734995e43eda073c3db077d2875183..2eb3ef62962b6fc510bef812dd3bc83fdecfd1ff 100644 (file)
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
 /*
  * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
  */
-static DEFINE_PCI_DEVICE_TABLE(ich7_lpc_pci_id) = {
+static const struct pci_device_id ich7_lpc_pci_id[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_30) },
@@ -78,7 +78,7 @@ static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
        return 1;
 }
 
-static bool __initdata nodetect;
+static bool nodetect;
 module_param_named(nodetect, nodetect, bool, 0);
 MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
 
index 0a1a13f3a6a51b1037bc7f61899d645c5aa8b115..e72c974142d00e6cb1962f6c6444e6d55de8c3b1 100644 (file)
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/leds.h>
index 3f75fd22fd495d0db8bf552061355be43315a8e8..4133ffe2901529838faa608047f2593553822514 100644 (file)
@@ -10,7 +10,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/err.h>
index 118335eccc563d6944c4dd5a99409e744b5f6018..1c3ee9fcaf34c5a1cb30e0efc1bf8c8109f7dd5b 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/percpu.h>
 #include <linux/syscore_ops.h>
 #include <linux/rwsem.h>
+#include <linux/cpu.h>
 #include "../leds.h"
 
 #define MAX_NAME_LEN   8
@@ -92,6 +93,26 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = {
        .resume         = ledtrig_cpu_syscore_resume,
 };
 
+static int ledtrig_cpu_notify(struct notifier_block *self,
+                                          unsigned long action, void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_STARTING:
+               ledtrig_cpu(CPU_LED_START);
+               break;
+       case CPU_DYING:
+               ledtrig_cpu(CPU_LED_STOP);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+
+static struct notifier_block ledtrig_cpu_nb = {
+       .notifier_call = ledtrig_cpu_notify,
+};
+
 static int __init ledtrig_cpu_init(void)
 {
        int cpu;
@@ -113,6 +134,7 @@ static int __init ledtrig_cpu_init(void)
        }
 
        register_syscore_ops(&ledtrig_cpu_syscore_ops);
+       register_cpu_notifier(&ledtrig_cpu_nb);
 
        pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
 
@@ -124,6 +146,8 @@ static void __exit ledtrig_cpu_exit(void)
 {
        int cpu;
 
+       unregister_cpu_notifier(&ledtrig_cpu_nb);
+
        for_each_possible_cpu(cpu) {
                struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu);
 
index 4195a01b15359bcf44950b2904c723194c4d048b..9a8e66ae04f51e95c169ea9a38f48b0d83f6ed03 100644 (file)
@@ -1988,7 +1988,6 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
                if (mddev->bitmap_info.file) {
                        struct file *f = mddev->bitmap_info.file;
                        mddev->bitmap_info.file = NULL;
-                       restore_bitmap_write_access(f);
                        fput(f);
                }
        } else {
index 4ad5cc4e63e8438ca3c32fea1f40f69ec71657fb..8fda38d23e3847aa4d96ecd147e996514a5a4af7 100644 (file)
@@ -5181,32 +5181,6 @@ static int restart_array(struct mddev *mddev)
        return 0;
 }
 
-/* similar to deny_write_access, but accounts for our holding a reference
- * to the file ourselves */
-static int deny_bitmap_write_access(struct file * file)
-{
-       struct inode *inode = file->f_mapping->host;
-
-       spin_lock(&inode->i_lock);
-       if (atomic_read(&inode->i_writecount) > 1) {
-               spin_unlock(&inode->i_lock);
-               return -ETXTBSY;
-       }
-       atomic_set(&inode->i_writecount, -1);
-       spin_unlock(&inode->i_lock);
-
-       return 0;
-}
-
-void restore_bitmap_write_access(struct file *file)
-{
-       struct inode *inode = file->f_mapping->host;
-
-       spin_lock(&inode->i_lock);
-       atomic_set(&inode->i_writecount, 1);
-       spin_unlock(&inode->i_lock);
-}
-
 static void md_clean(struct mddev *mddev)
 {
        mddev->array_sectors = 0;
@@ -5427,7 +5401,6 @@ static int do_md_stop(struct mddev * mddev, int mode,
 
                bitmap_destroy(mddev);
                if (mddev->bitmap_info.file) {
-                       restore_bitmap_write_access(mddev->bitmap_info.file);
                        fput(mddev->bitmap_info.file);
                        mddev->bitmap_info.file = NULL;
                }
@@ -5979,7 +5952,7 @@ abort_export:
 
 static int set_bitmap_file(struct mddev *mddev, int fd)
 {
-       int err;
+       int err = 0;
 
        if (mddev->pers) {
                if (!mddev->pers->quiesce)
@@ -5991,6 +5964,7 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
 
 
        if (fd >= 0) {
+               struct inode *inode;
                if (mddev->bitmap)
                        return -EEXIST; /* cannot add when bitmap is present */
                mddev->bitmap_info.file = fget(fd);
@@ -6001,10 +5975,21 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
                        return -EBADF;
                }
 
-               err = deny_bitmap_write_access(mddev->bitmap_info.file);
-               if (err) {
+               inode = mddev->bitmap_info.file->f_mapping->host;
+               if (!S_ISREG(inode->i_mode)) {
+                       printk(KERN_ERR "%s: error: bitmap file must be a regular file\n",
+                              mdname(mddev));
+                       err = -EBADF;
+               } else if (!(mddev->bitmap_info.file->f_mode & FMODE_WRITE)) {
+                       printk(KERN_ERR "%s: error: bitmap file must be opened for write\n",
+                              mdname(mddev));
+                       err = -EBADF;
+               } else if (atomic_read(&inode->i_writecount) != 1) {
                        printk(KERN_ERR "%s: error: bitmap file is already in use\n",
                               mdname(mddev));
+                       err = -EBUSY;
+               }
+               if (err) {
                        fput(mddev->bitmap_info.file);
                        mddev->bitmap_info.file = NULL;
                        return err;
@@ -6027,10 +6012,8 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
                mddev->pers->quiesce(mddev, 0);
        }
        if (fd < 0) {
-               if (mddev->bitmap_info.file) {
-                       restore_bitmap_write_access(mddev->bitmap_info.file);
+               if (mddev->bitmap_info.file)
                        fput(mddev->bitmap_info.file);
-               }
                mddev->bitmap_info.file = NULL;
        }
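
With deny_bitmap_write_access() gone, set_bitmap_file() now validates the file directly: it must be a regular file, opened for writing, and not open anywhere else. A rough userspace analogue of the first two checks on a plain file descriptor (the i_writecount test has no direct userspace equivalent and is left out; the path in main() is just an example):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

/* Validate an fd the way set_bitmap_file() now validates the bitmap file. */
static int check_bitmap_fd(int fd)
{
        struct stat st;
        int flags;

        if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode)) {
                fprintf(stderr, "bitmap file must be a regular file\n");
                return -1;
        }

        flags = fcntl(fd, F_GETFL);
        if (flags < 0 || (flags & O_ACCMODE) == O_RDONLY) {
                fprintf(stderr, "bitmap file must be opened for writing\n");
                return -1;
        }

        return 0;
}

int main(void)
{
        int fd = open("/tmp/bitmap-test", O_RDWR | O_CREAT, 0600);

        if (fd >= 0) {
                printf("valid: %s\n", check_bitmap_fd(fd) == 0 ? "yes" : "no");
                close(fd);
        }
        return 0;
}
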
 
@@ -7182,11 +7165,14 @@ static int md_seq_open(struct inode *inode, struct file *file)
        return error;
 }
 
+static int md_unloading;
 static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
 {
        struct seq_file *seq = filp->private_data;
        int mask;
 
+       if (md_unloading)
+               return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
        poll_wait(filp, &md_event_waiters, wait);
 
        /* always allow read */
@@ -8672,6 +8658,7 @@ static __exit void md_exit(void)
 {
        struct mddev *mddev;
        struct list_head *tmp;
+       int delay = 1;
 
        blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
        blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
@@ -8680,7 +8667,19 @@ static __exit void md_exit(void)
        unregister_blkdev(mdp_major, "mdp");
        unregister_reboot_notifier(&md_notifier);
        unregister_sysctl_table(raid_table_header);
+
+       /* We cannot unload the modules while some process is
+        * waiting for us in select() or poll() - wake them up
+        */
+       md_unloading = 1;
+       while (waitqueue_active(&md_event_waiters)) {
+               /* not safe to leave yet */
+               wake_up(&md_event_waiters);
+               msleep(delay);
+               delay += delay;
+       }
        remove_proc_entry("mdstat", NULL);
+
        for_each_mddev(mddev, tmp) {
                export_array(mddev);
                mddev->hold_active = 0;
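
md_exit() now refuses to tear down /proc/mdstat while something is still sleeping in poll(): it wakes the waiters and retries with a doubling delay. A trivial sketch of that backoff loop with the waitqueue stubbed out:

#include <stdio.h>
#include <unistd.h>

static int waiters = 3;

/* Stand-ins for waitqueue_active() and wake_up(). */
static int  waitqueue_active(void) { return waiters > 0; }
static void wake_up(void)          { if (waiters) waiters--; }

int main(void)
{
        int delay_ms = 1;

        while (waitqueue_active()) {
                /* not safe to tear down yet: nudge the sleepers and back off */
                wake_up();
                usleep(delay_ms * 1000);
                delay_ms += delay_ms;   /* exponential backoff, as in md_exit() */
        }

        printf("all waiters gone after backing off to %d ms\n", delay_ms);
        return 0;
}
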
index 07bba96de26047d3f4b868eefc78d3727f0be52d..a49d991f3fe11d4d9fa1622096c1ca2735c0572d 100644 (file)
@@ -605,7 +605,6 @@ extern int md_check_no_bitmap(struct mddev *mddev);
 extern int md_integrity_register(struct mddev *mddev);
 extern void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev);
 extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
-extern void restore_bitmap_write_access(struct file *file);
 
 extern void mddev_init(struct mddev *mddev);
 extern int md_run(struct mddev *mddev);
index 4a6ca1cb2e78539679b96a00b89542f6f0eab8f0..56e24c072b629324ec382037ab17fd43310ec3ef 100644 (file)
@@ -97,6 +97,7 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
        struct pool_info *pi = data;
        struct r1bio *r1_bio;
        struct bio *bio;
+       int need_pages;
        int i, j;
 
        r1_bio = r1bio_pool_alloc(gfp_flags, pi);
@@ -119,15 +120,15 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
         * RESYNC_PAGES for each bio.
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
-               j = pi->raid_disks;
+               need_pages = pi->raid_disks;
        else
-               j = 1;
-       while(j--) {
+               need_pages = 1;
+       for (j = 0; j < need_pages; j++) {
                bio = r1_bio->bios[j];
                bio->bi_vcnt = RESYNC_PAGES;
 
                if (bio_alloc_pages(bio, gfp_flags))
-                       goto out_free_bio;
+                       goto out_free_pages;
        }
        /* If not user-requested, copy the page pointers to all bios */
        if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -141,6 +142,14 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 
        return r1_bio;
 
+out_free_pages:
+       while (--j >= 0) {
+               struct bio_vec *bv;
+
+               bio_for_each_segment_all(bv, r1_bio->bios[j], i)
+                       __free_page(bv->bv_page);
+       }
+
 out_free_bio:
        while (++j < pi->raid_disks)
                bio_put(r1_bio->bios[j]);
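
The raid1 fix adds an out_free_pages label so that when bio_alloc_pages() fails part-way through, only the bios that already received pages are unwound instead of being leaked. A minimal sketch of unwinding exactly the successful iterations (malloc stands in for the page allocation):

#include <stdio.h>
#include <stdlib.h>

#define N 4

int main(void)
{
        void *pages[N];
        int j;

        for (j = 0; j < N; j++) {
                pages[j] = (j == 2) ? NULL : malloc(4096);  /* pretend #2 fails */
                if (!pages[j])
                        goto out_free_pages;
        }

        printf("all %d allocations succeeded\n", N);
        while (--j >= 0)
                free(pages[j]);
        return 0;

out_free_pages:
        fprintf(stderr, "allocation %d failed; freeing the earlier ones\n", j);
        /* unwind exactly the successful iterations: indices 0 .. j-1 */
        while (--j >= 0)
                free(pages[j]);
        return 1;
}
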
index 16f5c21963db5391ed25fd1e185ab8399f353e74..25247a8529124f7cc93eda0db7033655d52f2ef7 100644 (file)
@@ -679,14 +679,9 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
                                init_stripe(sh, sector, previous);
                                atomic_inc(&sh->count);
                        }
-               } else {
+               } else if (!atomic_inc_not_zero(&sh->count)) {
                        spin_lock(&conf->device_lock);
-                       if (atomic_read(&sh->count)) {
-                               BUG_ON(!list_empty(&sh->lru)
-                                   && !test_bit(STRIPE_EXPANDING, &sh->state)
-                                   && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)
-                                       );
-                       } else {
+                       if (!atomic_read(&sh->count)) {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
                                BUG_ON(list_empty(&sh->lru) &&
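
The first raid5 hunk replaces "take device_lock, then look at the count" with atomic_inc_not_zero(): grab a reference locklessly whenever the stripe count is already non-zero, and only fall back to the locked slow path when it has hit zero. A C11 sketch of that fast path (single-threaded here, so it only shows the control flow, not the contention it avoids):

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Increment *count unless it is zero; mirrors atomic_inc_not_zero(). */
static bool inc_not_zero(atomic_int *count)
{
        int old = atomic_load(count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(count, &old, old + 1))
                        return true;
                /* old was reloaded by the failed CAS; retry */
        }
        return false;
}

static void get_ref(atomic_int *count)
{
        if (inc_not_zero(count))
                return;                 /* fast path: no lock needed */

        /* slow path: in the driver this runs under conf->device_lock */
        atomic_fetch_add(count, 1);
}

int main(void)
{
        atomic_int live = 2, dead = 0;

        get_ref(&live);
        get_ref(&dead);
        printf("live=%d dead=%d\n", atomic_load(&live), atomic_load(&dead));
        return 0;
}
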
@@ -4552,6 +4547,8 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        struct stripe_head *sh;
        const int rw = bio_data_dir(bi);
        int remaining;
+       DEFINE_WAIT(w);
+       bool do_prepare;
 
        if (unlikely(bi->bi_rw & REQ_FLUSH)) {
                md_flush_request(mddev, bi);
@@ -4575,15 +4572,18 @@ static void make_request(struct mddev *mddev, struct bio * bi)
        bi->bi_next = NULL;
        bi->bi_phys_segments = 1;       /* over-loaded to count active stripes */
 
+       prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
        for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) {
-               DEFINE_WAIT(w);
                int previous;
                int seq;
 
+               do_prepare = false;
        retry:
                seq = read_seqcount_begin(&conf->gen_lock);
                previous = 0;
-               prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
+               if (do_prepare)
+                       prepare_to_wait(&conf->wait_for_overlap, &w,
+                               TASK_UNINTERRUPTIBLE);
                if (unlikely(conf->reshape_progress != MaxSector)) {
                        /* spinlock is needed as reshape_progress may be
                         * 64bit on a 32bit platform, and so it might be
@@ -4604,6 +4604,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                                    : logical_sector >= conf->reshape_safe) {
                                        spin_unlock_irq(&conf->device_lock);
                                        schedule();
+                                       do_prepare = true;
                                        goto retry;
                                }
                        }
@@ -4640,6 +4641,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                                if (must_retry) {
                                        release_stripe(sh);
                                        schedule();
+                                       do_prepare = true;
                                        goto retry;
                                }
                        }
@@ -4663,8 +4665,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                                prepare_to_wait(&conf->wait_for_overlap,
                                                &w, TASK_INTERRUPTIBLE);
                                if (logical_sector >= mddev->suspend_lo &&
-                                   logical_sector < mddev->suspend_hi)
+                                   logical_sector < mddev->suspend_hi) {
                                        schedule();
+                                       do_prepare = true;
+                               }
                                goto retry;
                        }
 
@@ -4677,9 +4681,9 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                                md_wakeup_thread(mddev->thread);
                                release_stripe(sh);
                                schedule();
+                               do_prepare = true;
                                goto retry;
                        }
-                       finish_wait(&conf->wait_for_overlap, &w);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if ((bi->bi_rw & REQ_SYNC) &&
@@ -4689,10 +4693,10 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                } else {
                        /* cannot get stripe for read-ahead, just give-up */
                        clear_bit(BIO_UPTODATE, &bi->bi_flags);
-                       finish_wait(&conf->wait_for_overlap, &w);
                        break;
                }
        }
+       finish_wait(&conf->wait_for_overlap, &w);
 
        remaining = raid5_dec_bi_active_stripes(bi);
        if (remaining == 0) {
index 15628eb5cf0c04bc561b923071523960b719de11..6c2ccb6a506bca2521e45778fc6d8d7606abc62b 100644 (file)
@@ -1,7 +1,7 @@
 config DVB_DRX39XYJ
        tristate "Micronas DRX-J demodulator"
        depends on DVB_CORE && I2C
-       default m if DVB_FE_CUSTOMISE
+       default m if !MEDIA_SUBDRV_AUTOSELECT
        help
          An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
          to support this frontend.
index 1d2c47378cf856b392534dd2b4599de7d3ac344f..92c891a571abf484e7d931c45e0522af1a0e337d 100644 (file)
@@ -1176,6 +1176,7 @@ static struct dvb_frontend_ops lgdt3304_ops = {
        },
        .i2c_gate_ctrl        = lgdt3305_i2c_gate_ctrl,
        .init                 = lgdt3305_init,
+       .sleep                = lgdt3305_sleep,
        .set_frontend         = lgdt3304_set_parameters,
        .get_frontend         = lgdt3305_get_frontend,
        .get_tune_settings    = lgdt3305_get_tune_settings,
index 32cffca14d0b6ada3e96cdfb8a561c8f0ad1c4a4..d63bc9c13dce80a3e657d17b3eb9e9d15ec73086 100644 (file)
@@ -297,7 +297,7 @@ struct inittab {
        u8 val;
 };
 
-struct inittab m88rs2000_setup[] = {
+static struct inittab m88rs2000_setup[] = {
        {DEMOD_WRITE, 0x9a, 0x30},
        {DEMOD_WRITE, 0x00, 0x01},
        {WRITE_DELAY, 0x19, 0x00},
@@ -315,7 +315,7 @@ struct inittab m88rs2000_setup[] = {
        {0xff, 0xaa, 0xff}
 };
 
-struct inittab m88rs2000_shutdown[] = {
+static struct inittab m88rs2000_shutdown[] = {
        {DEMOD_WRITE, 0x9a, 0x30},
        {DEMOD_WRITE, 0xb0, 0x00},
        {DEMOD_WRITE, 0xf1, 0x89},
@@ -325,7 +325,7 @@ struct inittab m88rs2000_shutdown[] = {
        {0xff, 0xaa, 0xff}
 };
 
-struct inittab fe_reset[] = {
+static struct inittab fe_reset[] = {
        {DEMOD_WRITE, 0x00, 0x01},
        {DEMOD_WRITE, 0x20, 0x81},
        {DEMOD_WRITE, 0x21, 0x80},
@@ -363,7 +363,7 @@ struct inittab fe_reset[] = {
        {0xff, 0xaa, 0xff}
 };
 
-struct inittab fe_trigger[] = {
+static struct inittab fe_trigger[] = {
        {DEMOD_WRITE, 0x97, 0x04},
        {DEMOD_WRITE, 0x99, 0x77},
        {DEMOD_WRITE, 0x9b, 0x64},
index 7a77a5b7a0754edcdb04eb59aa7e15345e10c2b3..5c421886d97c86d6518fa142f73c1c2ddb6b83b5 100644 (file)
@@ -49,8 +49,8 @@
 #define VPE_MODULE_NAME "vpe"
 
 /* minimum and maximum frame sizes */
-#define MIN_W          128
-#define MIN_H          128
+#define MIN_W          32
+#define MIN_H          32
 #define MAX_W          1920
 #define MAX_H          1080
 
@@ -887,6 +887,9 @@ static int job_ready(void *priv)
        if (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) < needed)
                return 0;
 
+       if (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) < needed)
+               return 0;
+
        return 1;
 }
 
@@ -1277,18 +1280,17 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
        s_buf = &s_vb->v4l2_buf;
        d_buf = &d_vb->v4l2_buf;
 
+       d_buf->flags = s_buf->flags;
+
        d_buf->timestamp = s_buf->timestamp;
-       d_buf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       d_buf->flags |= s_buf->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
-       if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE) {
-               d_buf->flags |= V4L2_BUF_FLAG_TIMECODE;
+       if (s_buf->flags & V4L2_BUF_FLAG_TIMECODE)
                d_buf->timecode = s_buf->timecode;
-       }
+
        d_buf->sequence = ctx->sequence;
-       d_buf->field = ctx->field;
 
        d_q_data = &ctx->q_data[Q_DATA_DST];
        if (d_q_data->flags & Q_DATA_INTERLACED) {
+               d_buf->field = ctx->field;
                if (ctx->field == V4L2_FIELD_BOTTOM) {
                        ctx->sequence++;
                        ctx->field = V4L2_FIELD_TOP;
@@ -1297,6 +1299,7 @@ static irqreturn_t vpe_irq(int irq_vpe, void *data)
                        ctx->field = V4L2_FIELD_BOTTOM;
                }
        } else {
+               d_buf->field = V4L2_FIELD_NONE;
                ctx->sequence++;
        }
 
@@ -1335,8 +1338,9 @@ static int vpe_querycap(struct file *file, void *priv,
 {
        strncpy(cap->driver, VPE_MODULE_NAME, sizeof(cap->driver) - 1);
        strncpy(cap->card, VPE_MODULE_NAME, sizeof(cap->card) - 1);
-       strlcpy(cap->bus_info, VPE_MODULE_NAME, sizeof(cap->bus_info));
-       cap->device_caps  = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING;
+       snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+               VPE_MODULE_NAME);
+       cap->device_caps  = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
        cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
        return 0;
 }
@@ -1476,6 +1480,7 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
                }
        }
 
+       memset(pix->reserved, 0, sizeof(pix->reserved));
        for (i = 0; i < pix->num_planes; i++) {
                plane_fmt = &pix->plane_fmt[i];
                depth = fmt->vpdma_fmt[i]->depth;
@@ -1487,6 +1492,8 @@ static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f,
 
                plane_fmt->sizeimage =
                                (pix->height * pix->width * depth) >> 3;
+
+               memset(plane_fmt->reserved, 0, sizeof(plane_fmt->reserved));
        }
 
        return 0;
@@ -1717,6 +1724,16 @@ static int vpe_buf_prepare(struct vb2_buffer *vb)
        q_data = get_q_data(ctx, vb->vb2_queue->type);
        num_planes = q_data->fmt->coplanar ? 2 : 1;
 
+       if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+               if (!(q_data->flags & Q_DATA_INTERLACED)) {
+                       vb->v4l2_buf.field = V4L2_FIELD_NONE;
+               } else {
+                       if (vb->v4l2_buf.field != V4L2_FIELD_TOP &&
+                                       vb->v4l2_buf.field != V4L2_FIELD_BOTTOM)
+                               return -EINVAL;
+               }
+       }
+
        for (i = 0; i < num_planes; i++) {
                if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
                        vpe_err(ctx->dev,
@@ -1866,9 +1883,11 @@ static int vpe_open(struct file *file)
        s_q_data->fmt = &vpe_formats[2];
        s_q_data->width = 1920;
        s_q_data->height = 1080;
-       s_q_data->sizeimage[VPE_LUMA] = (s_q_data->width * s_q_data->height *
+       s_q_data->bytesperline[VPE_LUMA] = (s_q_data->width *
                        s_q_data->fmt->vpdma_fmt[VPE_LUMA]->depth) >> 3;
-       s_q_data->colorspace = V4L2_COLORSPACE_SMPTE170M;
+       s_q_data->sizeimage[VPE_LUMA] = (s_q_data->bytesperline[VPE_LUMA] *
+                       s_q_data->height);
+       s_q_data->colorspace = V4L2_COLORSPACE_REC709;
        s_q_data->field = V4L2_FIELD_NONE;
        s_q_data->c_rect.left = 0;
        s_q_data->c_rect.top = 0;
@@ -2002,7 +2021,7 @@ static struct video_device vpe_videodev = {
        .fops           = &vpe_fops,
        .ioctl_ops      = &vpe_ioctl_ops,
        .minor          = -1,
-       .release        = video_device_release,
+       .release        = video_device_release_empty,
        .vfl_dir        = VFL_DIR_M2M,
 };
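
In vpe_open() above, the default format now computes bytesperline first (width times the plane's bit depth, divided by 8) and derives sizeimage from it. A quick illustration of that arithmetic with example depths (the numbers are not read from the driver's format tables):

#include <stdio.h>

/* bytesperline = width * depth_bits / 8; sizeimage = bytesperline * height */
static void plane_sizes(unsigned int width, unsigned int height,
                        unsigned int depth_bits)
{
        unsigned int bytesperline = (width * depth_bits) >> 3;
        unsigned int sizeimage = bytesperline * height;

        printf("%ux%u @ %u bpp -> stride %u, plane size %u\n",
               width, height, depth_bits, bytesperline, sizeimage);
}

int main(void)
{
        plane_sizes(1920, 1080, 8);     /* an 8-bit per pixel plane */
        plane_sizes(1920, 1080, 16);    /* a 16-bit per pixel packed format */
        return 0;
}
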
 
index 579a52b3edce1a883ad2f34e3d2d783d50fb750f..0127dd257a578e8ceb3b7b8ef82d478f25091083 100644 (file)
@@ -504,6 +504,18 @@ unlock:
        return ret;
 }
 
+static int img_ir_set_normal_filter(struct rc_dev *dev,
+                                   struct rc_scancode_filter *sc_filter)
+{
+       return img_ir_set_filter(dev, RC_FILTER_NORMAL, sc_filter);
+}
+
+static int img_ir_set_wakeup_filter(struct rc_dev *dev,
+                                   struct rc_scancode_filter *sc_filter)
+{
+       return img_ir_set_filter(dev, RC_FILTER_WAKEUP, sc_filter);
+}
+
 /**
  * img_ir_set_decoder() - Set the current decoder.
  * @priv:      IR private data.
@@ -986,7 +998,8 @@ int img_ir_probe_hw(struct img_ir_priv *priv)
        rdev->map_name = RC_MAP_EMPTY;
        rc_set_allowed_protocols(rdev, img_ir_allowed_protos(priv));
        rdev->input_name = "IMG Infrared Decoder";
-       rdev->s_filter = img_ir_set_filter;
+       rdev->s_filter = img_ir_set_normal_filter;
+       rdev->s_wakeup_filter = img_ir_set_wakeup_filter;
 
        /* Register hardware decoder */
        error = rc_register_device(rdev);
index e7a731bc3a9b26b0297aae0b79cd508bdaf16653..751d9d94526931bb78b210eb1e5f18146c6aa789 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include "img-ir-hw.h"
+#include <linux/bitrev.h>
 
 /* Convert NEC data to a scancode */
 static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
@@ -22,11 +23,11 @@ static int img_ir_nec_scancode(int len, u64 raw, int *scancode, u64 protocols)
        data_inv = (raw >> 24) & 0xff;
        if ((data_inv ^ data) != 0xff) {
                /* 32-bit NEC (used by Apple and TiVo remotes) */
-               /* scan encoding: aaAAddDD */
-               *scancode = addr_inv << 24 |
-                           addr     << 16 |
-                           data_inv <<  8 |
-                           data;
+               /* scan encoding: as transmitted, MSBit = first received bit */
+               *scancode = bitrev8(addr)     << 24 |
+                           bitrev8(addr_inv) << 16 |
+                           bitrev8(data)     <<  8 |
+                           bitrev8(data_inv);
        } else if ((addr_inv ^ addr) != 0xff) {
                /* Extended NEC */
                /* scan encoding: AAaaDD */
@@ -54,13 +55,15 @@ static int img_ir_nec_filter(const struct rc_scancode_filter *in,
 
        if ((in->data | in->mask) & 0xff000000) {
                /* 32-bit NEC (used by Apple and TiVo remotes) */
-               /* scan encoding: aaAAddDD */
-               addr_inv   = (in->data >> 24) & 0xff;
-               addr_inv_m = (in->mask >> 24) & 0xff;
-               addr       = (in->data >> 16) & 0xff;
-               addr_m     = (in->mask >> 16) & 0xff;
-               data_inv   = (in->data >>  8) & 0xff;
-               data_inv_m = (in->mask >>  8) & 0xff;
+               /* scan encoding: as transmitted, MSBit = first received bit */
+               addr       = bitrev8(in->data >> 24);
+               addr_m     = bitrev8(in->mask >> 24);
+               addr_inv   = bitrev8(in->data >> 16);
+               addr_inv_m = bitrev8(in->mask >> 16);
+               data       = bitrev8(in->data >>  8);
+               data_m     = bitrev8(in->mask >>  8);
+               data_inv   = bitrev8(in->data >>  0);
+               data_inv_m = bitrev8(in->mask >>  0);
        } else if ((in->data | in->mask) & 0x00ff0000) {
                /* Extended NEC */
                /* scan encoding AAaaDD */
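
32-bit NEC scancodes are now composed from bit-reversed bytes so the value reads "as transmitted, MSBit = first received bit"; the same conversion is why the TiVo keymap entries further down move from the 0x3085... form to 0xa10c.... A standalone sketch with a local byte-reversal helper (bitrev8 is reimplemented here rather than pulled from linux/bitrev.h); the sample bytes reproduce the TV Power key, whose table entry changes from 0x3085e010 to 0xa10c0807:

#include <stdio.h>
#include <stdint.h>

/* Reverse the bit order of one byte (same contract as the kernel's bitrev8). */
static uint8_t bitrev8(uint8_t b)
{
        b = (b & 0xf0) >> 4 | (b & 0x0f) << 4;
        b = (b & 0xcc) >> 2 | (b & 0x33) << 2;
        b = (b & 0xaa) >> 1 | (b & 0x55) << 1;
        return b;
}

/* Compose a 32-bit NEC scancode the "as transmitted" way. */
static uint32_t nec32_scancode(uint8_t addr, uint8_t addr_inv,
                               uint8_t data, uint8_t data_inv)
{
        return (uint32_t)bitrev8(addr)     << 24 |
               (uint32_t)bitrev8(addr_inv) << 16 |
               (uint32_t)bitrev8(data)     <<  8 |
                bitrev8(data_inv);
}

int main(void)
{
        /* The raw bytes behind the TiVo "TV Power" key. */
        uint8_t addr = 0x85, addr_inv = 0x30, data = 0x10, data_inv = 0xe0;

        printf("old style (aaAAddDD): 0x%08x\n",
               (uint32_t)addr_inv << 24 | (uint32_t)addr << 16 |
               (uint32_t)data_inv << 8 | data);
        printf("new style (bit-reversed, as transmitted): 0x%08x\n",
               nec32_scancode(addr, addr_inv, data, data_inv));
        return 0;
}
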
index 9de1791d24946fcd43f89705b11c7d0f7a90b4bb..35c42e5e270b718d494b63fd12b9a3d123fc650d 100644 (file)
@@ -172,10 +172,7 @@ static int ir_nec_decode(struct rc_dev *dev, struct ir_raw_event ev)
                if (send_32bits) {
                        /* NEC transport, but modified protocol, used by at
                         * least Apple and TiVo remotes */
-                       scancode = not_address << 24 |
-                                  address     << 16 |
-                                  not_command <<  8 |
-                                  command;
+                       scancode = data->bits;
                        IR_dprintk(1, "NEC (modified) scancode 0x%08x\n", scancode);
                } else if ((address ^ not_address) != 0xff) {
                        /* Extended NEC */
index 5cc1b456e3299893b37e27f92310453cfc0e898f..454e062956925ee6b82ad195f10ca6bca82d4516 100644 (file)
  * Initial mapping is for the TiVo remote included in the Nero LiquidTV bundle,
  * which also ships with a TiVo-branded IR transceiver, supported by the mceusb
  * driver. Note that the remote uses an NEC-ish protocol, but instead of having
- * a command/not_command pair, it has a vendor ID of 0x3085, but some keys, the
+ * a command/not_command pair, it has a vendor ID of 0xa10c, but for some keys the
  * NEC extended checksums do pass, so the table presently has the intended
  * values and the checksum-passed versions for those keys.
  */
 static struct rc_map_table tivo[] = {
-       { 0x3085f009, KEY_MEDIA },      /* TiVo Button */
-       { 0x3085e010, KEY_POWER2 },     /* TV Power */
-       { 0x3085e011, KEY_TV },         /* Live TV/Swap */
-       { 0x3085c034, KEY_VIDEO_NEXT }, /* TV Input */
-       { 0x3085e013, KEY_INFO },
-       { 0x3085a05f, KEY_CYCLEWINDOWS }, /* Window */
+       { 0xa10c900f, KEY_MEDIA },      /* TiVo Button */
+       { 0xa10c0807, KEY_POWER2 },     /* TV Power */
+       { 0xa10c8807, KEY_TV },         /* Live TV/Swap */
+       { 0xa10c2c03, KEY_VIDEO_NEXT }, /* TV Input */
+       { 0xa10cc807, KEY_INFO },
+       { 0xa10cfa05, KEY_CYCLEWINDOWS }, /* Window */
        { 0x0085305f, KEY_CYCLEWINDOWS },
-       { 0x3085c036, KEY_EPG },        /* Guide */
+       { 0xa10c6c03, KEY_EPG },        /* Guide */
 
-       { 0x3085e014, KEY_UP },
-       { 0x3085e016, KEY_DOWN },
-       { 0x3085e017, KEY_LEFT },
-       { 0x3085e015, KEY_RIGHT },
+       { 0xa10c2807, KEY_UP },
+       { 0xa10c6807, KEY_DOWN },
+       { 0xa10ce807, KEY_LEFT },
+       { 0xa10ca807, KEY_RIGHT },
 
-       { 0x3085e018, KEY_SCROLLDOWN }, /* Red Thumbs Down */
-       { 0x3085e019, KEY_SELECT },
-       { 0x3085e01a, KEY_SCROLLUP },   /* Green Thumbs Up */
+       { 0xa10c1807, KEY_SCROLLDOWN }, /* Red Thumbs Down */
+       { 0xa10c9807, KEY_SELECT },
+       { 0xa10c5807, KEY_SCROLLUP },   /* Green Thumbs Up */
 
-       { 0x3085e01c, KEY_VOLUMEUP },
-       { 0x3085e01d, KEY_VOLUMEDOWN },
-       { 0x3085e01b, KEY_MUTE },
-       { 0x3085d020, KEY_RECORD },
-       { 0x3085e01e, KEY_CHANNELUP },
-       { 0x3085e01f, KEY_CHANNELDOWN },
+       { 0xa10c3807, KEY_VOLUMEUP },
+       { 0xa10cb807, KEY_VOLUMEDOWN },
+       { 0xa10cd807, KEY_MUTE },
+       { 0xa10c040b, KEY_RECORD },
+       { 0xa10c7807, KEY_CHANNELUP },
+       { 0xa10cf807, KEY_CHANNELDOWN },
        { 0x0085301f, KEY_CHANNELDOWN },
 
-       { 0x3085d021, KEY_PLAY },
-       { 0x3085d023, KEY_PAUSE },
-       { 0x3085d025, KEY_SLOW },
-       { 0x3085d022, KEY_REWIND },
-       { 0x3085d024, KEY_FASTFORWARD },
-       { 0x3085d026, KEY_PREVIOUS },
-       { 0x3085d027, KEY_NEXT },       /* ->| */
+       { 0xa10c840b, KEY_PLAY },
+       { 0xa10cc40b, KEY_PAUSE },
+       { 0xa10ca40b, KEY_SLOW },
+       { 0xa10c440b, KEY_REWIND },
+       { 0xa10c240b, KEY_FASTFORWARD },
+       { 0xa10c640b, KEY_PREVIOUS },
+       { 0xa10ce40b, KEY_NEXT },       /* ->| */
 
-       { 0x3085b044, KEY_ZOOM },       /* Aspect */
-       { 0x3085b048, KEY_STOP },
-       { 0x3085b04a, KEY_DVD },        /* DVD Menu */
+       { 0xa10c220d, KEY_ZOOM },       /* Aspect */
+       { 0xa10c120d, KEY_STOP },
+       { 0xa10c520d, KEY_DVD },        /* DVD Menu */
 
-       { 0x3085d028, KEY_NUMERIC_1 },
-       { 0x3085d029, KEY_NUMERIC_2 },
-       { 0x3085d02a, KEY_NUMERIC_3 },
-       { 0x3085d02b, KEY_NUMERIC_4 },
-       { 0x3085d02c, KEY_NUMERIC_5 },
-       { 0x3085d02d, KEY_NUMERIC_6 },
-       { 0x3085d02e, KEY_NUMERIC_7 },
-       { 0x3085d02f, KEY_NUMERIC_8 },
+       { 0xa10c140b, KEY_NUMERIC_1 },
+       { 0xa10c940b, KEY_NUMERIC_2 },
+       { 0xa10c540b, KEY_NUMERIC_3 },
+       { 0xa10cd40b, KEY_NUMERIC_4 },
+       { 0xa10c340b, KEY_NUMERIC_5 },
+       { 0xa10cb40b, KEY_NUMERIC_6 },
+       { 0xa10c740b, KEY_NUMERIC_7 },
+       { 0xa10cf40b, KEY_NUMERIC_8 },
        { 0x0085302f, KEY_NUMERIC_8 },
-       { 0x3085c030, KEY_NUMERIC_9 },
-       { 0x3085c031, KEY_NUMERIC_0 },
-       { 0x3085c033, KEY_ENTER },
-       { 0x3085c032, KEY_CLEAR },
+       { 0xa10c0c03, KEY_NUMERIC_9 },
+       { 0xa10c8c03, KEY_NUMERIC_0 },
+       { 0xa10ccc03, KEY_ENTER },
+       { 0xa10c4c03, KEY_CLEAR },
 };
 
 static struct rc_map_list tivo_map = {
index 99697aae92ff7eac40b880aa937944d6771aec5f..970b93d6f399b1ba87fbd22c3065e98a938a077e 100644 (file)
@@ -633,19 +633,13 @@ EXPORT_SYMBOL_GPL(rc_repeat);
 static void ir_do_keydown(struct rc_dev *dev, int scancode,
                          u32 keycode, u8 toggle)
 {
-       struct rc_scancode_filter *filter;
-       bool new_event = !dev->keypressed ||
-                        dev->last_scancode != scancode ||
-                        dev->last_toggle != toggle;
+       bool new_event = (!dev->keypressed               ||
+                         dev->last_scancode != scancode ||
+                         dev->last_toggle != toggle);
 
        if (new_event && dev->keypressed)
                ir_do_keyup(dev, false);
 
-       /* Generic scancode filtering */
-       filter = &dev->scancode_filters[RC_FILTER_NORMAL];
-       if (filter->mask && ((scancode ^ filter->data) & filter->mask))
-               return;
-
        input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
 
        if (new_event && keycode != KEY_RESERVED) {
@@ -923,6 +917,7 @@ static ssize_t store_protocols(struct device *device,
        int rc, i, count = 0;
        ssize_t ret;
        int (*change_protocol)(struct rc_dev *dev, u64 *rc_type);
+       int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
        struct rc_scancode_filter local_filter, *filter;
 
        /* Device is being removed */
@@ -1007,24 +1002,23 @@ static ssize_t store_protocols(struct device *device,
         * Fall back to clearing the filter.
         */
        filter = &dev->scancode_filters[fattr->type];
-       if (old_type != type && filter->mask) {
+       set_filter = (fattr->type == RC_FILTER_NORMAL)
+               ? dev->s_filter : dev->s_wakeup_filter;
+
+       if (set_filter && old_type != type && filter->mask) {
                local_filter = *filter;
                if (!type) {
                        /* no protocol => clear filter */
                        ret = -1;
-               } else if (!dev->s_filter) {
-                       /* generic filtering => accept any filter */
-                       ret = 0;
                } else {
                        /* hardware filtering => try setting, otherwise clear */
-                       ret = dev->s_filter(dev, fattr->type, &local_filter);
+                       ret = set_filter(dev, &local_filter);
                }
                if (ret < 0) {
                        /* clear the filter */
                        local_filter.data = 0;
                        local_filter.mask = 0;
-                       if (dev->s_filter)
-                               dev->s_filter(dev, fattr->type, &local_filter);
+                       set_filter(dev, &local_filter);
                }
 
                /* commit the new filter */
@@ -1068,7 +1062,10 @@ static ssize_t show_filter(struct device *device,
                return -EINVAL;
 
        mutex_lock(&dev->lock);
-       if (fattr->mask)
+       if ((fattr->type == RC_FILTER_NORMAL && !dev->s_filter) ||
+           (fattr->type == RC_FILTER_WAKEUP && !dev->s_wakeup_filter))
+               val = 0;
+       else if (fattr->mask)
                val = dev->scancode_filters[fattr->type].mask;
        else
                val = dev->scancode_filters[fattr->type].data;
@@ -1106,6 +1103,7 @@ static ssize_t store_filter(struct device *device,
        struct rc_scancode_filter local_filter, *filter;
        int ret;
        unsigned long val;
+       int (*set_filter)(struct rc_dev *dev, struct rc_scancode_filter *filter);
 
        /* Device is being removed */
        if (!dev)
@@ -1115,9 +1113,11 @@ static ssize_t store_filter(struct device *device,
        if (ret < 0)
                return ret;
 
-       /* Scancode filter not supported (but still accept 0) */
-       if (!dev->s_filter && fattr->type != RC_FILTER_NORMAL)
-               return val ? -EINVAL : count;
+       /* Can the scancode filter be set? */
+       set_filter = (fattr->type == RC_FILTER_NORMAL) ? dev->s_filter :
+                                                        dev->s_wakeup_filter;
+       if (!set_filter)
+               return -EINVAL;
 
        mutex_lock(&dev->lock);
 
@@ -1128,16 +1128,16 @@ static ssize_t store_filter(struct device *device,
                local_filter.mask = val;
        else
                local_filter.data = val;
+
        if (!dev->enabled_protocols[fattr->type] && local_filter.mask) {
                /* refuse to set a filter unless a protocol is enabled */
                ret = -EINVAL;
                goto unlock;
        }
-       if (dev->s_filter) {
-               ret = dev->s_filter(dev, fattr->type, &local_filter);
-               if (ret < 0)
-                       goto unlock;
-       }
+
+       ret = set_filter(dev, &local_filter);
+       if (ret < 0)
+               goto unlock;
 
        /* Success, commit the new filter */
        *filter = local_filter;
@@ -1189,27 +1189,45 @@ static RC_FILTER_ATTR(wakeup_filter, S_IRUGO|S_IWUSR,
 static RC_FILTER_ATTR(wakeup_filter_mask, S_IRUGO|S_IWUSR,
                      show_filter, store_filter, RC_FILTER_WAKEUP, true);
 
-static struct attribute *rc_dev_attrs[] = {
+static struct attribute *rc_dev_protocol_attrs[] = {
        &dev_attr_protocols.attr.attr,
+       NULL,
+};
+
+static struct attribute_group rc_dev_protocol_attr_grp = {
+       .attrs  = rc_dev_protocol_attrs,
+};
+
+static struct attribute *rc_dev_wakeup_protocol_attrs[] = {
        &dev_attr_wakeup_protocols.attr.attr,
+       NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_protocol_attr_grp = {
+       .attrs  = rc_dev_wakeup_protocol_attrs,
+};
+
+static struct attribute *rc_dev_filter_attrs[] = {
        &dev_attr_filter.attr.attr,
        &dev_attr_filter_mask.attr.attr,
-       &dev_attr_wakeup_filter.attr.attr,
-       &dev_attr_wakeup_filter_mask.attr.attr,
        NULL,
 };
 
-static struct attribute_group rc_dev_attr_grp = {
-       .attrs  = rc_dev_attrs,
+static struct attribute_group rc_dev_filter_attr_grp = {
+       .attrs  = rc_dev_filter_attrs,
 };
 
-static const struct attribute_group *rc_dev_attr_groups[] = {
-       &rc_dev_attr_grp,
-       NULL
+static struct attribute *rc_dev_wakeup_filter_attrs[] = {
+       &dev_attr_wakeup_filter.attr.attr,
+       &dev_attr_wakeup_filter_mask.attr.attr,
+       NULL,
+};
+
+static struct attribute_group rc_dev_wakeup_filter_attr_grp = {
+       .attrs  = rc_dev_wakeup_filter_attrs,
 };
 
 static struct device_type rc_dev_type = {
-       .groups         = rc_dev_attr_groups,
        .release        = rc_dev_release,
        .uevent         = rc_dev_uevent,
 };
@@ -1266,7 +1284,7 @@ int rc_register_device(struct rc_dev *dev)
        static bool raw_init = false; /* raw decoders loaded? */
        struct rc_map *rc_map;
        const char *path;
-       int rc, devno;
+       int rc, devno, attr = 0;
 
        if (!dev || !dev->map_name)
                return -EINVAL;
@@ -1294,6 +1312,16 @@ int rc_register_device(struct rc_dev *dev)
                        return -ENOMEM;
        } while (test_and_set_bit(devno, ir_core_dev_number));
 
+       dev->dev.groups = dev->sysfs_groups;
+       dev->sysfs_groups[attr++] = &rc_dev_protocol_attr_grp;
+       if (dev->s_filter)
+               dev->sysfs_groups[attr++] = &rc_dev_filter_attr_grp;
+       if (dev->s_wakeup_filter)
+               dev->sysfs_groups[attr++] = &rc_dev_wakeup_filter_attr_grp;
+       if (dev->change_wakeup_protocol)
+               dev->sysfs_groups[attr++] = &rc_dev_wakeup_protocol_attr_grp;
+       dev->sysfs_groups[attr++] = NULL;
+
        /*
         * Take the lock here, as the device sysfs node will appear
         * when device_add() is called, which may trigger an ir-keytable udev
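
The hunk above registers sysfs attribute groups only when the corresponding callbacks exist, by assembling a NULL-terminated array before device_add(). A small user-space analogue of that assembly, with plain structs standing in for struct attribute_group and made-up capability flags:

#include <stddef.h>
#include <stdio.h>

struct group { const char *name; };

static const struct group protocol_grp = { "protocols" };
static const struct group filter_grp   = { "filter" };
static const struct group wakeup_grp   = { "wakeup_filter" };

int main(void)
{
	int has_filter = 1, has_wakeup_filter = 0;	/* pretend capabilities */
	const struct group *groups[4];			/* worst case + NULL */
	int n = 0;

	groups[n++] = &protocol_grp;			/* always present */
	if (has_filter)
		groups[n++] = &filter_grp;
	if (has_wakeup_filter)
		groups[n++] = &wakeup_grp;
	groups[n++] = NULL;				/* terminator */

	for (int i = 0; groups[i]; i++)
		printf("registered group: %s\n", groups[i]->name);
	return 0;
}
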
index 319adc4f0561a0fff3f671dc38dcc20bf65ca8c2..96ccfebce7cad32f8891c04be9bdddc71b6e152d 100644 (file)
@@ -1468,7 +1468,8 @@ static int r820t_imr_prepare(struct r820t_priv *priv)
 static int r820t_multi_read(struct r820t_priv *priv)
 {
        int rc, i;
-       u8 data[2], min = 0, max = 255, sum = 0;
+       u16 sum = 0;
+       u8 data[2], min = 255, max = 0;
 
        usleep_range(5000, 6000);
 
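
A standalone sketch of why the hunk above seeds min to 255 and max to 0 and widens the sum to u16: a running minimum must start at the largest representable value (and the maximum at the smallest), and adding several u8 readings can exceed 255. The sample values are invented.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t samples[6] = { 200, 180, 220, 190, 210, 205 };
	uint16_t sum = 0;		/* a u8 sum would overflow at 255 */
	uint8_t min = 255, max = 0;	/* seed so the comparisons can win */

	for (int i = 0; i < 6; i++) {
		sum += samples[i];
		if (samples[i] < min)
			min = samples[i];
		if (samples[i] > max)
			max = samples[i];
	}
	printf("sum=%u min=%u max=%u\n", sum, min, max);
	return 0;
}
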
index 76a816511f2f34f9bd9d5886d549c59e8b6b5d96..6ef93ee1fdcb266780e32cd11d5736af3f24c24d 100644 (file)
@@ -1107,6 +1107,7 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
                                offset += 200000;
                }
 #endif
+               break;
        default:
                tuner_err("Unsupported tuner type %d.\n", new_type);
                break;
index c83c16cece01632a128ebcfac7a8d3eed94eb05e..61d196e8b3abde6dc0d97e26fe3ca7cae9292957 100644 (file)
@@ -1503,8 +1503,6 @@ static const struct usb_device_id rtl28xxu_id_table[] = {
        /* RTL2832P devices: */
        { DVB_USB_DEVICE(USB_VID_HANFTEK, 0x0131,
                &rtl2832u_props, "Astrometa DVB-T2", NULL) },
-       { DVB_USB_DEVICE(USB_VID_KYE, 0x707f,
-               &rtl2832u_props, "Genius TVGo DVB-T03", NULL) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, rtl28xxu_id_table);
index ab54910418b4ccfa7e07a179f4dbe82ecef00d17..0aa2b671faa417120217c9ae960e79013171f213 100644 (file)
@@ -154,7 +154,9 @@ static void jpeg_set_qual(u8 *jpeg_hdr,
 {
        int i, sc;
 
-       if (quality < 50)
+       if (quality <= 0)
+               sc = 5000;
+       else if (quality < 50)
                sc = 5000 / quality;
        else
                sc = 200 - quality * 2;
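
A standalone sketch of the quality-to-scale mapping in the hunk above (the usual IJG-style scheme): low qualities scale the quantization tables up steeply, high qualities scale them down linearly, and a non-positive quality has to be clamped so the division never sees zero.

#include <stdio.h>

static int jpeg_scale(int quality)
{
	if (quality <= 0)
		return 5000;		/* clamp: avoids divide-by-zero */
	if (quality < 50)
		return 5000 / quality;	/* steep scaling for low quality */
	return 200 - quality * 2;	/* linear scaling for high quality */
}

int main(void)
{
	int q[] = { 0, 1, 25, 50, 75, 100 };

	for (int i = 0; i < 6; i++)
		printf("quality %3d -> scale factor %d\n", q[i], jpeg_scale(q[i]));
	return 0;
}
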
index c46c8be896029534f2b08228eee825247398995d..2dd308f9541f469b719adf7d53f01901f4525b3e 100644 (file)
@@ -108,7 +108,7 @@ int stk1160_ac97_register(struct stk1160 *dev)
                 "stk1160-mixer");
        snprintf(card->longname, sizeof(card->longname),
                 "stk1160 ac97 codec mixer control");
-       strncpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
+       strlcpy(card->driver, dev->dev->driver->name, sizeof(card->driver));
 
        rc = snd_ac97_bus(card, 0, &stk1160_ac97_ops, NULL, &ac97_bus);
        if (rc)
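
A standalone sketch of what the strncpy-to-strlcpy change above buys: when the source is at least as long as the destination, strncpy leaves the buffer without a terminating NUL, while strlcpy always terminates (and truncates). The local my_strlcpy() is only a stand-in for the kernel/BSD helper.

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';		/* always NUL-terminated */
	}
	return len;
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "a-very-long-driver-name", sizeof(a));	/* no NUL written */
	my_strlcpy(b, "a-very-long-driver-name", sizeof(b));	/* truncated + NUL */

	printf("strlcpy result: \"%s\"\n", b);
	/* printing 'a' with %%s here would read past the buffer */
	return 0;
}
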
index f3cdf64997d60dd6937ba8885dab3c72525b1f2b..63aa9d9e34c52b1c2150fa7876ce3658a7c11fe5 100644 (file)
@@ -78,11 +78,19 @@ static void ntb_netdev_event_handler(void *data, int status)
        netdev_dbg(ndev, "Event %x, Link %x\n", status,
                   ntb_transport_link_query(dev->qp));
 
-       /* Currently, only link status event is supported */
-       if (status)
-               netif_carrier_on(ndev);
-       else
+       switch (status) {
+       case NTB_LINK_DOWN:
                netif_carrier_off(ndev);
+               break;
+       case NTB_LINK_UP:
+               if (!ntb_transport_link_query(dev->qp))
+                       return;
+
+               netif_carrier_on(ndev);
+               break;
+       default:
+               netdev_warn(ndev, "Unsupported event type %d\n", status);
+       }
 }
 
 static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
@@ -182,8 +190,10 @@ static int ntb_netdev_open(struct net_device *ndev)
 
                rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
                                              ndev->mtu + ETH_HLEN);
-               if (rc == -EINVAL)
+               if (rc == -EINVAL) {
+                       dev_kfree_skb(skb);
                        goto err;
+               }
        }
 
        netif_carrier_off(ndev);
@@ -367,12 +377,15 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
 {
        struct net_device *ndev;
        struct ntb_netdev *dev;
+       bool found = false;
 
        list_for_each_entry(dev, &dev_list, list) {
-               if (dev->pdev == pdev)
+               if (dev->pdev == pdev) {
+                       found = true;
                        break;
+               }
        }
-       if (dev == NULL)
+       if (!found)
                return;
 
        list_del(&dev->list);
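
A standalone sketch of the bug the hunk above fixes: when a search loop runs to completion without a match, the cursor is not NULL (with list_for_each_entry() it ends up pointing at storage derived from the list head), so the code needs an explicit found flag. The array walk below mirrors that behaviour.

#include <stdbool.h>
#include <stdio.h>

struct dev { int id; };

int main(void)
{
	struct dev devs[3] = { { 1 }, { 2 }, { 3 } };
	struct dev *d;
	bool found = false;

	/* Like list_for_each_entry(), this walk never leaves 'd' NULL when
	 * nothing matches -- it just stops past the last element. */
	for (d = devs; d < devs + 3; d++) {
		if (d->id == 42) {		/* not in the list */
			found = true;
			break;
		}
	}

	if (!found)	/* a 'd == NULL' test here would never fire */
		printf("not found; cursor points past the array (%p)\n",
		       (void *)d);
	return 0;
}
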
index 170e8e60cdb7fe3d47307d4b9146039e3af7f3a8..372e08c4ffefb76852e4d640e6936f3529e8c215 100644 (file)
@@ -91,7 +91,7 @@ static struct dentry *debugfs_dir;
 /* Translate memory window 0,1 to BAR 2,4 */
 #define MW_TO_BAR(mw)  (mw * NTB_MAX_NUM_MW + 2)
 
-static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+static const struct pci_device_id ntb_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
        {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
@@ -120,7 +120,8 @@ MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
  * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
  */
 int ntb_register_event_callback(struct ntb_device *ndev,
-                           void (*func)(void *handle, enum ntb_hw_event event))
+                               void (*func)(void *handle,
+                                            enum ntb_hw_event event))
 {
        if (ndev->event_cb)
                return -EINVAL;
@@ -715,9 +716,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
                               SNB_PBAR4LMT_OFFSET);
                        /* HW errata on the Limit registers.  They can only be
                         * written when the base register is 4GB aligned and
-                        * < 32bit.  This should already be the case based on the
-                        * driver defaults, but write the Limit registers first
-                        * just in case.
+                        * < 32bit.  This should already be the case based on
+                        * the driver defaults, but write the Limit registers
+                        * first just in case.
                         */
                } else {
                        ndev->limits.max_mw = SNB_MAX_MW;
@@ -739,9 +740,9 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
                        writeq(0, ndev->reg_base + SNB_PBAR4LMT_OFFSET);
                        /* HW errata on the Limit registers.  They can only be
                         * written when the base register is 4GB aligned and
-                        * < 32bit.  This should already be the case based on the
-                        * driver defaults, but write the Limit registers first
-                        * just in case.
+                        * < 32bit.  This should already be the case based on
+                        * the driver defaults, but write the Limit registers
+                        * first just in case.
                         */
                }
 
@@ -785,7 +786,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
                                /* B2B_XLAT_OFFSET is a 64bit register, but can
                                 * only take 32bit writes
                                 */
-                               writel(SNB_MBAR01_DSD_ADDR & 0xffffffff,
+                               writel(SNB_MBAR01_USD_ADDR & 0xffffffff,
                                       ndev->reg_base + SNB_B2B_XLAT_OFFSETL);
                                writel(SNB_MBAR01_USD_ADDR >> 32,
                                       ndev->reg_base + SNB_B2B_XLAT_OFFSETU);
@@ -803,7 +804,7 @@ static int ntb_xeon_setup(struct ntb_device *ndev)
                ndev->conn_type = NTB_CONN_RP;
 
                if (xeon_errata_workaround) {
-                       dev_err(&ndev->pdev->dev, 
+                       dev_err(&ndev->pdev->dev,
                                "NTB-RP disabled due to hardware errata.  To disregard this warning and potentially lock-up the system, add the parameter 'xeon_errata_workaround=0'.\n");
                        return -EINVAL;
                }
@@ -1079,111 +1080,131 @@ static irqreturn_t ntb_interrupt(int irq, void *dev)
        return IRQ_HANDLED;
 }
 
-static int ntb_setup_msix(struct ntb_device *ndev)
+static int ntb_setup_snb_msix(struct ntb_device *ndev, int msix_entries)
 {
        struct pci_dev *pdev = ndev->pdev;
        struct msix_entry *msix;
-       int msix_entries;
        int rc, i;
-       u16 val;
 
-       if (!pdev->msix_cap) {
-               rc = -EIO;
-               goto err;
-       }
+       if (msix_entries < ndev->limits.msix_cnt)
+               return -ENOSPC;
 
-       rc = pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &val);
-       if (rc)
-               goto err;
+       rc = pci_enable_msix_exact(pdev, ndev->msix_entries, msix_entries);
+       if (rc < 0)
+               return rc;
 
-       msix_entries = msix_table_size(val);
-       if (msix_entries > ndev->limits.msix_cnt) {
-               rc = -EINVAL;
-               goto err;
+       for (i = 0; i < msix_entries; i++) {
+               msix = &ndev->msix_entries[i];
+               WARN_ON(!msix->vector);
+
+               if (i == msix_entries - 1) {
+                       rc = request_irq(msix->vector,
+                                        xeon_event_msix_irq, 0,
+                                        "ntb-event-msix", ndev);
+                       if (rc)
+                               goto err;
+               } else {
+                       rc = request_irq(msix->vector,
+                                        xeon_callback_msix_irq, 0,
+                                        "ntb-callback-msix",
+                                        &ndev->db_cb[i]);
+                       if (rc)
+                               goto err;
+               }
        }
 
-       ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
-                                    GFP_KERNEL);
-       if (!ndev->msix_entries) {
-               rc = -ENOMEM;
-               goto err;
+       ndev->num_msix = msix_entries;
+       ndev->max_cbs = msix_entries - 1;
+
+       return 0;
+
+err:
+       while (--i >= 0) {
+               /* Code never reaches here for entry nr 'ndev->num_msix - 1' */
+               msix = &ndev->msix_entries[i];
+               free_irq(msix->vector, &ndev->db_cb[i]);
        }
 
-       for (i = 0; i < msix_entries; i++)
-               ndev->msix_entries[i].entry = i;
+       pci_disable_msix(pdev);
+       ndev->num_msix = 0;
 
-       rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-       if (rc < 0)
-               goto err1;
-       if (rc > 0) {
-               /* On SNB, the link interrupt is always tied to 4th vector.  If
-                * we can't get all 4, then we can't use MSI-X.
-                */
-               if (ndev->hw_type != BWD_HW) {
-                       rc = -EIO;
-                       goto err1;
-               }
+       return rc;
+}
 
-               dev_warn(&pdev->dev,
-                        "Only %d MSI-X vectors.  Limiting the number of queues to that number.\n",
-                        rc);
-               msix_entries = rc;
+static int ntb_setup_bwd_msix(struct ntb_device *ndev, int msix_entries)
+{
+       struct pci_dev *pdev = ndev->pdev;
+       struct msix_entry *msix;
+       int rc, i;
 
-               rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
-               if (rc)
-                       goto err1;
-       }
+       msix_entries = pci_enable_msix_range(pdev, ndev->msix_entries,
+                                            1, msix_entries);
+       if (msix_entries < 0)
+               return msix_entries;
 
        for (i = 0; i < msix_entries; i++) {
                msix = &ndev->msix_entries[i];
                WARN_ON(!msix->vector);
 
-               /* Use the last MSI-X vector for Link status */
-               if (ndev->hw_type == BWD_HW) {
-                       rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
-                                        "ntb-callback-msix", &ndev->db_cb[i]);
-                       if (rc)
-                               goto err2;
-               } else {
-                       if (i == msix_entries - 1) {
-                               rc = request_irq(msix->vector,
-                                                xeon_event_msix_irq, 0,
-                                                "ntb-event-msix", ndev);
-                               if (rc)
-                                       goto err2;
-                       } else {
-                               rc = request_irq(msix->vector,
-                                                xeon_callback_msix_irq, 0,
-                                                "ntb-callback-msix",
-                                                &ndev->db_cb[i]);
-                               if (rc)
-                                       goto err2;
-                       }
-               }
+               rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+                                "ntb-callback-msix", &ndev->db_cb[i]);
+               if (rc)
+                       goto err;
        }
 
        ndev->num_msix = msix_entries;
+       ndev->max_cbs = msix_entries;
+
+       return 0;
+
+err:
+       while (--i >= 0)
+               free_irq(msix->vector, &ndev->db_cb[i]);
+
+       pci_disable_msix(pdev);
+       ndev->num_msix = 0;
+
+       return rc;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+       struct pci_dev *pdev = ndev->pdev;
+       int msix_entries;
+       int rc, i;
+
+       msix_entries = pci_msix_vec_count(pdev);
+       if (msix_entries < 0) {
+               rc = msix_entries;
+               goto err;
+       } else if (msix_entries > ndev->limits.msix_cnt) {
+               rc = -EINVAL;
+               goto err;
+       }
+
+       ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+                                    GFP_KERNEL);
+       if (!ndev->msix_entries) {
+               rc = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0; i < msix_entries; i++)
+               ndev->msix_entries[i].entry = i;
+
        if (ndev->hw_type == BWD_HW)
-               ndev->max_cbs = msix_entries;
+               rc = ntb_setup_bwd_msix(ndev, msix_entries);
        else
-               ndev->max_cbs = msix_entries - 1;
+               rc = ntb_setup_snb_msix(ndev, msix_entries);
+       if (rc)
+               goto err1;
 
        return 0;
 
-err2:
-       while (--i >= 0) {
-               msix = &ndev->msix_entries[i];
-               if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
-                       free_irq(msix->vector, ndev);
-               else
-                       free_irq(msix->vector, &ndev->db_cb[i]);
-       }
-       pci_disable_msix(pdev);
 err1:
        kfree(ndev->msix_entries);
-       dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
 err:
-       ndev->num_msix = 0;
+       dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
        return rc;
 }
 
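
A kernel-side sketch (not a standalone program, with made-up function and variable names) of the two allocation styles the refactor above separates: pci_enable_msix_exact() is all-or-nothing, which suits the SNB path where the link vector must land in a fixed slot, while pci_enable_msix_range() accepts anything between a minimum and maximum and returns the count actually granted.

#include <linux/pci.h>

/* entries[i].entry is assumed to be pre-filled by the caller, as above */
static int example_setup_vectors(struct pci_dev *pdev,
				 struct msix_entry *entries, int want)
{
	int got;

	/* All-or-nothing: fail unless every requested vector is granted */
	if (pci_enable_msix_exact(pdev, entries, want) == 0)
		return want;

	/* Flexible: take anything from one vector up to 'want' */
	got = pci_enable_msix_range(pdev, entries, 1, want);
	return got;	/* vectors granted, or negative errno */
}
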
@@ -1281,6 +1302,7 @@ static void ntb_free_interrupts(struct ntb_device *ndev)
                                free_irq(msix->vector, &ndev->db_cb[i]);
                }
                pci_disable_msix(pdev);
+               kfree(ndev->msix_entries);
        } else {
                free_irq(pdev->irq, ndev);
 
index bbdb7edca10cd8647e4b739dfab29f1ed33fc1e6..465517b7393efe4e995d323dc20f48b1eab859ae 100644 (file)
@@ -45,6 +45,7 @@
  * Contact Information:
  * Jon Mason <jon.mason@intel.com>
  */
+#include <linux/ntb.h>
 
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF                0x3725
 #define PCI_DEVICE_ID_INTEL_NTB_PS_JSF         0x3726
@@ -60,8 +61,6 @@
 #define PCI_DEVICE_ID_INTEL_NTB_SS_HSX         0x2F0F
 #define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD                0x0C4E
 
-#define msix_table_size(control)       ((control & PCI_MSIX_FLAGS_QSIZE)+1)
-
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
 {
@@ -83,9 +82,6 @@ static inline void writeq(u64 val, void __iomem *addr)
 #define NTB_BAR_MASK           ((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
                                 (1 << NTB_BAR_45))
 
-#define NTB_LINK_DOWN          0
-#define NTB_LINK_UP            1
-
 #define NTB_HB_TIMEOUT         msecs_to_jiffies(1000)
 
 #define NTB_MAX_NUM_MW         2
@@ -233,7 +229,7 @@ int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
                                                           int db_num));
 void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
 int ntb_register_event_callback(struct ntb_device *ndev,
-                               void (*event_cb_func) (void *handle,
+                               void (*event_cb_func)(void *handle,
                                                      enum ntb_hw_event event));
 void ntb_unregister_event_callback(struct ntb_device *ndev);
 int ntb_get_max_spads(struct ntb_device *ndev);
index 3217f394d45b106051b282f824be1413b5efa65d..9dd63b82202555e0ebdd46b3460a8ec1107f8f9f 100644 (file)
@@ -56,7 +56,6 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/ntb.h>
 #include "ntb_hw.h"
 
 #define NTB_TRANSPORT_VERSION  3
@@ -107,8 +106,8 @@ struct ntb_transport_qp {
        struct ntb_rx_info __iomem *rx_info;
        struct ntb_rx_info *remote_rx_info;
 
-       void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                           void *data, int len);
+       void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                          void *data, int len);
        struct list_head tx_free_q;
        spinlock_t ntb_tx_free_q_lock;
        void __iomem *tx_mw;
@@ -117,8 +116,8 @@ struct ntb_transport_qp {
        unsigned int tx_max_entry;
        unsigned int tx_max_frame;
 
-       void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                           void *data, int len);
+       void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                          void *data, int len);
        struct list_head rx_pend_q;
        struct list_head rx_free_q;
        spinlock_t ntb_rx_pend_q_lock;
@@ -129,7 +128,7 @@ struct ntb_transport_qp {
        unsigned int rx_max_frame;
        dma_cookie_t last_cookie;
 
-       void (*event_handler) (void *data, int status);
+       void (*event_handler)(void *data, int status);
        struct delayed_work link_work;
        struct work_struct link_cleanup;
 
@@ -480,7 +479,7 @@ static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
 }
 
 static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
-                                               struct list_head *list)
+                                          struct list_head *list)
 {
        struct ntb_queue_entry *entry;
        unsigned long flags;
@@ -839,7 +838,7 @@ static void ntb_qp_link_work(struct work_struct *work)
 }
 
 static int ntb_transport_init_queue(struct ntb_transport *nt,
-                                    unsigned int qp_num)
+                                   unsigned int qp_num)
 {
        struct ntb_transport_qp *qp;
        unsigned int num_qps_mw, tx_size;
@@ -1055,7 +1054,7 @@ static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset,
        if (!chan)
                goto err;
 
-       if (len < copy_bytes) 
+       if (len < copy_bytes)
                goto err_wait;
 
        device = chan->device;
@@ -1190,8 +1189,7 @@ out:
        return 0;
 
 err:
-       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
-                    &qp->rx_pend_q);
+       ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
        /* Ensure that the data is fully copied out before clearing the flag */
        wmb();
        hdr->flags = 0;
index 5f67843c7fb7388c158ade858a90c18c61f8f2c7..27df2c533b09c78ef93d1f44f4fd36b11773f72e 100644 (file)
@@ -53,6 +53,18 @@ config ACERHDF
          If you have an Acer Aspire One netbook, say Y or M
          here.
 
+config ALIENWARE_WMI
+       tristate "Alienware Special feature control"
+       depends on ACPI
+       depends on LEDS_CLASS
+       depends on NEW_LEDS
+       depends on ACPI_WMI
+       ---help---
+        This is a driver for controlling Alienware BIOS-driven
+        features.  It exposes an interface for controlling the AlienFX
+        zones on Alienware machines that don't contain a dedicated
+        AlienFX USB MCU, such as the X51 and X51-R2.
+
 config ASUS_LAPTOP
        tristate "Asus Laptop Extras"
        depends on ACPI
@@ -196,7 +208,7 @@ config HP_ACCEL
          be called hp_accel.
 
 config HP_WIRELESS
-       tristate "HP WIRELESS"
+       tristate "HP wireless button"
        depends on ACPI
        depends on INPUT
        help
@@ -817,12 +829,4 @@ config PVPANIC
          a paravirtualized device provided by QEMU; it lets a virtual machine
          (guest) communicate panic events to the host.
 
-config INTEL_BAYTRAIL_MBI
-       tristate
-       depends on PCI
-       ---help---
-         Needed on Baytrail platforms for access to the IOSF Sideband Mailbox
-         Interface. This is a requirement for systems that need to configure
-         the PUNIT for power management features such as RAPL.
-
 endif # X86_PLATFORM_DEVICES
index 9b87cfc42b8419202553a1d49986b7a737932882..1a2eafc9d48efa5d8d4fc19c6aec4862e8547eb4 100644 (file)
@@ -55,4 +55,4 @@ obj-$(CONFIG_INTEL_RST)               += intel-rst.o
 obj-$(CONFIG_INTEL_SMARTCONNECT)       += intel-smartconnect.o
 
 obj-$(CONFIG_PVPANIC)           += pvpanic.o
-obj-$(CONFIG_INTEL_BAYTRAIL_MBI)       += intel_baytrail.o
+obj-$(CONFIG_ALIENWARE_WMI)    += alienware-wmi.o
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
new file mode 100644 (file)
index 0000000..541f951
--- /dev/null
@@ -0,0 +1,565 @@
+/*
+ * Alienware AlienFX control
+ *
+ * Copyright (C) 2014 Dell Inc <mario_limonciello@dell.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/leds.h>
+
+#define LEGACY_CONTROL_GUID            "A90597CE-A997-11DA-B012-B622A1EF5492"
+#define LEGACY_POWER_CONTROL_GUID      "A80593CE-A997-11DA-B012-B622A1EF5492"
+#define WMAX_CONTROL_GUID              "A70591CE-A997-11DA-B012-B622A1EF5492"
+
+#define WMAX_METHOD_HDMI_SOURCE                0x1
+#define WMAX_METHOD_HDMI_STATUS                0x2
+#define WMAX_METHOD_BRIGHTNESS         0x3
+#define WMAX_METHOD_ZONE_CONTROL       0x4
+
+MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
+MODULE_DESCRIPTION("Alienware special feature control");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:" LEGACY_CONTROL_GUID);
+MODULE_ALIAS("wmi:" WMAX_CONTROL_GUID);
+
+enum INTERFACE_FLAGS {
+       LEGACY,
+       WMAX,
+};
+
+enum LEGACY_CONTROL_STATES {
+       LEGACY_RUNNING = 1,
+       LEGACY_BOOTING = 0,
+       LEGACY_SUSPEND = 3,
+};
+
+enum WMAX_CONTROL_STATES {
+       WMAX_RUNNING = 0xFF,
+       WMAX_BOOTING = 0,
+       WMAX_SUSPEND = 3,
+};
+
+struct quirk_entry {
+       u8 num_zones;
+};
+
+static struct quirk_entry *quirks;
+
+static struct quirk_entry quirk_unknown = {
+       .num_zones = 2,
+};
+
+static struct quirk_entry quirk_x51_family = {
+       .num_zones = 3,
+};
+
+static int dmi_matched(const struct dmi_system_id *dmi)
+{
+       quirks = dmi->driver_data;
+       return 1;
+}
+
+static struct dmi_system_id alienware_quirks[] = {
+       {
+        .callback = dmi_matched,
+        .ident = "Alienware X51 R1",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+                    },
+        .driver_data = &quirk_x51_family,
+        },
+       {
+        .callback = dmi_matched,
+        .ident = "Alienware X51 R2",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
+                    },
+        .driver_data = &quirk_x51_family,
+        },
+       {}
+};
+
+struct color_platform {
+       u8 blue;
+       u8 green;
+       u8 red;
+} __packed;
+
+struct platform_zone {
+       u8 location;
+       struct device_attribute *attr;
+       struct color_platform colors;
+};
+
+struct wmax_brightness_args {
+       u32 led_mask;
+       u32 percentage;
+};
+
+struct hdmi_args {
+       u8 arg;
+};
+
+struct legacy_led_args {
+       struct color_platform colors;
+       u8 brightness;
+       u8 state;
+} __packed;
+
+struct wmax_led_args {
+       u32 led_mask;
+       struct color_platform colors;
+       u8 state;
+} __packed;
+
+static struct platform_device *platform_device;
+static struct device_attribute *zone_dev_attrs;
+static struct attribute **zone_attrs;
+static struct platform_zone *zone_data;
+
+static struct platform_driver platform_driver = {
+       .driver = {
+                  .name = "alienware-wmi",
+                  .owner = THIS_MODULE,
+                  }
+};
+
+static struct attribute_group zone_attribute_group = {
+       .name = "rgb_zones",
+};
+
+static u8 interface;
+static u8 lighting_control_state;
+static u8 global_brightness;
+
+/*
+ * Helpers used for zone control
+ */
+static int parse_rgb(const char *buf, struct platform_zone *zone)
+{
+       long unsigned int rgb;
+       int ret;
+       union color_union {
+               struct color_platform cp;
+               int package;
+       } repackager;
+
+       ret = kstrtoul(buf, 16, &rgb);
+       if (ret)
+               return ret;
+
+       /* RGB triplet notation is 24-bit hexadecimal */
+       if (rgb > 0xFFFFFF)
+               return -EINVAL;
+
+       repackager.package = rgb & 0x0f0f0f0f;
+       pr_debug("alienware-wmi: r: %d g:%d b: %d\n",
+                repackager.cp.red, repackager.cp.green, repackager.cp.blue);
+       zone->colors = repackager.cp;
+       return 0;
+}
+
+static struct platform_zone *match_zone(struct device_attribute *attr)
+{
+       int i;
+       for (i = 0; i < quirks->num_zones; i++) {
+               if ((struct device_attribute *)zone_data[i].attr == attr) {
+                       pr_debug("alienware-wmi: matched zone location: %d\n",
+                                zone_data[i].location);
+                       return &zone_data[i];
+               }
+       }
+       return NULL;
+}
+
+/*
+ * Individual RGB zone control
+ */
+static int alienware_update_led(struct platform_zone *zone)
+{
+       int method_id;
+       acpi_status status;
+       char *guid;
+       struct acpi_buffer input;
+       struct legacy_led_args legacy_args;
+       struct wmax_led_args wmax_args;
+       if (interface == WMAX) {
+               wmax_args.led_mask = 1 << zone->location;
+               wmax_args.colors = zone->colors;
+               wmax_args.state = lighting_control_state;
+               guid = WMAX_CONTROL_GUID;
+               method_id = WMAX_METHOD_ZONE_CONTROL;
+
+               input.length = (acpi_size) sizeof(wmax_args);
+               input.pointer = &wmax_args;
+       } else {
+               legacy_args.colors = zone->colors;
+               legacy_args.brightness = global_brightness;
+               legacy_args.state = 0;
+               if (lighting_control_state == LEGACY_BOOTING ||
+                   lighting_control_state == LEGACY_SUSPEND) {
+                       guid = LEGACY_POWER_CONTROL_GUID;
+                       legacy_args.state = lighting_control_state;
+               } else
+                       guid = LEGACY_CONTROL_GUID;
+               method_id = zone->location + 1;
+
+               input.length = (acpi_size) sizeof(legacy_args);
+               input.pointer = &legacy_args;
+       }
+       pr_debug("alienware-wmi: guid %s method %d\n", guid, method_id);
+
+       status = wmi_evaluate_method(guid, 1, method_id, &input, NULL);
+       if (ACPI_FAILURE(status))
+               pr_err("alienware-wmi: zone set failure: %u\n", status);
+       return ACPI_FAILURE(status);
+}
+
+static ssize_t zone_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct platform_zone *target_zone;
+       target_zone = match_zone(attr);
+       if (target_zone == NULL)
+               return sprintf(buf, "red: -1, green: -1, blue: -1\n");
+       return sprintf(buf, "red: %d, green: %d, blue: %d\n",
+                      target_zone->colors.red,
+                      target_zone->colors.green, target_zone->colors.blue);
+
+}
+
+static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct platform_zone *target_zone;
+       int ret;
+       target_zone = match_zone(attr);
+       if (target_zone == NULL) {
+               pr_err("alienware-wmi: invalid target zone\n");
+               return 1;
+       }
+       ret = parse_rgb(buf, target_zone);
+       if (ret)
+               return ret;
+       ret = alienware_update_led(target_zone);
+       return ret ? ret : count;
+}
+
+/*
+ * LED Brightness (Global)
+ */
+static int wmax_brightness(int brightness)
+{
+       acpi_status status;
+       struct acpi_buffer input;
+       struct wmax_brightness_args args = {
+               .led_mask = 0xFF,
+               .percentage = brightness,
+       };
+       input.length = (acpi_size) sizeof(args);
+       input.pointer = &args;
+       status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+                                    WMAX_METHOD_BRIGHTNESS, &input, NULL);
+       if (ACPI_FAILURE(status))
+               pr_err("alienware-wmi: brightness set failure: %u\n", status);
+       return ACPI_FAILURE(status);
+}
+
+static void global_led_set(struct led_classdev *led_cdev,
+                          enum led_brightness brightness)
+{
+       int ret;
+       global_brightness = brightness;
+       if (interface == WMAX)
+               ret = wmax_brightness(brightness);
+       else
+               ret = alienware_update_led(&zone_data[0]);
+       if (ret)
+               pr_err("LED brightness update failed\n");
+}
+
+static enum led_brightness global_led_get(struct led_classdev *led_cdev)
+{
+       return global_brightness;
+}
+
+static struct led_classdev global_led = {
+       .brightness_set = global_led_set,
+       .brightness_get = global_led_get,
+       .name = "alienware::global_brightness",
+};
+
+/*
+ * Lighting control state device attribute (Global)
+ */
+static ssize_t show_control_state(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       if (lighting_control_state == LEGACY_BOOTING)
+               return scnprintf(buf, PAGE_SIZE, "[booting] running suspend\n");
+       else if (lighting_control_state == LEGACY_SUSPEND)
+               return scnprintf(buf, PAGE_SIZE, "booting running [suspend]\n");
+       return scnprintf(buf, PAGE_SIZE, "booting [running] suspend\n");
+}
+
+static ssize_t store_control_state(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       long unsigned int val;
+       if (strcmp(buf, "booting\n") == 0)
+               val = LEGACY_BOOTING;
+       else if (strcmp(buf, "suspend\n") == 0)
+               val = LEGACY_SUSPEND;
+       else if (interface == LEGACY)
+               val = LEGACY_RUNNING;
+       else
+               val = WMAX_RUNNING;
+       lighting_control_state = val;
+       pr_debug("alienware-wmi: updated control state to %d\n",
+                lighting_control_state);
+       return count;
+}
+
+static DEVICE_ATTR(lighting_control_state, 0644, show_control_state,
+                  store_control_state);
+
+static int alienware_zone_init(struct platform_device *dev)
+{
+       int i;
+       char buffer[10];
+       char *name;
+
+       if (interface == WMAX) {
+               global_led.max_brightness = 100;
+               lighting_control_state = WMAX_RUNNING;
+       } else if (interface == LEGACY) {
+               global_led.max_brightness = 0x0F;
+               lighting_control_state = LEGACY_RUNNING;
+       }
+       global_brightness = global_led.max_brightness;
+
+       /*
+        *      - zone_dev_attrs: num_zones + 1 entries, one attribute per
+        *        zone plus a zeroed terminator entry
+        *      - zone_attrs: num_zones + 2 entries, every attribute in
+        *        zone_dev_attrs plus the lighting control attribute plus a
+        *        NULL terminator
+        *      - zone_data: num_zones entries, one per distinct zone
+        */
+       zone_dev_attrs =
+           kzalloc(sizeof(struct device_attribute) * (quirks->num_zones + 1),
+                   GFP_KERNEL);
+       if (!zone_dev_attrs)
+               return -ENOMEM;
+
+       zone_attrs =
+           kzalloc(sizeof(struct attribute *) * (quirks->num_zones + 2),
+                   GFP_KERNEL);
+       if (!zone_attrs)
+               return -ENOMEM;
+
+       zone_data =
+           kzalloc(sizeof(struct platform_zone) * (quirks->num_zones),
+                   GFP_KERNEL);
+       if (!zone_data)
+               return -ENOMEM;
+
+       for (i = 0; i < quirks->num_zones; i++) {
+               sprintf(buffer, "zone%02X", i);
+               name = kstrdup(buffer, GFP_KERNEL);
+               if (name == NULL)
+                       return 1;
+               sysfs_attr_init(&zone_dev_attrs[i].attr);
+               zone_dev_attrs[i].attr.name = name;
+               zone_dev_attrs[i].attr.mode = 0644;
+               zone_dev_attrs[i].show = zone_show;
+               zone_dev_attrs[i].store = zone_set;
+               zone_data[i].location = i;
+               zone_attrs[i] = &zone_dev_attrs[i].attr;
+               zone_data[i].attr = &zone_dev_attrs[i];
+       }
+       zone_attrs[quirks->num_zones] = &dev_attr_lighting_control_state.attr;
+       zone_attribute_group.attrs = zone_attrs;
+
+       led_classdev_register(&dev->dev, &global_led);
+
+       return sysfs_create_group(&dev->dev.kobj, &zone_attribute_group);
+}
+
+static void alienware_zone_exit(struct platform_device *dev)
+{
+       sysfs_remove_group(&dev->dev.kobj, &zone_attribute_group);
+       led_classdev_unregister(&global_led);
+       if (zone_dev_attrs) {
+               int i;
+               for (i = 0; i < quirks->num_zones; i++)
+                       kfree(zone_dev_attrs[i].attr.name);
+       }
+       kfree(zone_dev_attrs);
+       kfree(zone_data);
+       kfree(zone_attrs);
+}
+
+/*
+ * The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ * It can toggle between standard system GPU output and HDMI input.
+ */
+static ssize_t show_hdmi(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       acpi_status status;
+       struct acpi_buffer input;
+       union acpi_object *obj;
+       u32 tmp = 0;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       struct hdmi_args in_args = {
+               .arg = 0,
+       };
+       input.length = (acpi_size) sizeof(in_args);
+       input.pointer = &in_args;
+       status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+                                    WMAX_METHOD_HDMI_STATUS, &input, &output);
+
+       if (ACPI_SUCCESS(status)) {
+               obj = (union acpi_object *)output.pointer;
+               if (obj && obj->type == ACPI_TYPE_INTEGER)
+                       tmp = (u32) obj->integer.value;
+               if (tmp == 1)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "[input] gpu unknown\n");
+               else if (tmp == 2)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "input [gpu] unknown\n");
+       }
+       pr_err("alienware-wmi: unknown HDMI status: %d\n", status);
+       return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
+}
+
+static ssize_t toggle_hdmi(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct acpi_buffer input;
+       acpi_status status;
+       struct hdmi_args args;
+       if (strcmp(buf, "gpu\n") == 0)
+               args.arg = 1;
+       else if (strcmp(buf, "input\n") == 0)
+               args.arg = 2;
+       else
+               args.arg = 3;
+       pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
+       input.length = (acpi_size) sizeof(args);
+       input.pointer = &args;
+       status = wmi_evaluate_method(WMAX_CONTROL_GUID, 1,
+                                    WMAX_METHOD_HDMI_SOURCE, &input, NULL);
+       if (ACPI_FAILURE(status))
+               pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
+                      status);
+       return count;
+}
+
+static DEVICE_ATTR(hdmi, S_IRUGO | S_IWUSR, show_hdmi, toggle_hdmi);
+
+static void remove_hdmi(struct platform_device *device)
+{
+       device_remove_file(&device->dev, &dev_attr_hdmi);
+}
+
+static int create_hdmi(void)
+{
+       int ret = -ENOMEM;
+       ret = device_create_file(&platform_device->dev, &dev_attr_hdmi);
+       if (ret)
+               goto error_create_hdmi;
+       return 0;
+
+error_create_hdmi:
+       remove_hdmi(platform_device);
+       return ret;
+}
+
+static int __init alienware_wmi_init(void)
+{
+       int ret;
+
+       if (wmi_has_guid(LEGACY_CONTROL_GUID))
+               interface = LEGACY;
+       else if (wmi_has_guid(WMAX_CONTROL_GUID))
+               interface = WMAX;
+       else {
+               pr_warn("alienware-wmi: No known WMI GUID found\n");
+               return -ENODEV;
+       }
+
+       dmi_check_system(alienware_quirks);
+       if (quirks == NULL)
+               quirks = &quirk_unknown;
+
+       ret = platform_driver_register(&platform_driver);
+       if (ret)
+               goto fail_platform_driver;
+       platform_device = platform_device_alloc("alienware-wmi", -1);
+       if (!platform_device) {
+               ret = -ENOMEM;
+               goto fail_platform_device1;
+       }
+       ret = platform_device_add(platform_device);
+       if (ret)
+               goto fail_platform_device2;
+
+       if (interface == WMAX) {
+               ret = create_hdmi();
+               if (ret)
+                       goto fail_prep_hdmi;
+       }
+
+       ret = alienware_zone_init(platform_device);
+       if (ret)
+               goto fail_prep_zones;
+
+       return 0;
+
+fail_prep_zones:
+       alienware_zone_exit(platform_device);
+fail_prep_hdmi:
+       platform_device_del(platform_device);
+fail_platform_device2:
+       platform_device_put(platform_device);
+fail_platform_device1:
+       platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+       return ret;
+}
+
+module_init(alienware_wmi_init);
+
+static void __exit alienware_wmi_exit(void)
+{
+       if (platform_device) {
+               alienware_zone_exit(platform_device);
+               remove_hdmi(platform_device);
+               platform_device_unregister(platform_device);
+               platform_driver_unregister(&platform_driver);
+       }
+}
+
+module_exit(alienware_wmi_exit);
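
A user-space sketch of driving the interface the new driver exposes, assuming the usual platform-device sysfs layout; the paths and values below are illustrative rather than taken from the patch. Each zoneXX attribute accepts a 24-bit hex RGB triplet, and global brightness goes through the LED class device registered above.

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* paint zone 0 green (assumed sysfs path for the platform device) */
	write_str("/sys/devices/platform/alienware-wmi/rgb_zones/zone00",
		  "00ff00");
	/* dim the global brightness via the LED class device */
	write_str("/sys/class/leds/alienware::global_brightness/brightness",
		  "50");
	return 0;
}
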
index 570926c10014d53ef3c1a2afc4de6aa87eb71a96..c3784baceae3cca2b9d1d32113016f4562f94e48 100644 (file)
@@ -71,6 +71,44 @@ static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initdata = {
        KEY_LEFTALT
 };
 
+static unsigned short keymap_Lifebook_T901[KEYMAP_LEN] __initdata = {
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_SCROLLDOWN,
+       KEY_SCROLLUP,
+       KEY_CYCLEWINDOWS,
+       KEY_LEFTCTRL,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_LEFTMETA
+};
+
+static unsigned short keymap_Lifebook_T902[KEYMAP_LEN] __initdata = {
+       KEY_RESERVED,
+       KEY_VOLUMEDOWN,
+       KEY_VOLUMEUP,
+       KEY_CYCLEWINDOWS,
+       KEY_PROG1,
+       KEY_PROG2,
+       KEY_LEFTMETA,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+       KEY_RESERVED,
+};
+
 static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initdata = {
        KEY_RESERVED,
        KEY_RESERVED,
@@ -300,6 +338,33 @@ static int fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
 }
 
 static const struct dmi_system_id dmi_ids[] __initconst = {
+       {
+               .callback = fujitsu_dmi_lifebook,
+               .ident = "Fujitsu Lifebook T901",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T901")
+               },
+               .driver_data = keymap_Lifebook_T901
+       },
+       {
+               .callback = fujitsu_dmi_lifebook,
+               .ident = "Fujitsu Lifebook T901",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T901")
+               },
+               .driver_data = keymap_Lifebook_T901
+       },
+       {
+               .callback = fujitsu_dmi_lifebook,
+               .ident = "Fujitsu Lifebook T902",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T902")
+               },
+               .driver_data = keymap_Lifebook_T902
+       },
        {
                .callback = fujitsu_dmi_lifebook,
                .ident = "Fujitsu Siemens P/T Series",
diff --git a/drivers/platform/x86/intel_baytrail.c b/drivers/platform/x86/intel_baytrail.c
deleted file mode 100644 (file)
index f96626b..0000000
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Baytrail IOSF-SB MailBox Interface Driver
- * Copyright (c) 2013, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- *
- * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
- * mailbox interface (MBI) to communicate with mutiple devices. This
- * driver implements BayTrail-specific access to this interface.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-
-#include "intel_baytrail.h"
-
-static DEFINE_SPINLOCK(iosf_mbi_lock);
-
-static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
-{
-       return (op << 24) | (port << 16) | (offset << 8) | BT_MBI_ENABLE;
-}
-
-static struct pci_dev *mbi_pdev;       /* one mbi device */
-
-/* Hold lock before calling */
-static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
-{
-       int result;
-
-       if (!mbi_pdev)
-               return -ENODEV;
-
-       if (mcrx) {
-               result = pci_write_config_dword(mbi_pdev,
-                                               BT_MBI_MCRX_OFFSET, mcrx);
-               if (result < 0)
-                       goto iosf_mbi_read_err;
-       }
-
-       result = pci_write_config_dword(mbi_pdev,
-                                       BT_MBI_MCR_OFFSET, mcr);
-       if (result < 0)
-               goto iosf_mbi_read_err;
-
-       result = pci_read_config_dword(mbi_pdev,
-                                      BT_MBI_MDR_OFFSET, mdr);
-       if (result < 0)
-               goto iosf_mbi_read_err;
-
-       return 0;
-
-iosf_mbi_read_err:
-       dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
-               result);
-       return result;
-}
-
-/* Hold lock before calling */
-static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
-{
-       int result;
-
-       if (!mbi_pdev)
-               return -ENODEV;
-
-       result = pci_write_config_dword(mbi_pdev,
-                                       BT_MBI_MDR_OFFSET, mdr);
-       if (result < 0)
-               goto iosf_mbi_write_err;
-
-       if (mcrx) {
-               result = pci_write_config_dword(mbi_pdev,
-                        BT_MBI_MCRX_OFFSET, mcrx);
-               if (result < 0)
-                       goto iosf_mbi_write_err;
-       }
-
-       result = pci_write_config_dword(mbi_pdev,
-                                       BT_MBI_MCR_OFFSET, mcr);
-       if (result < 0)
-               goto iosf_mbi_write_err;
-
-       return 0;
-
-iosf_mbi_write_err:
-       dev_err(&mbi_pdev->dev, "error: PCI config operation returned %d\n",
-               result);
-       return result;
-}
-
-int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
-{
-       u32 mcr, mcrx;
-       unsigned long flags;
-       int ret;
-
-       /*Access to the GFX unit is handled by GPU code */
-       BUG_ON(port == BT_MBI_UNIT_GFX);
-
-       mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
-       mcrx = offset & BT_MBI_MASK_HI;
-
-       spin_lock_irqsave(&iosf_mbi_lock, flags);
-       ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
-       spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(bt_mbi_read);
-
-int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
-{
-       u32 mcr, mcrx;
-       unsigned long flags;
-       int ret;
-
-       /*Access to the GFX unit is handled by GPU code */
-       BUG_ON(port == BT_MBI_UNIT_GFX);
-
-       mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
-       mcrx = offset & BT_MBI_MASK_HI;
-
-       spin_lock_irqsave(&iosf_mbi_lock, flags);
-       ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
-       spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(bt_mbi_write);
-
-int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
-{
-       u32 mcr, mcrx;
-       u32 value;
-       unsigned long flags;
-       int ret;
-
-       /*Access to the GFX unit is handled by GPU code */
-       BUG_ON(port == BT_MBI_UNIT_GFX);
-
-       mcr = iosf_mbi_form_mcr(opcode, port, offset & BT_MBI_MASK_LO);
-       mcrx = offset & BT_MBI_MASK_HI;
-
-       spin_lock_irqsave(&iosf_mbi_lock, flags);
-
-       /* Read current mdr value */
-       ret = iosf_mbi_pci_read_mdr(mcrx, mcr & BT_MBI_RD_MASK, &value);
-       if (ret < 0) {
-               spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-               return ret;
-       }
-
-       /* Apply mask */
-       value &= ~mask;
-       mdr &= mask;
-       value |= mdr;
-
-       /* Write back */
-       ret = iosf_mbi_pci_write_mdr(mcrx, mcr | BT_MBI_WR_MASK, value);
-
-       spin_unlock_irqrestore(&iosf_mbi_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(bt_mbi_modify);
-
-static int iosf_mbi_probe(struct pci_dev *pdev,
-                         const struct pci_device_id *unused)
-{
-       int ret;
-
-       ret = pci_enable_device(pdev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "error: could not enable device\n");
-               return ret;
-       }
-
-       mbi_pdev = pci_dev_get(pdev);
-       return 0;
-}
-
-static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
-       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
-       { 0, },
-};
-MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
-
-static struct pci_driver iosf_mbi_pci_driver = {
-       .name           = "iosf_mbi_pci",
-       .probe          = iosf_mbi_probe,
-       .id_table       = iosf_mbi_pci_ids,
-};
-
-static int __init bt_mbi_init(void)
-{
-       return pci_register_driver(&iosf_mbi_pci_driver);
-}
-
-static void __exit bt_mbi_exit(void)
-{
-       pci_unregister_driver(&iosf_mbi_pci_driver);
-       if (mbi_pdev) {
-               pci_dev_put(mbi_pdev);
-               mbi_pdev = NULL;
-       }
-}
-
-module_init(bt_mbi_init);
-module_exit(bt_mbi_exit);
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("BayTrail Mailbox Interface accessor");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel_baytrail.h b/drivers/platform/x86/intel_baytrail.h
deleted file mode 100644 (file)
index 8bcc311..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * intel_baytrail.h: MailBox access support for Intel BayTrail platforms
- */
-
-#ifndef INTEL_BAYTRAIL_MBI_SYMS_H
-#define INTEL_BAYTRAIL_MBI_SYMS_H
-
-#define BT_MBI_MCR_OFFSET      0xD0
-#define BT_MBI_MDR_OFFSET      0xD4
-#define BT_MBI_MCRX_OFFSET     0xD8
-
-#define BT_MBI_RD_MASK         0xFEFFFFFF
-#define BT_MBI_WR_MASK         0X01000000
-
-#define BT_MBI_MASK_HI         0xFFFFFF00
-#define BT_MBI_MASK_LO         0x000000FF
-#define BT_MBI_ENABLE          0xF0
-
-/* BT-SB unit access methods */
-#define BT_MBI_UNIT_AUNIT      0x00
-#define BT_MBI_UNIT_SMC                0x01
-#define BT_MBI_UNIT_CPU                0x02
-#define BT_MBI_UNIT_BUNIT      0x03
-#define BT_MBI_UNIT_PMC                0x04
-#define BT_MBI_UNIT_GFX                0x06
-#define BT_MBI_UNIT_SMI                0x0C
-#define BT_MBI_UNIT_USB                0x43
-#define BT_MBI_UNIT_SATA       0xA3
-#define BT_MBI_UNIT_PCIE       0xA6
-
-/* Read/write opcodes */
-#define BT_MBI_AUNIT_READ      0x10
-#define BT_MBI_AUNIT_WRITE     0x11
-#define BT_MBI_SMC_READ                0x10
-#define BT_MBI_SMC_WRITE       0x11
-#define BT_MBI_CPU_READ                0x10
-#define BT_MBI_CPU_WRITE       0x11
-#define BT_MBI_BUNIT_READ      0x10
-#define BT_MBI_BUNIT_WRITE     0x11
-#define BT_MBI_PMC_READ                0x06
-#define BT_MBI_PMC_WRITE       0x07
-#define BT_MBI_GFX_READ                0x00
-#define BT_MBI_GFX_WRITE       0x01
-#define BT_MBI_SMIO_READ       0x06
-#define BT_MBI_SMIO_WRITE      0x07
-#define BT_MBI_USB_READ                0x06
-#define BT_MBI_USB_WRITE       0x07
-#define BT_MBI_SATA_READ       0x00
-#define BT_MBI_SATA_WRITE      0x01
-#define BT_MBI_PCIE_READ       0x00
-#define BT_MBI_PCIE_WRITE      0x01
-
-/**
- * bt_mbi_read() - MailBox Interface read command
- * @port:      port indicating subunit being accessed
- * @opcode:    port specific read or write opcode
- * @offset:    register address offset
- * @mdr:       register data to be read
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
-
-/**
- * bt_mbi_write() - MailBox unmasked write command
- * @port:      port indicating subunit being accessed
- * @opcode:    port specific read or write opcode
- * @offset:    register address offset
- * @mdr:       register data to be written
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
-
-/**
- * bt_mbi_modify() - MailBox masked write command
- * @port:      port indicating subunit being accessed
- * @opcode:    port specific read or write opcode
- * @offset:    register address offset
- * @mdr:       register data being modified
- * @mask:      mask indicating bits in mdr to be modified
- *
- * Locking is handled by spinlock - cannot sleep.
- * Return: Nonzero on error
- */
-int bt_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
-
-#endif /* INTEL_BAYTRAIL_MBI_SYMS_H */
index 609d38779b2680175660557b1093fa1026c91aad..3f870972247c4ac6b9103e8c1a86f7634b8ff051 100644 (file)
@@ -449,6 +449,7 @@ static struct attribute_group pcc_attr_group = {
 
 /* hotkey input device driver */
 
+static int sleep_keydown_seen;
 static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
 {
        struct input_dev *hotk_input_dev = pcc->input_dev;
@@ -462,6 +463,16 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
                                 "error getting hotkey status\n"));
                return;
        }
+
+       /* hack: some firmware sends no key down for sleep / hibernate */
+       if ((result & 0xf) == 0x7 || (result & 0xf) == 0xa) {
+               if (result & 0x80)
+                       sleep_keydown_seen = 1;
+               if (!sleep_keydown_seen)
+                       sparse_keymap_report_event(hotk_input_dev,
+                                       result & 0xf, 0x80, false);
+       }
+
        if (!sparse_keymap_report_event(hotk_input_dev,
                                        result & 0xf, result & 0x80, false))
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
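
The panasonic-laptop hunk above works around firmware that never sends a key-down event for the sleep (0x7) and hibernate (0xa) hotkeys: the low nibble of the status word is the key code, bit 7 is the press state, and a key-down is synthesized until a real one has been observed. A minimal standalone sketch of that decoding logic (illustrative only, not driver code; the function names are hypothetical):

        #include <stdbool.h>
        #include <stdio.h>

        static bool sleep_keydown_seen;    /* mirrors the driver's module-level flag */

        static void decode_hotkey(unsigned int result)
        {
                unsigned int key = result & 0xf;     /* low nibble: key code */
                bool pressed = result & 0x80;        /* bit 7: key-down flag */

                if (key == 0x7 || key == 0xa) {      /* sleep / hibernate keys */
                        if (pressed)
                                sleep_keydown_seen = true;
                        if (!sleep_keydown_seen)     /* firmware never sent a key-down */
                                printf("synthesizing key-down for code 0x%x\n", key);
                }

                printf("key 0x%x %s\n", key, pressed ? "pressed" : "released");
        }

        int main(void)
        {
                decode_hotkey(0x07);   /* sleep key event, no key-down seen yet */
                decode_hotkey(0x87);   /* sleep key pressed */
                decode_hotkey(0x07);   /* sleep key released: no synthesis needed */
                return 0;
        }
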
index 8f8551a63cc0b6de1bbb55a63ad538e760b72b63..9c5a07417b2b4a1e39c68a5998d972bc31e8eda0 100644 (file)
@@ -76,8 +76,6 @@ do {                                          \
                pr_warn(fmt, ##__VA_ARGS__);    \
 } while (0)
 
-#define SONY_LAPTOP_DRIVER_VERSION     "0.6"
-
 #define SONY_NC_CLASS          "sony-nc"
 #define SONY_NC_HID            "SNY5001"
 #define SONY_NC_DRIVER_NAME    "Sony Notebook Control Driver"
@@ -89,7 +87,6 @@ do {                                          \
 MODULE_AUTHOR("Stelian Pop, Mattia Dongili");
 MODULE_DESCRIPTION("Sony laptop extras driver (SPIC and SNC ACPI device)");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(SONY_LAPTOP_DRIVER_VERSION);
 
 static int debug;
 module_param(debug, int, 0);
@@ -129,7 +126,8 @@ static int kbd_backlight = -1;
 module_param(kbd_backlight, int, 0444);
 MODULE_PARM_DESC(kbd_backlight,
                 "set this to 0 to disable keyboard backlight, "
-                "1 to enable it (default: no change from current value)");
+                "1 to enable it with automatic control and 2 to have it always "
+                "on (default: no change from current value)");
 
 static int kbd_backlight_timeout = -1;
 module_param(kbd_backlight_timeout, int, 0444);
@@ -152,7 +150,8 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd);
 static int sony_nc_thermal_setup(struct platform_device *pd);
 static void sony_nc_thermal_cleanup(struct platform_device *pd);
 
-static int sony_nc_lid_resume_setup(struct platform_device *pd);
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+                                   unsigned int handle);
 static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
 
 static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -163,6 +162,21 @@ static int __sony_nc_gfx_switch_status_get(void);
 static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
 static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
 
+static int sony_nc_lowbatt_setup(struct platform_device *pd);
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd);
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd);
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd);
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd);
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd);
+
+static int sony_nc_panelid_setup(struct platform_device *pd);
+static void sony_nc_panelid_cleanup(struct platform_device *pd);
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd);
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd);
+
 static int sony_nc_touchpad_setup(struct platform_device *pd,
                                  unsigned int handle);
 static void sony_nc_touchpad_cleanup(struct platform_device *pd);
@@ -1122,6 +1136,8 @@ static struct sony_nc_event sony_100_events[] = {
        { 0x25, SONYPI_EVENT_ANYBUTTON_RELEASED },
        { 0xa6, SONYPI_EVENT_HELP_PRESSED },
        { 0x26, SONYPI_EVENT_ANYBUTTON_RELEASED },
+       { 0xa8, SONYPI_EVENT_FNKEY_1 },
+       { 0x28, SONYPI_EVENT_ANYBUTTON_RELEASED },
        { 0, 0 },
 };
 
@@ -1339,7 +1355,8 @@ static void sony_nc_function_setup(struct acpi_device *device,
                                                result);
                        break;
                case 0x0119:
-                       result = sony_nc_lid_resume_setup(pf_device);
+               case 0x015D:
+                       result = sony_nc_lid_resume_setup(pf_device, handle);
                        if (result)
                                pr_err("couldn't set up lid resume function (%d)\n",
                                                result);
@@ -1381,6 +1398,36 @@ static void sony_nc_function_setup(struct acpi_device *device,
                                pr_err("couldn't set up keyboard backlight function (%d)\n",
                                                result);
                        break;
+               case 0x0121:
+                       result = sony_nc_lowbatt_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up low battery function (%d)\n",
+                                      result);
+                       break;
+               case 0x0149:
+                       result = sony_nc_fanspeed_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up fan speed function (%d)\n",
+                                      result);
+                       break;
+               case 0x0155:
+                       result = sony_nc_usb_charge_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up USB charge support (%d)\n",
+                                               result);
+                       break;
+               case 0x011D:
+                       result = sony_nc_panelid_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up panel ID function (%d)\n",
+                                      result);
+                       break;
+               case 0x0168:
+                       result = sony_nc_smart_conn_setup(pf_device);
+                       if (result)
+                               pr_err("couldn't set up smart connect support (%d)\n",
+                                               result);
+                       break;
                default:
                        continue;
                }
@@ -1420,6 +1467,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                        sony_nc_battery_care_cleanup(pd);
                        break;
                case 0x0119:
+               case 0x015D:
                        sony_nc_lid_resume_cleanup(pd);
                        break;
                case 0x0122:
@@ -1444,6 +1492,21 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
                case 0x0163:
                        sony_nc_kbd_backlight_cleanup(pd, handle);
                        break;
+               case 0x0121:
+                       sony_nc_lowbatt_cleanup(pd);
+                       break;
+               case 0x0149:
+                       sony_nc_fanspeed_cleanup(pd);
+                       break;
+               case 0x0155:
+                       sony_nc_usb_charge_cleanup(pd);
+                       break;
+               case 0x011D:
+                       sony_nc_panelid_cleanup(pd);
+                       break;
+               case 0x0168:
+                       sony_nc_smart_conn_cleanup(pd);
+                       break;
                default:
                        continue;
                }
@@ -1719,7 +1782,7 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
 {
        int result;
 
-       if (value > 1)
+       if (value > 2)
                return -EINVAL;
 
        if (sony_call_snc_handle(kbdbl_ctl->handle,
@@ -1727,8 +1790,10 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
                return -EIO;
 
        /* Try to turn the light on/off immediately */
-       sony_call_snc_handle(kbdbl_ctl->handle,
-                       (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
+       if (value != 1)
+               sony_call_snc_handle(kbdbl_ctl->handle,
+                               (value << 0x0f) | (kbdbl_ctl->base + 0x100),
+                               &result);
 
        kbdbl_ctl->mode = value;
 
@@ -2221,9 +2286,14 @@ static void sony_nc_thermal_resume(void)
 #endif
 
 /* resume on LID open */
+#define LID_RESUME_S5  0
+#define LID_RESUME_S4  1
+#define LID_RESUME_S3  2
+#define LID_RESUME_MAX 3
 struct snc_lid_resume_control {
-       struct device_attribute attrs[3];
+       struct device_attribute attrs[LID_RESUME_MAX];
        unsigned int status;
+       int handle;
 };
 static struct snc_lid_resume_control *lid_ctl;
 
@@ -2231,8 +2301,9 @@ static ssize_t sony_nc_lid_resume_store(struct device *dev,
                                        struct device_attribute *attr,
                                        const char *buffer, size_t count)
 {
-       unsigned int result, pos;
+       unsigned int result;
        unsigned long value;
+       unsigned int pos = LID_RESUME_S5;
        if (count > 31)
                return -EINVAL;
 
@@ -2245,21 +2316,21 @@ static ssize_t sony_nc_lid_resume_store(struct device *dev,
         * +--------------+
         *   2    1    0
         */
-       if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
-               pos = 2;
-       else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
-               pos = 1;
-       else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
-               pos = 0;
-       else
-               return -EINVAL;
+       while (pos < LID_RESUME_MAX) {
+               if (&lid_ctl->attrs[pos].attr == &attr->attr)
+                       break;
+               pos++;
+       }
+       if (pos == LID_RESUME_MAX)
+               return -EINVAL;
 
        if (value)
                value = lid_ctl->status | (1 << pos);
        else
                value = lid_ctl->status & ~(1 << pos);
 
-       if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
+       if (sony_call_snc_handle(lid_ctl->handle, value << 0x10 | 0x0100,
+                               &result))
                return -EIO;
 
        lid_ctl->status = value;
@@ -2268,29 +2339,27 @@ static ssize_t sony_nc_lid_resume_store(struct device *dev,
 }
 
 static ssize_t sony_nc_lid_resume_show(struct device *dev,
-                                      struct device_attribute *attr, char *buffer)
+                                       struct device_attribute *attr,
+                                       char *buffer)
 {
-       unsigned int pos;
+       unsigned int pos = LID_RESUME_S5;
 
-       if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
-               pos = 2;
-       else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
-               pos = 1;
-       else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
-               pos = 0;
-       else
-               return -EINVAL;
-              
-       return snprintf(buffer, PAGE_SIZE, "%d\n",
-                       (lid_ctl->status >> pos) & 0x01);
+       while (pos < LID_RESUME_MAX) {
+               if (&lid_ctl->attrs[pos].attr == &attr->attr)
+                       return snprintf(buffer, PAGE_SIZE, "%d\n",
+                                       (lid_ctl->status >> pos) & 0x01);
+               pos++;
+       }
+       return -EINVAL;
 }
 
-static int sony_nc_lid_resume_setup(struct platform_device *pd)
+static int sony_nc_lid_resume_setup(struct platform_device *pd,
+                                       unsigned int handle)
 {
        unsigned int result;
        int i;
 
-       if (sony_call_snc_handle(0x0119, 0x0000, &result))
+       if (sony_call_snc_handle(handle, 0x0000, &result))
                return -EIO;
 
        lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
@@ -2298,26 +2367,29 @@ static int sony_nc_lid_resume_setup(struct platform_device *pd)
                return -ENOMEM;
 
        lid_ctl->status = result & 0x7;
+       lid_ctl->handle = handle;
 
        sysfs_attr_init(&lid_ctl->attrs[0].attr);
-       lid_ctl->attrs[0].attr.name = "lid_resume_S3";
-       lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
-       lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
-       lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
-
-       sysfs_attr_init(&lid_ctl->attrs[1].attr);
-       lid_ctl->attrs[1].attr.name = "lid_resume_S4";
-       lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
-       lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
-       lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
-
-       sysfs_attr_init(&lid_ctl->attrs[2].attr);
-       lid_ctl->attrs[2].attr.name = "lid_resume_S5";
-       lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
-       lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
-       lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
-
-       for (i = 0; i < 3; i++) {
+       lid_ctl->attrs[LID_RESUME_S5].attr.name = "lid_resume_S5";
+       lid_ctl->attrs[LID_RESUME_S5].attr.mode = S_IRUGO | S_IWUSR;
+       lid_ctl->attrs[LID_RESUME_S5].show = sony_nc_lid_resume_show;
+       lid_ctl->attrs[LID_RESUME_S5].store = sony_nc_lid_resume_store;
+
+       if (handle == 0x0119) {
+               sysfs_attr_init(&lid_ctl->attrs[1].attr);
+               lid_ctl->attrs[LID_RESUME_S4].attr.name = "lid_resume_S4";
+               lid_ctl->attrs[LID_RESUME_S4].attr.mode = S_IRUGO | S_IWUSR;
+               lid_ctl->attrs[LID_RESUME_S4].show = sony_nc_lid_resume_show;
+               lid_ctl->attrs[LID_RESUME_S4].store = sony_nc_lid_resume_store;
+
+               sysfs_attr_init(&lid_ctl->attrs[2].attr);
+               lid_ctl->attrs[LID_RESUME_S3].attr.name = "lid_resume_S3";
+               lid_ctl->attrs[LID_RESUME_S3].attr.mode = S_IRUGO | S_IWUSR;
+               lid_ctl->attrs[LID_RESUME_S3].show = sony_nc_lid_resume_show;
+               lid_ctl->attrs[LID_RESUME_S3].store = sony_nc_lid_resume_store;
+       }
+       for (i = 0; i < LID_RESUME_MAX &&
+                       lid_ctl->attrs[i].attr.name; i++) {
                result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
                if (result)
                        goto liderror;
@@ -2340,8 +2412,12 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
        int i;
 
        if (lid_ctl) {
-               for (i = 0; i < 3; i++)
+               for (i = 0; i < LID_RESUME_MAX; i++) {
+                       if (!lid_ctl->attrs[i].attr.name)
+                               break;
+
                        device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
+               }
 
                kfree(lid_ctl);
                lid_ctl = NULL;
@@ -2524,6 +2600,355 @@ static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
        }
 }
 
+/* low battery function */
+static struct device_attribute *lowbatt_handle;
+
+static ssize_t sony_nc_lowbatt_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0121, value << 8, &result))
+               return -EIO;
+
+       return count;
+}
+
+static ssize_t sony_nc_lowbatt_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0121, 0x0200, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result & 1);
+}
+
+static int sony_nc_lowbatt_setup(struct platform_device *pd)
+{
+       unsigned int result;
+
+       lowbatt_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!lowbatt_handle)
+               return -ENOMEM;
+
+       sysfs_attr_init(&lowbatt_handle->attr);
+       lowbatt_handle->attr.name = "lowbatt_hibernate";
+       lowbatt_handle->attr.mode = S_IRUGO | S_IWUSR;
+       lowbatt_handle->show = sony_nc_lowbatt_show;
+       lowbatt_handle->store = sony_nc_lowbatt_store;
+
+       result = device_create_file(&pd->dev, lowbatt_handle);
+       if (result) {
+               kfree(lowbatt_handle);
+               lowbatt_handle = NULL;
+               return result;
+       }
+
+       return 0;
+}
+
+static void sony_nc_lowbatt_cleanup(struct platform_device *pd)
+{
+       if (lowbatt_handle) {
+               device_remove_file(&pd->dev, lowbatt_handle);
+               kfree(lowbatt_handle);
+               lowbatt_handle = NULL;
+       }
+}
+
+/* fan speed function */
+static struct device_attribute *fan_handle, *hsf_handle;
+
+static ssize_t sony_nc_hsfan_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0149, value << 0x10 | 0x0200, &result))
+               return -EIO;
+
+       return count;
+}
+
+static ssize_t sony_nc_hsfan_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0149, 0x0100, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static ssize_t sony_nc_fanspeed_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0149, 0x0300, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0xff);
+}
+
+static int sony_nc_fanspeed_setup(struct platform_device *pd)
+{
+       unsigned int result;
+
+       fan_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!fan_handle)
+               return -ENOMEM;
+
+       hsf_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!hsf_handle) {
+               result = -ENOMEM;
+               goto out_hsf_handle_alloc;
+       }
+
+       sysfs_attr_init(&fan_handle->attr);
+       fan_handle->attr.name = "fanspeed";
+       fan_handle->attr.mode = S_IRUGO;
+       fan_handle->show = sony_nc_fanspeed_show;
+       fan_handle->store = NULL;
+
+       sysfs_attr_init(&hsf_handle->attr);
+       hsf_handle->attr.name = "fan_forced";
+       hsf_handle->attr.mode = S_IRUGO | S_IWUSR;
+       hsf_handle->show = sony_nc_hsfan_show;
+       hsf_handle->store = sony_nc_hsfan_store;
+
+       result = device_create_file(&pd->dev, fan_handle);
+       if (result)
+               goto out_fan_handle;
+
+       result = device_create_file(&pd->dev, hsf_handle);
+       if (result)
+               goto out_hsf_handle;
+
+       return 0;
+
+out_hsf_handle:
+       device_remove_file(&pd->dev, fan_handle);
+
+out_fan_handle:
+       kfree(hsf_handle);
+       hsf_handle = NULL;
+
+out_hsf_handle_alloc:
+       kfree(fan_handle);
+       fan_handle = NULL;
+       return result;
+}
+
+static void sony_nc_fanspeed_cleanup(struct platform_device *pd)
+{
+       if (fan_handle) {
+               device_remove_file(&pd->dev, fan_handle);
+               kfree(fan_handle);
+               fan_handle = NULL;
+       }
+       if (hsf_handle) {
+               device_remove_file(&pd->dev, hsf_handle);
+               kfree(hsf_handle);
+               hsf_handle = NULL;
+       }
+}
+
+/* USB charge function */
+static struct device_attribute *uc_handle;
+
+static ssize_t sony_nc_usb_charge_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0155, value << 0x10 | 0x0100, &result))
+               return -EIO;
+
+       return count;
+}
+
+static ssize_t sony_nc_usb_charge_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0155, 0x0000, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+}
+
+static int sony_nc_usb_charge_setup(struct platform_device *pd)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x0155, 0x0000, &result) || !(result & 0x01)) {
+               /* some models advertise the handle but have no implementation
+                * for it
+                */
+               pr_info("No USB Charge capability found\n");
+               return 0;
+       }
+
+       uc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!uc_handle)
+               return -ENOMEM;
+
+       sysfs_attr_init(&uc_handle->attr);
+       uc_handle->attr.name = "usb_charge";
+       uc_handle->attr.mode = S_IRUGO | S_IWUSR;
+       uc_handle->show = sony_nc_usb_charge_show;
+       uc_handle->store = sony_nc_usb_charge_store;
+
+       result = device_create_file(&pd->dev, uc_handle);
+       if (result) {
+               kfree(uc_handle);
+               uc_handle = NULL;
+               return result;
+       }
+
+       return 0;
+}
+
+static void sony_nc_usb_charge_cleanup(struct platform_device *pd)
+{
+       if (uc_handle) {
+               device_remove_file(&pd->dev, uc_handle);
+               kfree(uc_handle);
+               uc_handle = NULL;
+       }
+}
+
+/* Panel ID function */
+static struct device_attribute *panel_handle;
+
+static ssize_t sony_nc_panelid_show(struct device *dev,
+               struct device_attribute *attr, char *buffer)
+{
+       unsigned int result;
+
+       if (sony_call_snc_handle(0x011D, 0x0000, &result))
+               return -EIO;
+
+       return snprintf(buffer, PAGE_SIZE, "%d\n", result);
+}
+
+static int sony_nc_panelid_setup(struct platform_device *pd)
+{
+       unsigned int result;
+
+       panel_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!panel_handle)
+               return -ENOMEM;
+
+       sysfs_attr_init(&panel_handle->attr);
+       panel_handle->attr.name = "panel_id";
+       panel_handle->attr.mode = S_IRUGO;
+       panel_handle->show = sony_nc_panelid_show;
+       panel_handle->store = NULL;
+
+       result = device_create_file(&pd->dev, panel_handle);
+       if (result) {
+               kfree(panel_handle);
+               panel_handle = NULL;
+               return result;
+       }
+
+       return 0;
+}
+
+static void sony_nc_panelid_cleanup(struct platform_device *pd)
+{
+       if (panel_handle) {
+               device_remove_file(&pd->dev, panel_handle);
+               kfree(panel_handle);
+               panel_handle = NULL;
+       }
+}
+
+/* smart connect function */
+static struct device_attribute *sc_handle;
+
+static ssize_t sony_nc_smart_conn_store(struct device *dev,
+               struct device_attribute *attr,
+               const char *buffer, size_t count)
+{
+       unsigned int result;
+       unsigned long value;
+
+       if (count > 31)
+               return -EINVAL;
+
+       if (kstrtoul(buffer, 10, &value) || value > 1)
+               return -EINVAL;
+
+       if (sony_call_snc_handle(0x0168, value << 0x10, &result))
+               return -EIO;
+
+       return count;
+}
+
+static int sony_nc_smart_conn_setup(struct platform_device *pd)
+{
+       unsigned int result;
+
+       sc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
+       if (!sc_handle)
+               return -ENOMEM;
+
+       sysfs_attr_init(&sc_handle->attr);
+       sc_handle->attr.name = "smart_connect";
+       sc_handle->attr.mode = S_IWUSR;
+       sc_handle->show = NULL;
+       sc_handle->store = sony_nc_smart_conn_store;
+
+       result = device_create_file(&pd->dev, sc_handle);
+       if (result) {
+               kfree(sc_handle);
+               sc_handle = NULL;
+               return result;
+       }
+
+       return 0;
+}
+
+static void sony_nc_smart_conn_cleanup(struct platform_device *pd)
+{
+       if (sc_handle) {
+               device_remove_file(&pd->dev, sc_handle);
+               kfree(sc_handle);
+               sc_handle = NULL;
+       }
+}
+
 /* Touchpad enable/disable */
 struct touchpad_control {
        struct device_attribute attr;
@@ -2726,8 +3151,6 @@ static int sony_nc_add(struct acpi_device *device)
        int result = 0;
        struct sony_nc_value *item;
 
-       pr_info("%s v%s\n", SONY_NC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
        sony_nc_acpi_device = device;
        strcpy(acpi_device_class(device), "sony/hotkey");
 
@@ -2821,6 +3244,7 @@ static int sony_nc_add(struct acpi_device *device)
                }
        }
 
+       pr_info("SNC setup done.\n");
        return 0;
 
 out_sysfs:
@@ -4259,8 +4683,6 @@ static int sony_pic_add(struct acpi_device *device)
        struct sony_pic_ioport *io, *tmp_io;
        struct sony_pic_irq *irq, *tmp_irq;
 
-       pr_info("%s v%s\n", SONY_PIC_DRIVER_NAME, SONY_LAPTOP_DRIVER_VERSION);
-
        spic_dev.acpi_dev = device;
        strcpy(acpi_device_class(device), "sony/hotkey");
        sony_pic_detect_device_type(&spic_dev);
@@ -4360,6 +4782,7 @@ static int sony_pic_add(struct acpi_device *device)
        if (result)
                goto err_remove_pf;
 
+       pr_info("SPIC setup done.\n");
        return 0;
 
 err_remove_pf:
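
The sony-laptop hunks above add several sysfs attributes on the platform device (lowbatt_hibernate, fanspeed, fan_forced, usb_charge, panel_id, smart_connect, and the lid_resume_S3/S4/S5 group), each registered only when the SNC firmware advertises the corresponding handle. A hedged userspace sketch of how they might be exercised; the /sys path is an assumption and may differ by kernel and model:

        #include <stdio.h>

        /* Assumed sysfs location of the sony-laptop platform device. */
        #define SONY_SYSFS "/sys/devices/platform/sony-laptop/"

        static int read_attr(const char *name, int *value)
        {
                char path[256];
                FILE *f;
                int ret;

                snprintf(path, sizeof(path), SONY_SYSFS "%s", name);
                f = fopen(path, "r");
                if (!f)
                        return -1;
                ret = (fscanf(f, "%d", value) == 1) ? 0 : -1;
                fclose(f);
                return ret;
        }

        static int write_attr(const char *name, int value)
        {
                char path[256];
                FILE *f;

                snprintf(path, sizeof(path), SONY_SYSFS "%s", name);
                f = fopen(path, "w");
                if (!f)
                        return -1;
                fprintf(f, "%d\n", value);
                fclose(f);
                return 0;
        }

        int main(void)
        {
                int v;

                if (read_attr("fanspeed", &v) == 0)
                        printf("fan speed: %d\n", v);
                if (read_attr("usb_charge", &v) == 0)
                        printf("USB charge while suspended: %d\n", v);

                /* Allow the lid to wake the machine from S3 (needs root). */
                write_attr("lid_resume_S3", 1);
                return 0;
        }
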
index f21e1095b1dd389a9686fa23589de56d978d400e..15e61c16736ef7d68c0fc439baaed760262d2b94 100644 (file)
@@ -3441,6 +3441,106 @@ err_exit:
        return (res < 0)? res : 1;
 }
 
+/* The ThinkPad X1 Carbon supports 5 modes: Home mode, Web browser
+ * mode, Web conference mode, Function mode and Lay-flat mode.
+ * We currently support only Home mode and Function mode.
+ *
+ * Support for the remaining modes may be added in the future.
+ *
+ */
+enum ADAPTIVE_KEY_MODE {
+       HOME_MODE,
+       WEB_BROWSER_MODE,
+       WEB_CONFERENCE_MODE,
+       FUNCTION_MODE,
+       LAYFLAT_MODE
+};
+
+const int adaptive_keyboard_modes[] = {
+       HOME_MODE,
+/*     WEB_BROWSER_MODE = 2,
+       WEB_CONFERENCE_MODE = 3, */
+       FUNCTION_MODE
+};
+
+#define DFR_CHANGE_ROW                 0x101
+#define DFR_SHOW_QUICKVIEW_ROW         0x102
+
+/* Holding the Fn key down for a moment switches to Function mode; when
+ * the Fn key is released, the previous mode is restored.
+ */
+static bool adaptive_keyboard_mode_is_saved;
+static int adaptive_keyboard_prev_mode;
+
+static int adaptive_keyboard_get_next_mode(int mode)
+{
+       size_t i;
+       size_t max_mode = ARRAY_SIZE(adaptive_keyboard_modes) - 1;
+
+       for (i = 0; i <= max_mode; i++) {
+               if (adaptive_keyboard_modes[i] == mode)
+                       break;
+       }
+
+       if (i >= max_mode)
+               i = 0;
+       else
+               i++;
+
+       return adaptive_keyboard_modes[i];
+}
+
+static bool adaptive_keyboard_hotkey_notify_hotkey(unsigned int scancode)
+{
+       u32 current_mode = 0;
+       int new_mode = 0;
+
+       switch (scancode) {
+       case DFR_CHANGE_ROW:
+               if (adaptive_keyboard_mode_is_saved) {
+                       new_mode = adaptive_keyboard_prev_mode;
+                       adaptive_keyboard_mode_is_saved = false;
+               } else {
+                       if (!acpi_evalf(
+                                       hkey_handle, &current_mode,
+                                       "GTRW", "dd", 0)) {
+                               pr_err("Cannot read adaptive keyboard mode\n");
+                               return false;
+                       } else {
+                               new_mode = adaptive_keyboard_get_next_mode(
+                                               current_mode);
+                       }
+               }
+
+               if (!acpi_evalf(hkey_handle, NULL, "STRW", "vd", new_mode)) {
+                       pr_err("Cannot set adaptive keyboard mode\n");
+                       return false;
+               }
+
+               return true;
+
+       case DFR_SHOW_QUICKVIEW_ROW:
+               if (!acpi_evalf(hkey_handle,
+                               &adaptive_keyboard_prev_mode,
+                               "GTRW", "dd", 0)) {
+                       pr_err("Cannot read adaptive keyboard mode\n");
+                       return false;
+               } else {
+                       adaptive_keyboard_mode_is_saved = true;
+
+                       if (!acpi_evalf(hkey_handle,
+                                       NULL, "STRW", "vd", FUNCTION_MODE)) {
+                               pr_err("Cannot set adaptive keyboard mode\n");
+                               return false;
+                       }
+               }
+               return true;
+
+       default:
+               return false;
+       }
+}
+
 static bool hotkey_notify_hotkey(const u32 hkey,
                                 bool *send_acpi_ev,
                                 bool *ignore_acpi_ev)
@@ -3460,6 +3560,8 @@ static bool hotkey_notify_hotkey(const u32 hkey,
                        *ignore_acpi_ev = true;
                }
                return true;
+       } else {
+               return adaptive_keyboard_hotkey_notify_hotkey(scancode);
        }
        return false;
 }
@@ -3732,13 +3834,28 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
 
 static void hotkey_suspend(void)
 {
+       int hkeyv;
+
        /* Do these on suspend, we get the events on early resume! */
        hotkey_wakeup_reason = TP_ACPI_WAKEUP_NONE;
        hotkey_autosleep_ack = 0;
+
+       /* save previous mode of adaptive keyboard of X1 Carbon */
+       if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+               if ((hkeyv >> 8) == 2) {
+                       if (!acpi_evalf(hkey_handle,
+                                               &adaptive_keyboard_prev_mode,
+                                               "GTRW", "dd", 0)) {
+                               pr_err("Cannot read adaptive keyboard mode.\n");
+                       }
+               }
+       }
 }
 
 static void hotkey_resume(void)
 {
+       int hkeyv;
+
        tpacpi_disable_brightness_delay();
 
        if (hotkey_status_set(true) < 0 ||
@@ -3751,6 +3868,18 @@ static void hotkey_resume(void)
        hotkey_wakeup_reason_notify_change();
        hotkey_wakeup_hotunplug_complete_notify_change();
        hotkey_poll_setup_safe(false);
+
+       /* restore previous mode of adaptive keyboard of X1 Carbon */
+       if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
+               if ((hkeyv >> 8) == 2) {
+                       if (!acpi_evalf(hkey_handle,
+                                               NULL,
+                                               "STRW", "vd",
+                                               adaptive_keyboard_prev_mode)) {
+                               pr_err("Cannot set adaptive keyboard mode.\n");
+                       }
+               }
+       }
 }
 
 /* procfs -------------------------------------------------------------- */
@@ -8451,9 +8580,21 @@ static void mute_led_exit(void)
                tpacpi_led_set(i, false);
 }
 
+static void mute_led_resume(void)
+{
+       int i;
+
+       for (i = 0; i < TPACPI_LED_MAX; i++) {
+               struct tp_led_table *t = &led_tables[i];
+               if (t->state >= 0)
+                       mute_led_on_off(t, t->state);
+       }
+}
+
 static struct ibm_struct mute_led_driver_data = {
        .name = "mute_led",
        .exit = mute_led_exit,
+       .resume = mute_led_resume,
 };
 
 /****************************************************************************
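
The thinkpad_acpi hunks above cycle the X1 Carbon adaptive keyboard between the two supported modes by walking adaptive_keyboard_modes[] and wrapping at the end, reading the current mode through the ACPI GTRW method and writing it back with STRW. A standalone sketch of just the mode-cycling logic, mirroring adaptive_keyboard_get_next_mode(); it is illustrative only and performs no ACPI calls:

        #include <stdio.h>

        enum adaptive_key_mode {
                HOME_MODE,
                WEB_BROWSER_MODE,
                WEB_CONFERENCE_MODE,
                FUNCTION_MODE,
                LAYFLAT_MODE
        };

        /* Only these two modes are handled by the driver at the moment. */
        static const int modes[] = { HOME_MODE, FUNCTION_MODE };
        #define MAX_MODE ((int)(sizeof(modes) / sizeof(modes[0])) - 1)

        static int next_mode(int mode)
        {
                int i;

                for (i = 0; i <= MAX_MODE; i++)
                        if (modes[i] == mode)
                                break;

                /* Unknown mode or last entry: wrap around to the first one. */
                if (i >= MAX_MODE)
                        return modes[0];
                return modes[i + 1];
        }

        int main(void)
        {
                int mode = HOME_MODE;

                mode = next_mode(mode);    /* HOME_MODE -> FUNCTION_MODE */
                printf("mode %d\n", mode);
                mode = next_mode(mode);    /* FUNCTION_MODE -> HOME_MODE */
                printf("mode %d\n", mode);
                return 0;
        }
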
index 90dd7645a9e504449fd837ec1ad280775fc84ae9..46473ca7566bcd1ab1e1f286985be68c369fb250 100644 (file)
@@ -5,6 +5,7 @@
  *  Copyright (C) 2002-2004 John Belmonte
  *  Copyright (C) 2008 Philip Langdale
  *  Copyright (C) 2010 Pierre Ducroquet
+ *  Copyright (C) 2014 Azael Avalos
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -37,7 +38,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TOSHIBA_ACPI_VERSION   "0.19"
+#define TOSHIBA_ACPI_VERSION   "0.20"
 #define PROC_INTERFACE_VERSION 1
 
 #include <linux/kernel.h>
@@ -77,6 +78,9 @@ MODULE_LICENSE("GPL");
  * However the ACPI methods seem to be incomplete in some areas (for
  * example they allow setting, but not reading, the LCD brightness value),
  * so this is still useful.
+ *
+ * SCI stands for "System Configuration Interface", whose aim is to
+ * conceal hardware differences between different models.
  */
 
 #define HCI_WORDS                      6
@@ -84,12 +88,23 @@ MODULE_LICENSE("GPL");
 /* operations */
 #define HCI_SET                                0xff00
 #define HCI_GET                                0xfe00
+#define SCI_OPEN                       0xf100
+#define SCI_CLOSE                      0xf200
+#define SCI_GET                                0xf300
+#define SCI_SET                                0xf400
 
 /* return codes */
 #define HCI_SUCCESS                    0x0000
 #define HCI_FAILURE                    0x1000
 #define HCI_NOT_SUPPORTED              0x8000
 #define HCI_EMPTY                      0x8c00
+#define HCI_DATA_NOT_AVAILABLE         0x8d20
+#define HCI_NOT_INITIALIZED            0x8d50
+#define SCI_OPEN_CLOSE_OK              0x0044
+#define SCI_ALREADY_OPEN               0x8100
+#define SCI_NOT_OPENED                 0x8200
+#define SCI_INPUT_DATA_ERROR           0x8300
+#define SCI_NOT_PRESENT                        0x8600
 
 /* registers */
 #define HCI_FAN                                0x0004
@@ -99,13 +114,22 @@ MODULE_LICENSE("GPL");
 #define HCI_HOTKEY_EVENT               0x001e
 #define HCI_LCD_BRIGHTNESS             0x002a
 #define HCI_WIRELESS                   0x0056
+#define HCI_ACCELEROMETER              0x006d
+#define HCI_KBD_ILLUMINATION           0x0095
+#define HCI_ECO_MODE                   0x0097
+#define HCI_ACCELEROMETER2             0x00a6
+#define SCI_ILLUMINATION               0x014e
+#define SCI_KBD_ILLUM_STATUS           0x015c
+#define SCI_TOUCHPAD                   0x050e
 
 /* field definitions */
+#define HCI_ACCEL_MASK                 0x7fff
 #define HCI_HOTKEY_DISABLE             0x0b
 #define HCI_HOTKEY_ENABLE              0x09
 #define HCI_LCD_BRIGHTNESS_BITS                3
 #define HCI_LCD_BRIGHTNESS_SHIFT       (16-HCI_LCD_BRIGHTNESS_BITS)
 #define HCI_LCD_BRIGHTNESS_LEVELS      (1 << HCI_LCD_BRIGHTNESS_BITS)
+#define HCI_MISC_SHIFT                 0x10
 #define HCI_VIDEO_OUT_LCD              0x1
 #define HCI_VIDEO_OUT_CRT              0x2
 #define HCI_VIDEO_OUT_TV               0x4
@@ -113,6 +137,8 @@ MODULE_LICENSE("GPL");
 #define HCI_WIRELESS_BT_PRESENT                0x0f
 #define HCI_WIRELESS_BT_ATTACH         0x40
 #define HCI_WIRELESS_BT_POWER          0x80
+#define SCI_KBD_MODE_FNZ               0x1
+#define SCI_KBD_MODE_AUTO              0x2
 
 struct toshiba_acpi_dev {
        struct acpi_device *acpi_dev;
@@ -122,10 +148,14 @@ struct toshiba_acpi_dev {
        struct work_struct hotkey_work;
        struct backlight_device *backlight_dev;
        struct led_classdev led_dev;
+       struct led_classdev kbd_led;
+       struct led_classdev eco_led;
 
        int force_fan;
        int last_key_event;
        int key_event_valid;
+       int kbd_mode;
+       int kbd_time;
 
        unsigned int illumination_supported:1;
        unsigned int video_supported:1;
@@ -134,6 +164,12 @@ struct toshiba_acpi_dev {
        unsigned int ntfy_supported:1;
        unsigned int info_supported:1;
        unsigned int tr_backlight_supported:1;
+       unsigned int kbd_illum_supported:1;
+       unsigned int kbd_led_registered:1;
+       unsigned int touchpad_supported:1;
+       unsigned int eco_supported:1;
+       unsigned int accelerometer_supported:1;
+       unsigned int sysfs_created:1;
 
        struct mutex mutex;
 };
@@ -280,21 +316,94 @@ static acpi_status hci_read2(struct toshiba_acpi_dev *dev, u32 reg,
        return status;
 }
 
+/* Common SCI tasks
+ */
+
+static int sci_open(struct toshiba_acpi_dev *dev)
+{
+       u32 in[HCI_WORDS] = { SCI_OPEN, 0, 0, 0, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status;
+
+       status = hci_raw(dev, in, out);
+       if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+               pr_err("ACPI call to open SCI failed\n");
+               return 0;
+       }
+
+       if (out[0] == SCI_OPEN_CLOSE_OK) {
+               return 1;
+       } else if (out[0] == SCI_ALREADY_OPEN) {
+               pr_info("Toshiba SCI already opened\n");
+               return 1;
+       } else if (out[0] == SCI_NOT_PRESENT) {
+               pr_info("Toshiba SCI is not present\n");
+       }
+
+       return 0;
+}
+
+static void sci_close(struct toshiba_acpi_dev *dev)
+{
+       u32 in[HCI_WORDS] = { SCI_CLOSE, 0, 0, 0, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status;
+
+       status = hci_raw(dev, in, out);
+       if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+               pr_err("ACPI call to close SCI failed\n");
+               return;
+       }
+
+       if (out[0] == SCI_OPEN_CLOSE_OK)
+               return;
+       else if (out[0] == SCI_NOT_OPENED)
+               pr_info("Toshiba SCI not opened\n");
+       else if (out[0] == SCI_NOT_PRESENT)
+               pr_info("Toshiba SCI is not present\n");
+}
+
+static acpi_status sci_read(struct toshiba_acpi_dev *dev, u32 reg,
+                           u32 *out1, u32 *result)
+{
+       u32 in[HCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status = hci_raw(dev, in, out);
+       *out1 = out[2];
+       *result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
+       return status;
+}
+
+static acpi_status sci_write(struct toshiba_acpi_dev *dev, u32 reg,
+                            u32 in1, u32 *result)
+{
+       u32 in[HCI_WORDS] = { SCI_SET, reg, in1, 0, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status = hci_raw(dev, in, out);
+       *result = (ACPI_SUCCESS(status)) ? out[0] : HCI_FAILURE;
+       return status;
+}
+
 /* Illumination support */
 static int toshiba_illumination_available(struct toshiba_acpi_dev *dev)
 {
-       u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
+       u32 in[HCI_WORDS] = { SCI_GET, SCI_ILLUMINATION, 0, 0, 0, 0 };
        u32 out[HCI_WORDS];
        acpi_status status;
 
-       in[0] = 0xf100;
+       if (!sci_open(dev))
+               return 0;
+
        status = hci_raw(dev, in, out);
-       if (ACPI_FAILURE(status)) {
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == HCI_FAILURE) {
+               pr_err("ACPI call to query Illumination support failed\n");
+               return 0;
+       } else if (out[0] == HCI_NOT_SUPPORTED || out[1] != 1) {
                pr_info("Illumination device not available\n");
                return 0;
        }
-       in[0] = 0xf400;
-       status = hci_raw(dev, in, out);
+
        return 1;
 }
 
@@ -303,82 +412,270 @@ static void toshiba_illumination_set(struct led_classdev *cdev,
 {
        struct toshiba_acpi_dev *dev = container_of(cdev,
                        struct toshiba_acpi_dev, led_dev);
-       u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
-       u32 out[HCI_WORDS];
+       u32 state, result;
        acpi_status status;
 
        /* First request : initialize communication. */
-       in[0] = 0xf100;
-       status = hci_raw(dev, in, out);
+       if (!sci_open(dev))
+               return;
+
+       /* Switch the illumination on/off */
+       state = brightness ? 1 : 0;
+       status = sci_write(dev, SCI_ILLUMINATION, state, &result);
+       sci_close(dev);
        if (ACPI_FAILURE(status)) {
-               pr_info("Illumination device not available\n");
+               pr_err("ACPI call for illumination failed\n");
+               return;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               pr_info("Illumination not supported\n");
                return;
        }
+}
 
-       if (brightness) {
-               /* Switch the illumination on */
-               in[0] = 0xf400;
-               in[1] = 0x14e;
-               in[2] = 1;
-               status = hci_raw(dev, in, out);
-               if (ACPI_FAILURE(status)) {
-                       pr_info("ACPI call for illumination failed\n");
-                       return;
-               }
-       } else {
-               /* Switch the illumination off */
-               in[0] = 0xf400;
-               in[1] = 0x14e;
-               in[2] = 0;
-               status = hci_raw(dev, in, out);
-               if (ACPI_FAILURE(status)) {
-                       pr_info("ACPI call for illumination failed.\n");
-                       return;
-               }
+static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
+{
+       struct toshiba_acpi_dev *dev = container_of(cdev,
+                       struct toshiba_acpi_dev, led_dev);
+       u32 state, result;
+       acpi_status status;
+
+       /* First request : initialize communication. */
+       if (!sci_open(dev))
+               return LED_OFF;
+
+       /* Check the illumination */
+       status = sci_read(dev, SCI_ILLUMINATION, &state, &result);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call for illumination failed\n");
+               return LED_OFF;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               pr_info("Illumination not supported\n");
+               return LED_OFF;
        }
 
-       /* Last request : close communication. */
-       in[0] = 0xf200;
-       in[1] = 0;
-       in[2] = 0;
-       hci_raw(dev, in, out);
+       return state ? LED_FULL : LED_OFF;
 }
 
-static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev)
+/* KBD Illumination */
+static int toshiba_kbd_illum_status_set(struct toshiba_acpi_dev *dev, u32 time)
+{
+       u32 result;
+       acpi_status status;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       status = sci_write(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to set KBD backlight status failed\n");
+               return -EIO;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               pr_info("Keyboard backlight status not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int toshiba_kbd_illum_status_get(struct toshiba_acpi_dev *dev, u32 *time)
+{
+       u32 result;
+       acpi_status status;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       status = sci_read(dev, SCI_KBD_ILLUM_STATUS, time, &result);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to get KBD backlight status failed\n");
+               return -EIO;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               pr_info("Keyboard backlight status not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev)
 {
        struct toshiba_acpi_dev *dev = container_of(cdev,
-                       struct toshiba_acpi_dev, led_dev);
-       u32 in[HCI_WORDS] = { 0, 0, 0, 0, 0, 0 };
+                       struct toshiba_acpi_dev, kbd_led);
+       u32 state, result;
+       acpi_status status;
+
+       /* Check the keyboard backlight state */
+       status = hci_read1(dev, HCI_KBD_ILLUMINATION, &state, &result);
+       if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to get the keyboard backlight failed\n");
+               return LED_OFF;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               pr_info("Keyboard backlight not supported\n");
+               return LED_OFF;
+       }
+
+       return state ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_kbd_backlight_set(struct led_classdev *cdev,
+                                    enum led_brightness brightness)
+{
+       struct toshiba_acpi_dev *dev = container_of(cdev,
+                       struct toshiba_acpi_dev, kbd_led);
+       u32 state, result;
+       acpi_status status;
+
+       /* Set the keyboard backlight state */
+       state = brightness ? 1 : 0;
+       status = hci_write1(dev, HCI_KBD_ILLUMINATION, state, &result);
+       if (ACPI_FAILURE(status) || result == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to set KBD Illumination mode failed\n");
+               return;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               pr_info("Keyboard backlight not supported\n");
+               return;
+       }
+}
+
+/* TouchPad support */
+static int toshiba_touchpad_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+       u32 result;
+       acpi_status status;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       status = sci_write(dev, SCI_TOUCHPAD, state, &result);
+       sci_close(dev);
+       if (ACPI_FAILURE(status)) {
+               pr_err("ACPI call to set the touchpad failed\n");
+               return -EIO;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+       u32 result;
+       acpi_status status;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       status = sci_read(dev, SCI_TOUCHPAD, state, &result);
+       sci_close(dev);
+       if (ACPI_FAILURE(status)) {
+               pr_err("ACPI call to query the touchpad failed\n");
+               return -EIO;
+       } else if (result == HCI_NOT_SUPPORTED) {
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Eco Mode support */
+static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
+{
+       acpi_status status;
+       u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+       u32 out[HCI_WORDS];
+
+       status = hci_raw(dev, in, out);
+       if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+               pr_info("ACPI call to get ECO led failed\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev)
+{
+       struct toshiba_acpi_dev *dev = container_of(cdev,
+                       struct toshiba_acpi_dev, eco_led);
+       u32 in[HCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
        u32 out[HCI_WORDS];
        acpi_status status;
-       enum led_brightness result;
 
-       /* First request : initialize communication. */
-       in[0] = 0xf100;
        status = hci_raw(dev, in, out);
-       if (ACPI_FAILURE(status)) {
-               pr_info("Illumination device not available\n");
+       if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to get ECO led failed\n");
                return LED_OFF;
        }
 
-       /* Check the illumination */
-       in[0] = 0xf300;
-       in[1] = 0x14e;
+       return out[2] ? LED_FULL : LED_OFF;
+}
+
+static void toshiba_eco_mode_set_status(struct led_classdev *cdev,
+                                    enum led_brightness brightness)
+{
+       struct toshiba_acpi_dev *dev = container_of(cdev,
+                       struct toshiba_acpi_dev, eco_led);
+       u32 in[HCI_WORDS] = { HCI_SET, HCI_ECO_MODE, 0, 1, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status;
+
+       /* Switch the Eco Mode led on/off */
+       in[2] = (brightness) ? 1 : 0;
        status = hci_raw(dev, in, out);
-       if (ACPI_FAILURE(status)) {
-               pr_info("ACPI call for illumination failed.\n");
-               return LED_OFF;
+       if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to set ECO led failed\n");
+               return;
        }
+}
 
-       result = out[2] ? LED_FULL : LED_OFF;
+/* Accelerometer support */
+static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
+{
+       u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER2, 0, 0, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status;
+
+       /* Check whether the accelerometer call exists;
+        * this call also serves as initialization
+        */
+       status = hci_raw(dev, in, out);
+       if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to query the accelerometer failed\n");
+               return -EIO;
+       } else if (out[0] == HCI_DATA_NOT_AVAILABLE ||
+                  out[0] == HCI_NOT_INITIALIZED) {
+               pr_err("Accelerometer not initialized\n");
+               return -EIO;
+       } else if (out[0] == HCI_NOT_SUPPORTED) {
+               pr_info("Accelerometer not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
+                                     u32 *xy, u32 *z)
+{
+       u32 in[HCI_WORDS] = { HCI_GET, HCI_ACCELEROMETER, 0, 1, 0, 0 };
+       u32 out[HCI_WORDS];
+       acpi_status status;
+
+       /* Check the Accelerometer status */
+       status = hci_raw(dev, in, out);
+       if (ACPI_FAILURE(status) || out[0] == SCI_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to query the accelerometer failed\n");
+               return -EIO;
+       }
 
-       /* Last request : close communication. */
-       in[0] = 0xf200;
-       in[1] = 0;
-       in[2] = 0;
-       hci_raw(dev, in, out);
+       *xy = out[2];
+       *z = out[4];
 
-       return result;
+       return 0;
 }
 
 /* Bluetooth rfkill handlers */
@@ -904,6 +1201,177 @@ static const struct backlight_ops toshiba_backlight_data = {
        .update_status  = set_lcd_status,
 };
 
+/*
+ * Sysfs files
+ */
+
+static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
+                                        struct device_attribute *attr,
+                                        const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int mode = -1;
+       int time = -1;
+
+       if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1))
+               return -EINVAL;
+
+       /* Set the Keyboard Backlight Mode where:
+        * Mode - Auto (2) | FN-Z (1)
+        *      Auto - KBD backlight turns off automatically after a given time
+        *      FN-Z - KBD backlight "toggles" when the hotkey is pressed
+        */
+       if (mode != -1 && toshiba->kbd_mode != mode) {
+               time = toshiba->kbd_time << HCI_MISC_SHIFT;
+               time = time + toshiba->kbd_mode;
+               if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
+                       return -EIO;
+               toshiba->kbd_mode = mode;
+       }
+
+       return count;
+}
+
+static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 time;
+
+       if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+               return -EIO;
+
+       return sprintf(buf, "%i\n", time & 0x07);
+}
+
+static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
+                                           struct device_attribute *attr,
+                                           const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int time = -1;
+
+       if (sscanf(buf, "%i", &time) != 1 || time < 0 || time > 60)
+               return -EINVAL;
+
+       /* Set the Keyboard Backlight Timeout: 0-60 seconds */
+       if (time != -1 && toshiba->kbd_time != time) {
+               time = time << HCI_MISC_SHIFT;
+               time = (toshiba->kbd_mode == SCI_KBD_MODE_AUTO) ?
+                                                       time + 1 : time + 2;
+               if (toshiba_kbd_illum_status_set(toshiba, time) < 0)
+                       return -EIO;
+               toshiba->kbd_time = time >> HCI_MISC_SHIFT;
+       }
+
+       return count;
+}
+
+static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
+                                          struct device_attribute *attr,
+                                          char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 time;
+
+       if (toshiba_kbd_illum_status_get(toshiba, &time) < 0)
+               return -EIO;
+
+       return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
+}
+
+static ssize_t toshiba_touchpad_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+
+       /* Set the TouchPad on/off, 0 - Disable | 1 - Enable */
+       if (sscanf(buf, "%i", &state) == 1 && (state == 0 || state == 1)) {
+               if (toshiba_touchpad_set(toshiba, state) < 0)
+                       return -EIO;
+       }
+
+       return count;
+}
+
+static ssize_t toshiba_touchpad_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_touchpad_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%i\n", state);
+}
+
+static ssize_t toshiba_position_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 xyval, zval, tmp;
+       u16 x, y, z;
+       int ret;
+
+       xyval = zval = 0;
+       ret = toshiba_accelerometer_get(toshiba, &xyval, &zval);
+       if (ret < 0)
+               return ret;
+
+       x = xyval & HCI_ACCEL_MASK;
+       tmp = xyval >> HCI_MISC_SHIFT;
+       y = tmp & HCI_ACCEL_MASK;
+       z = zval & HCI_ACCEL_MASK;
+
+       return sprintf(buf, "%d %d %d\n", x, y, z);
+}
+
+static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
+                  toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
+static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
+                  toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
+static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
+                  toshiba_touchpad_show, toshiba_touchpad_store);
+static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
+
+static struct attribute *toshiba_attributes[] = {
+       &dev_attr_kbd_backlight_mode.attr,
+       &dev_attr_kbd_backlight_timeout.attr,
+       &dev_attr_touchpad.attr,
+       &dev_attr_position.attr,
+       NULL,
+};
+
+static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
+                                       struct attribute *attr, int idx)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
+       bool exists = true;
+
+       if (attr == &dev_attr_kbd_backlight_mode.attr)
+               exists = (drv->kbd_illum_supported) ? true : false;
+       else if (attr == &dev_attr_kbd_backlight_timeout.attr)
+               exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
+       else if (attr == &dev_attr_touchpad.attr)
+               exists = (drv->touchpad_supported) ? true : false;
+       else if (attr == &dev_attr_position.attr)
+               exists = (drv->accelerometer_supported) ? true : false;
+
+       return exists ? attr->mode : 0;
+}
+
+static struct attribute_group toshiba_attr_group = {
+       .is_visible = toshiba_sysfs_is_visible,
+       .attrs = toshiba_attributes,
+};
+
 static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
                                      struct serio *port)
 {
@@ -1106,6 +1574,10 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
 
        remove_toshiba_proc_entries(dev);
 
+       if (dev->sysfs_created)
+               sysfs_remove_group(&dev->acpi_dev->dev.kobj,
+                                  &toshiba_attr_group);
+
        if (dev->ntfy_supported) {
                i8042_remove_filter(toshiba_acpi_i8042_filter);
                cancel_work_sync(&dev->hotkey_work);
@@ -1127,6 +1599,12 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev)
        if (dev->illumination_supported)
                led_classdev_unregister(&dev->led_dev);
 
+       if (dev->kbd_led_registered)
+               led_classdev_unregister(&dev->kbd_led);
+
+       if (dev->eco_supported)
+               led_classdev_unregister(&dev->eco_led);
+
        if (toshiba_acpi)
                toshiba_acpi = NULL;
 
@@ -1172,6 +1650,7 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        dev->acpi_dev = acpi_dev;
        dev->method_hci = hci_method;
        acpi_dev->driver_data = dev;
+       dev_set_drvdata(&acpi_dev->dev, dev);
 
        if (toshiba_acpi_setup_keyboard(dev))
                pr_info("Unable to activate hotkeys\n");
@@ -1212,6 +1691,40 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
                        dev->illumination_supported = 1;
        }
 
+       if (toshiba_eco_mode_available(dev)) {
+               dev->eco_led.name = "toshiba::eco_mode";
+               dev->eco_led.max_brightness = 1;
+               dev->eco_led.brightness_set = toshiba_eco_mode_set_status;
+               dev->eco_led.brightness_get = toshiba_eco_mode_get_status;
+               if (!led_classdev_register(&dev->acpi_dev->dev, &dev->eco_led))
+                       dev->eco_supported = 1;
+       }
+
+       ret = toshiba_kbd_illum_status_get(dev, &dummy);
+       if (!ret) {
+               dev->kbd_time = dummy >> HCI_MISC_SHIFT;
+               dev->kbd_mode = dummy & 0x07;
+       }
+       dev->kbd_illum_supported = !ret;
+       /*
+        * Only register the LED if KBD illumination is supported
+        * and the keyboard backlight operation mode is set to FN-Z
+        */
+       if (dev->kbd_illum_supported && dev->kbd_mode == SCI_KBD_MODE_FNZ) {
+               dev->kbd_led.name = "toshiba::kbd_backlight";
+               dev->kbd_led.max_brightness = 1;
+               dev->kbd_led.brightness_set = toshiba_kbd_backlight_set;
+               dev->kbd_led.brightness_get = toshiba_kbd_backlight_get;
+               if (!led_classdev_register(&dev->acpi_dev->dev, &dev->kbd_led))
+                       dev->kbd_led_registered = 1;
+       }
+
+       ret = toshiba_touchpad_get(dev, &dummy);
+       dev->touchpad_supported = !ret;
+
+       ret = toshiba_accelerometer_supported(dev);
+       dev->accelerometer_supported = !ret;
+
        /* Determine whether or not BIOS supports fan and video interfaces */
 
        ret = get_video_status(dev, &dummy);
@@ -1220,6 +1733,14 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        ret = get_fan_status(dev, &dummy);
        dev->fan_supported = !ret;
 
+       ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
+                                &toshiba_attr_group);
+       if (ret) {
+               dev->sysfs_created = 0;
+               goto error;
+       }
+       dev->sysfs_created = !ret;
+
        create_toshiba_proc_entries(dev);
 
        toshiba_acpi = dev;
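
The keyboard-backlight word that the new sysfs handlers read and write packs the timeout into the upper bits (shifted by HCI_MISC_SHIFT) and the operating mode, Auto (2) or FN-Z (1), into bits 0-2. A minimal standalone C sketch of that packing follows; the shift value of 16 is an assumption used purely for illustration, since the real constant is defined elsewhere in the driver.

/* Illustration only: HCI_MISC_SHIFT assumed to be 16 here. */
#include <stdio.h>
#include <stdint.h>

#define HCI_MISC_SHIFT 16

int main(void)
{
        uint32_t mode = 2;      /* Auto */
        uint32_t timeout = 30;  /* seconds */
        uint32_t word = (timeout << HCI_MISC_SHIFT) + mode;

        /* Mirrors the "& 0x07" and ">> HCI_MISC_SHIFT" reads done by the
         * kbd_backlight_mode and kbd_backlight_timeout show handlers. */
        printf("packed=0x%08x mode=%u timeout=%u\n",
               word, word & 0x07, word >> HCI_MISC_SHIFT);
        return 0;
}

The same masking style appears in toshiba_position_show(), which splits the accelerometer reading into X/Y/Z with HCI_ACCEL_MASK and the same shift.
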
index ab08ca7cfb0850289c1a62a3bdba4506f750cc15..c3750c5b382be65b61468074beb387a02acd4e71 100644 (file)
@@ -123,6 +123,7 @@ struct bcm590xx_info {
 #define BCM590XX_REG_RANGES(_name, _ranges) \
        { \
                .name = #_name, \
+               .n_voltages = 64, \
                .n_linear_ranges = ARRAY_SIZE(_ranges), \
                .linear_ranges = _ranges, \
        }
index 808b3aa7a42cc15ac6373fe4daa9f796d544113f..f19a30f0fb42d8928735c2fd319b5ad45490b341 100644 (file)
@@ -192,13 +192,11 @@ static int s2mpa01_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
        if (!ramp_enable)
                goto ramp_disable;
 
-       if (enable_shift) {
-               ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
-                                       1 << enable_shift, 1 << enable_shift);
-               if (ret) {
-                       dev_err(&rdev->dev, "failed to enable ramp rate\n");
-                       return ret;
-               }
+       ret = regmap_update_bits(rdev->regmap, S2MPA01_REG_RAMP1,
+                                1 << enable_shift, 1 << enable_shift);
+       if (ret) {
+               dev_err(&rdev->dev, "failed to enable ramp rate\n");
+               return ret;
        }
 
        ramp_val = get_ramp_delay(ramp_delay);
index 68fd54702edbf12436fbd5f7b7ebd2a6cb68e7ef..e713c162fbd41bd1e3ff7d52782a813d42e70986 100644 (file)
@@ -202,13 +202,11 @@ static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
        if (!ramp_enable)
                goto ramp_disable;
 
-       if (enable_shift) {
-               ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
-                                       1 << enable_shift, 1 << enable_shift);
-               if (ret) {
-                       dev_err(&rdev->dev, "failed to enable ramp rate\n");
-                       return ret;
-               }
+       ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+                                1 << enable_shift, 1 << enable_shift);
+       if (ret) {
+               dev_err(&rdev->dev, "failed to enable ramp rate\n");
+               return ret;
        }
 
        ramp_val = get_ramp_delay(ramp_delay);
index f05badabd69e99169a0adcb9f4d4d335891cafb8..92f19a005dc3f49d0b2ec5384af5e03fde72e155 100644 (file)
@@ -964,6 +964,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
                config.driver_data = s5m8767;
                config.regmap = iodev->regmap_pmic;
                config.of_node = pdata->regulators[i].reg_node;
+               config.ena_gpio = config.ena_gpio_flags = 0;
                if (pdata->regulators[i].ext_control_gpio)
                        s5m8767_regulator_config_ext_control(s5m8767,
                                        &pdata->regulators[i], &config);
index c8bd092fc945fd5a1a407b170c398e126beaa0e4..02832d64d9187ea0ece202820bd11e1902979cdb 100644 (file)
@@ -263,6 +263,9 @@ config SCSI_SCAN_ASYNC
          You can override this choice by specifying "scsi_mod.scan=sync"
          or async on the kernel's command line.
 
+         Note that this setting also affects whether resuming from
+         system suspend will be performed asynchronously.
+
 menu "SCSI Transports"
        depends on SCSI
 
index 788c4fe2b0c9ec7a8113078c318421d7407ae855..68fb66fdb757fbc98784fad4ff7a5d03c49ee73d 100644 (file)
@@ -684,6 +684,20 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
        qlt_xmit_tm_rsp(mcmd);
 }
 
+static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
+{
+       struct qla_tgt_cmd *cmd = container_of(se_cmd,
+                               struct qla_tgt_cmd, se_cmd);
+       struct scsi_qla_host *vha = cmd->vha;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!cmd->sg_mapped)
+               return;
+
+       pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
+       cmd->sg_mapped = 0;
+}
+
 /* Local pointer to allocated TCM configfs fabric module */
 struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
 struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
@@ -1468,7 +1482,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
        }
        se_tpg = &tpg->se_tpg;
 
-       se_sess = transport_init_session();
+       se_sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(se_sess)) {
                pr_err("Unable to initialize struct se_session\n");
                return PTR_ERR(se_sess);
@@ -1877,6 +1891,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
        .queue_data_in                  = tcm_qla2xxx_queue_data_in,
        .queue_status                   = tcm_qla2xxx_queue_status,
        .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
+       .aborted_task                   = tcm_qla2xxx_aborted_task,
        /*
         * Setup function pointers for generic logic in
         * target_core_fabric_configfs.c
@@ -1926,6 +1941,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
        .queue_data_in                  = tcm_qla2xxx_queue_data_in,
        .queue_status                   = tcm_qla2xxx_queue_status,
        .queue_tm_rsp                   = tcm_qla2xxx_queue_tm_rsp,
+       .aborted_task                   = tcm_qla2xxx_aborted_task,
        /*
         * Setup function pointers for generic logic in
         * target_core_fabric_configfs.c
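
The new tcm_qla2xxx_aborted_task() callback above recovers the driver-private qla_tgt_cmd from the embedded se_cmd with container_of(). A self-contained sketch of that pointer arithmetic, using hypothetical stand-in structures rather than the real ones:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct se_cmd { int tag; };                                  /* stand-in */
struct qla_tgt_cmd { int sg_mapped; struct se_cmd se_cmd; }; /* stand-in */

int main(void)
{
        struct qla_tgt_cmd cmd = { .sg_mapped = 1, .se_cmd = { .tag = 7 } };
        struct se_cmd *inner = &cmd.se_cmd;
        struct qla_tgt_cmd *outer =
                container_of(inner, struct qla_tgt_cmd, se_cmd);

        /* outer points back at cmd, so the callback can reach sg_mapped
         * and the scatter list even though it was only handed the se_cmd. */
        printf("tag=%d sg_mapped=%d same=%d\n",
               outer->se_cmd.tag, outer->sg_mapped, outer == &cmd);
        return 0;
}
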
index c4d632c27a3ecdf2c7b89020a26d16a4d029c8f8..88d46fe6bf987f0615486e1decafbd18f0ab459f 100644 (file)
@@ -91,6 +91,15 @@ EXPORT_SYMBOL(scsi_logging_level);
 ASYNC_DOMAIN(scsi_sd_probe_domain);
 EXPORT_SYMBOL(scsi_sd_probe_domain);
 
+/*
+ * Separate domain (from scsi_sd_probe_domain) to maximize the benefit of
+ * asynchronous system resume operations.  It is marked 'exclusive' to avoid
+ * being included in the async_synchronize_full() that is invoked by
+ * dpm_resume()
+ */
+ASYNC_DOMAIN_EXCLUSIVE(scsi_sd_pm_domain);
+EXPORT_SYMBOL(scsi_sd_pm_domain);
+
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
  * You may not alter any existing entry (although adding new ones is
  * encouraged once assigned by ANSI/INCITS T10
index 5681c05ac5061bc0050c3926cc3b7f300cef712b..65a123d9c67649822e2ab0333bf534f820b8b212 100644 (file)
@@ -184,7 +184,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
  */
 int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
-                unsigned char *sense, int timeout, int retries, int flags,
+                unsigned char *sense, int timeout, int retries, u64 flags,
                 int *resid)
 {
        struct request *req;
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(scsi_execute);
 int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
-                    int *resid, int flags)
+                    int *resid, u64 flags)
 {
        char *sense = NULL;
        int result;
index 001e9ceda4c3b51bed2ba47248d9333e18d39487..7454498c40911fbf3800bb03af084ae9dd02a21c 100644 (file)
 
 #ifdef CONFIG_PM_SLEEP
 
-static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
+static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
 {
+       return pm && pm->suspend ? pm->suspend(dev) : 0;
+}
+
+static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
+{
+       return pm && pm->freeze ? pm->freeze(dev) : 0;
+}
+
+static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
+{
+       return pm && pm->poweroff ? pm->poweroff(dev) : 0;
+}
+
+static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
+{
+       return pm && pm->resume ? pm->resume(dev) : 0;
+}
+
+static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
+{
+       return pm && pm->thaw ? pm->thaw(dev) : 0;
+}
+
+static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
+{
+       return pm && pm->restore ? pm->restore(dev) : 0;
+}
+
+static int scsi_dev_type_suspend(struct device *dev,
+               int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err;
 
+       /* flush pending in-flight resume operations, suspend is synchronous */
+       async_synchronize_full_domain(&scsi_sd_pm_domain);
+
        err = scsi_device_quiesce(to_scsi_device(dev));
        if (err == 0) {
-               if (cb) {
-                       err = cb(dev);
-                       if (err)
-                               scsi_device_resume(to_scsi_device(dev));
-               }
+               err = cb(dev, pm);
+               if (err)
+                       scsi_device_resume(to_scsi_device(dev));
        }
        dev_dbg(dev, "scsi suspend: %d\n", err);
        return err;
 }
 
-static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
+static int scsi_dev_type_resume(struct device *dev,
+               int (*cb)(struct device *, const struct dev_pm_ops *))
 {
+       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
        int err = 0;
 
-       if (cb)
-               err = cb(dev);
+       err = cb(dev, pm);
        scsi_device_resume(to_scsi_device(dev));
        dev_dbg(dev, "scsi resume: %d\n", err);
+
+       if (err == 0) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
+
        return err;
 }
 
 static int
-scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
+scsi_bus_suspend_common(struct device *dev,
+               int (*cb)(struct device *, const struct dev_pm_ops *))
 {
        int err = 0;
 
@@ -66,20 +108,54 @@ scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
        return err;
 }
 
-static int
-scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
+static void async_sdev_resume(void *dev, async_cookie_t cookie)
 {
-       int err = 0;
+       scsi_dev_type_resume(dev, do_scsi_resume);
+}
 
-       if (scsi_is_sdev_device(dev))
-               err = scsi_dev_type_resume(dev, cb);
+static void async_sdev_thaw(void *dev, async_cookie_t cookie)
+{
+       scsi_dev_type_resume(dev, do_scsi_thaw);
+}
 
-       if (err == 0) {
+static void async_sdev_restore(void *dev, async_cookie_t cookie)
+{
+       scsi_dev_type_resume(dev, do_scsi_restore);
+}
+
+static int scsi_bus_resume_common(struct device *dev,
+               int (*cb)(struct device *, const struct dev_pm_ops *))
+{
+       async_func_t fn;
+
+       if (!scsi_is_sdev_device(dev))
+               fn = NULL;
+       else if (cb == do_scsi_resume)
+               fn = async_sdev_resume;
+       else if (cb == do_scsi_thaw)
+               fn = async_sdev_thaw;
+       else if (cb == do_scsi_restore)
+               fn = async_sdev_restore;
+       else
+               fn = NULL;
+
+       if (fn) {
+               async_schedule_domain(fn, dev, &scsi_sd_pm_domain);
+
+               /*
+                * If a user has disabled async probing a likely reason
+                * is due to a storage enclosure that does not inject
+                * staggered spin-ups.  For safety, make resume
+                * synchronous as well in that case.
+                */
+               if (strncmp(scsi_scan_type, "async", 5) != 0)
+                       async_synchronize_full_domain(&scsi_sd_pm_domain);
+       } else {
                pm_runtime_disable(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
        }
-       return err;
+       return 0;
 }
 
 static int scsi_bus_prepare(struct device *dev)
@@ -97,38 +173,32 @@ static int scsi_bus_prepare(struct device *dev)
 
 static int scsi_bus_suspend(struct device *dev)
 {
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+       return scsi_bus_suspend_common(dev, do_scsi_suspend);
 }
 
 static int scsi_bus_resume(struct device *dev)
 {
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
+       return scsi_bus_resume_common(dev, do_scsi_resume);
 }
 
 static int scsi_bus_freeze(struct device *dev)
 {
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+       return scsi_bus_suspend_common(dev, do_scsi_freeze);
 }
 
 static int scsi_bus_thaw(struct device *dev)
 {
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
+       return scsi_bus_resume_common(dev, do_scsi_thaw);
 }
 
 static int scsi_bus_poweroff(struct device *dev)
 {
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+       return scsi_bus_suspend_common(dev, do_scsi_poweroff);
 }
 
 static int scsi_bus_restore(struct device *dev)
 {
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-       return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
+       return scsi_bus_resume_common(dev, do_scsi_restore);
 }
 
 #else /* CONFIG_PM_SLEEP */
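
The reshuffle above replaces the per-phase "pm ? pm->resume : NULL" callers with small do_scsi_*() helpers that take the dev_pm_ops table explicitly and fall back to a no-op when the driver has none, which is what lets scsi_bus_resume_common() pick an async wrapper by comparing callback pointers. A minimal user-space sketch of that wrapper pattern, with simplified stand-in types (not the kernel structures):

#include <stdio.h>
#include <stddef.h>

struct dev_pm_ops { int (*resume)(void *dev); };   /* stand-in */
struct device { const struct dev_pm_ops *pm; };    /* stand-in */

static int do_resume(void *dev, const struct dev_pm_ops *pm)
{
        return pm && pm->resume ? pm->resume(dev) : 0;   /* no-op if absent */
}

static int real_resume(void *dev)
{
        printf("resumed %p\n", dev);
        return 0;
}

int main(void)
{
        struct dev_pm_ops ops = { .resume = real_resume };
        struct device with_pm = { .pm = &ops };
        struct device no_pm   = { .pm = NULL };

        /* Both calls share one signature, so a dispatcher can compare the
         * callback pointer (as scsi_bus_resume_common() does) to decide
         * whether to run it asynchronously. */
        printf("%d %d\n", do_resume(&with_pm, with_pm.pm),
               do_resume(&no_pm, no_pm.pm));
        return 0;
}
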
index f079a598bed4a4c82c6f3e1f0c8e1acf2025ea81..48e5b657e79f1f0c9f3c64fb8cf42d0c7242d473 100644 (file)
@@ -112,6 +112,7 @@ extern void scsi_exit_procfs(void);
 #endif /* CONFIG_PROC_FS */
 
 /* scsi_scan.c */
+extern char scsi_scan_type[];
 extern int scsi_complete_async_scans(void);
 extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
                                   unsigned int, unsigned int, int);
@@ -166,6 +167,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
 static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
 #endif /* CONFIG_PM_RUNTIME */
 
+extern struct async_domain scsi_sd_pm_domain;
 extern struct async_domain scsi_sd_probe_domain;
 
 /* 
index 27f96d5b768068326f67f5637a29a2e4ac8a503c..e02b3aab56ce3ecf147d4e33aa4156713ded1615 100644 (file)
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
 #define SCSI_SCAN_TYPE_DEFAULT "sync"
 #endif
 
-static char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
+char scsi_scan_type[6] = SCSI_SCAN_TYPE_DEFAULT;
 
 module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type), S_IRUGO);
 MODULE_PARM_DESC(scan, "sync, async or none");
index 89e6c04ac595045739291ed63632f2c37efe891b..efcbcd182863318f296936bc7fa16c96bc58efe5 100644 (file)
@@ -3026,6 +3026,7 @@ static int sd_remove(struct device *dev)
        devt = disk_devt(sdkp->disk);
        scsi_autopm_get_device(sdkp->device);
 
+       async_synchronize_full_domain(&scsi_sd_pm_domain);
        async_synchronize_full_domain(&scsi_sd_probe_domain);
        blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
        blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
index efe1960af2b315704c39290617dbc2215f8aec48..60f2b41c7310529410939d1df6d0d1c3ee8b1222 100644 (file)
@@ -383,7 +383,7 @@ config SPI_RSPI
 
 config SPI_QUP
        tristate "Qualcomm SPI controller with QUP interface"
-       depends on ARCH_MSM_DT || (ARM && COMPILE_TEST)
+       depends on ARCH_QCOM || (ARM && COMPILE_TEST)
        help
          Qualcomm Universal Peripheral (QUP) core is an AHB slave that
          provides a common data path (an output FIFO and an input FIFO)
index 6fb2b75df821e78f80a0f6c1bf4c64e45e4292d5..e767f5831b9c7ce3d8ce015e34e67643f69a28df 100644 (file)
@@ -441,7 +441,8 @@ static void fsl_espi_do_one_msg(struct spi_message *m)
 
        m->actual_length = espi_trans.actual_length;
        m->status = espi_trans.status;
-       m->complete(m->context);
+       if (m->complete)
+               m->complete(m->context);
 }
 
 static int fsl_espi_setup(struct spi_device *spi)
index f35488ed62a9eaca3c353401d9bf7b935bf4a00c..b3e7775034dbd1d660b0ece95a197aea8cb24b54 100644 (file)
@@ -408,7 +408,8 @@ static void fsl_spi_do_one_msg(struct spi_message *m)
        }
 
        m->status = status;
-       m->complete(m->context);
+       if (m->complete)
+               m->complete(m->context);
 
        if (status || !cs_change) {
                ndelay(nsecs);
index 3822eef2ef9dacebd723ee2472a33dc04e1865a5..577d23a12763f79e5c9197aa8a252086c69cf7f2 100644 (file)
@@ -300,7 +300,8 @@ static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
        }
 
        m->status = status;
-       m->complete(m->context);
+       if (m->complete)
+               m->complete(m->context);
 
        if (status || !cs_change)
                mpc512x_psc_spi_deactivate_cs(spi);
index 3d18d93511854c1aa56fd6b5ebe6d10c65157242..de532aa11d34e9f76212e0e4fb74d6c2244d8c18 100644 (file)
@@ -247,7 +247,8 @@ static void mpc52xx_psc_spi_work(struct work_struct *work)
                }
 
                m->status = status;
-               m->complete(m->context);
+               if (m->complete)
+                       m->complete(m->context);
 
                if (status || !cs_change)
                        mpc52xx_psc_spi_deactivate_cs(spi);
index aac2a5ddd964efcf1086263fa1231235aa065141..b07db4b62d80d823804516caaf961d8c57b9bea6 100644 (file)
@@ -234,7 +234,8 @@ static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
                dev_err(&ms->master->dev, "mode fault\n");
                mpc52xx_spi_chipsel(ms, 0);
                ms->message->status = -EIO;
-               ms->message->complete(ms->message->context);
+               if (ms->message->complete)
+                       ms->message->complete(ms->message->context);
                ms->state = mpc52xx_spi_fsmstate_idle;
                return FSM_CONTINUE;
        }
@@ -288,7 +289,8 @@ mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
                ms->msg_count++;
                mpc52xx_spi_chipsel(ms, 0);
                ms->message->status = 0;
-               ms->message->complete(ms->message->context);
+               if (ms->message->complete)
+                       ms->message->complete(ms->message->context);
                ms->state = mpc52xx_spi_fsmstate_idle;
                return FSM_CONTINUE;
        }
index 2941c5b96ebc2e063c22a4b2fcfea946d55b8849..4dc77df388642601510c41fb02dcb5153d2b2ea3 100644 (file)
@@ -1379,12 +1379,13 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&mcspi->ctx.cs);
 
-       mcspi->dma_channels = kcalloc(master->num_chipselect,
-                       sizeof(struct omap2_mcspi_dma),
-                       GFP_KERNEL);
-
-       if (mcspi->dma_channels == NULL)
+       mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
+                                          sizeof(struct omap2_mcspi_dma),
+                                          GFP_KERNEL);
+       if (mcspi->dma_channels == NULL) {
+               status = -ENOMEM;
                goto free_master;
+       }
 
        for (i = 0; i < master->num_chipselect; i++) {
                char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
@@ -1426,7 +1427,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
        }
 
        if (status < 0)
-               goto dma_chnl_free;
+               goto free_master;
 
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
@@ -1444,8 +1445,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
 
 disable_pm:
        pm_runtime_disable(&pdev->dev);
-dma_chnl_free:
-       kfree(mcspi->dma_channels);
 free_master:
        spi_master_put(master);
        return status;
@@ -1453,19 +1452,12 @@ free_master:
 
 static int omap2_mcspi_remove(struct platform_device *pdev)
 {
-       struct spi_master       *master;
-       struct omap2_mcspi      *mcspi;
-       struct omap2_mcspi_dma  *dma_channels;
-
-       master = platform_get_drvdata(pdev);
-       mcspi = spi_master_get_devdata(master);
-       dma_channels = mcspi->dma_channels;
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
 
        pm_runtime_put_sync(mcspi->dev);
        pm_runtime_disable(&pdev->dev);
 
-       kfree(dma_channels);
-
        return 0;
 }
 
index f6f2c701017795ed9a41133ac0da1151988c6f98..03edf5ed0e9fb5d3310a7442fed3c262864cad37 100644 (file)
@@ -322,7 +322,8 @@ static void spi_sh_work(struct work_struct *work)
                spin_lock_irqsave(&ss->lock, flags);
 
                mesg->status = 0;
-               mesg->complete(mesg->context);
+               if (mesg->complete)
+                       mesg->complete(mesg->context);
        }
 
        clear_fifo(ss);
@@ -340,7 +341,8 @@ static void spi_sh_work(struct work_struct *work)
 
  error:
        mesg->status = ret;
-       mesg->complete(mesg->context);
+       if (mesg->complete)
+               mesg->complete(mesg->context);
 
        spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
                         SPI_SH_CR1);
index 820b499816f8ebfb6bc45a56026dbe05b9b4311f..5f183baa91a98bb343747eced81ae6f4e09c4536 100644 (file)
@@ -262,7 +262,8 @@ static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
 
 exit:
        m->status = status;
-       m->complete(m->context);
+       if (m->complete)
+               m->complete(m->context);
 
        /* normally deactivate chipselect ... unless no error and
         * cs_change has hinted that the next message will probably
index a54b506ba7ca030230e99d91092638f0bcfc6cb8..b87b246111c0c9727709838c1860df6d0421fc00 100644 (file)
@@ -99,16 +99,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
                struct iovec   *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int    niov = tx->tx_niov;
 #endif
-               struct msghdr msg = {
-                       .msg_name       = NULL,
-                       .msg_namelen    = 0,
-                       .msg_iov        = scratchiov,
-                       .msg_iovlen     = niov,
-                       .msg_control    = NULL,
-                       .msg_controllen = 0,
-                       .msg_flags      = MSG_DONTWAIT
-               };
-               mm_segment_t oldmm = get_fs();
+               struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int  i;
 
                for (nob = i = 0; i < niov; i++) {
@@ -120,9 +111,7 @@ ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;
 
-               set_fs (KERNEL_DS);
-               rc = sock_sendmsg(sock, &msg, nob);
-               set_fs (oldmm);
+               rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
        }
        return rc;
 }
@@ -174,16 +163,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
                struct iovec *scratchiov = conn->ksnc_scheduler->kss_scratch_iov;
                unsigned int  niov = tx->tx_nkiov;
 #endif
-               struct msghdr msg = {
-                       .msg_name       = NULL,
-                       .msg_namelen    = 0,
-                       .msg_iov        = scratchiov,
-                       .msg_iovlen     = niov,
-                       .msg_control    = NULL,
-                       .msg_controllen = 0,
-                       .msg_flags      = MSG_DONTWAIT
-               };
-               mm_segment_t  oldmm = get_fs();
+               struct msghdr msg = {.msg_flags = MSG_DONTWAIT};
                int        i;
 
                for (nob = i = 0; i < niov; i++) {
@@ -196,9 +176,7 @@ ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)
                    nob < tx->tx_resid)
                        msg.msg_flags |= MSG_MORE;
 
-               set_fs (KERNEL_DS);
-               rc = sock_sendmsg(sock, &msg, nob);
-               set_fs (oldmm);
+               rc = kernel_sendmsg(sock, &msg, (struct kvec *)scratchiov, niov, nob);
 
                for (i = 0; i < niov; i++)
                        kunmap(kiov[i].kiov_page);
@@ -237,15 +215,8 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
 #endif
        struct iovec *iov = conn->ksnc_rx_iov;
        struct msghdr msg = {
-               .msg_name       = NULL,
-               .msg_namelen    = 0,
-               .msg_iov        = scratchiov,
-               .msg_iovlen     = niov,
-               .msg_control    = NULL,
-               .msg_controllen = 0,
                .msg_flags      = 0
        };
-       mm_segment_t oldmm = get_fs();
        int       nob;
        int       i;
        int       rc;
@@ -263,10 +234,8 @@ ksocknal_lib_recv_iov (ksock_conn_t *conn)
        }
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);
 
-       set_fs (KERNEL_DS);
-       rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
-       /* NB this is just a boolean..........................^ */
-       set_fs (oldmm);
+       rc = kernel_recvmsg(conn->ksnc_sock, &msg,
+               (struct kvec *)scratchiov, niov, nob, MSG_DONTWAIT);
 
        saved_csum = 0;
        if (conn->ksnc_proto == &ksocknal_protocol_v2x) {
@@ -355,14 +324,8 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
 #endif
        lnet_kiov_t   *kiov = conn->ksnc_rx_kiov;
        struct msghdr msg = {
-               .msg_name       = NULL,
-               .msg_namelen    = 0,
-               .msg_iov        = scratchiov,
-               .msg_control    = NULL,
-               .msg_controllen = 0,
                .msg_flags      = 0
        };
-       mm_segment_t oldmm = get_fs();
        int       nob;
        int       i;
        int       rc;
@@ -370,13 +333,14 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
        void    *addr;
        int       sum;
        int       fragnob;
+       int n;
 
        /* NB we can't trust socket ops to either consume our iovs
         * or leave them alone. */
        addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
        if (addr != NULL) {
                nob = scratchiov[0].iov_len;
-               msg.msg_iovlen = 1;
+               n = 1;
 
        } else {
                for (nob = i = 0; i < niov; i++) {
@@ -384,15 +348,13 @@ ksocknal_lib_recv_kiov (ksock_conn_t *conn)
                        scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
                                                 kiov[i].kiov_offset;
                }
-               msg.msg_iovlen = niov;
+               n = niov;
        }
 
        LASSERT (nob <= conn->ksnc_rx_nob_wanted);
 
-       set_fs (KERNEL_DS);
-       rc = sock_recvmsg (conn->ksnc_sock, &msg, nob, MSG_DONTWAIT);
-       /* NB this is just a boolean.......................^ */
-       set_fs (oldmm);
+       rc = kernel_recvmsg(conn->ksnc_sock, &msg,
+                       (struct kvec *)scratchiov, n, nob, MSG_DONTWAIT);
 
        if (conn->ksnc_msg.ksm_csum != 0) {
                for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
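
The conversion above drops the set_fs(KERNEL_DS)/sock_sendmsg() pattern and hands the scatter list straight to kernel_sendmsg()/kernel_recvmsg(), so msg_iov/msg_iovlen no longer need to be filled in. As a loose user-space analogy only (not kernel code), writev(2) likewise takes the iovec array as a call argument rather than having it stashed in a message header:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char a[] = "hello ", b[] = "world\n";
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = strlen(a) },
                { .iov_base = b, .iov_len = strlen(b) },
        };

        /* Gathered write: both fragments go out in one call, roughly the
         * way the patch passes a kvec array and count to kernel_sendmsg(). */
        ssize_t rc = writev(STDOUT_FILENO, iov, 2);
        fprintf(stderr, "sent %zd bytes\n", rc);
        return 0;
}
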
index e6069d78af6ba2e6c7ca02d1fe890a60e2bbad8a..7539fe16d76f1d9889745d0fc00f8124cbd01e92 100644 (file)
@@ -265,17 +265,11 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
         * empty enough to take the whole message immediately */
 
        for (;;) {
-               struct iovec  iov = {
+               struct kvec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct msghdr msg = {
-                       .msg_name       = NULL,
-                       .msg_namelen    = 0,
-                       .msg_iov        = &iov,
-                       .msg_iovlen     = 1,
-                       .msg_control    = NULL,
-                       .msg_controllen = 0,
                        .msg_flags      = (timeout == 0) ? MSG_DONTWAIT : 0
                };
 
@@ -297,11 +291,9 @@ libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
                        }
                }
 
-               set_fs (KERNEL_DS);
                then = jiffies;
-               rc = sock_sendmsg (sock, &msg, iov.iov_len);
+               rc = kernel_sendmsg(sock, &msg, &iov, 1, nob);
                ticks -= jiffies - then;
-               set_fs (oldmm);
 
                if (rc == nob)
                        return 0;
@@ -338,17 +330,11 @@ libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
        LASSERT (ticks > 0);
 
        for (;;) {
-               struct iovec  iov = {
+               struct kvec  iov = {
                        .iov_base = buffer,
                        .iov_len  = nob
                };
                struct msghdr msg = {
-                       .msg_name       = NULL,
-                       .msg_namelen    = 0,
-                       .msg_iov        = &iov,
-                       .msg_iovlen     = 1,
-                       .msg_control    = NULL,
-                       .msg_controllen = 0,
                        .msg_flags      = 0
                };
 
@@ -367,11 +353,9 @@ libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
                        return rc;
                }
 
-               set_fs(KERNEL_DS);
                then = jiffies;
-               rc = sock_recvmsg(sock, &msg, iov.iov_len, 0);
+               rc = kernel_recvmsg(sock, &msg, &iov, 1, nob, 0);
                ticks -= jiffies - then;
-               set_fs(oldmm);
 
                if (rc < 0)
                        return rc;
index ab06891f7fc7cf68c41cb82119a393d11a1797da..80d48b5ae24706655c0290ad01f59bb1591f3912 100644 (file)
@@ -115,27 +115,6 @@ failed:
        return rc;
 }
 
-static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
-{
-       struct inode *inode = dentry->d_inode;
-       struct ptlrpc_request *request;
-       char *symname;
-       int rc;
-
-       CDEBUG(D_VFSTRACE, "VFS Op\n");
-
-       ll_inode_size_lock(inode);
-       rc = ll_readlink_internal(inode, &request, &symname);
-       if (rc)
-               GOTO(out, rc);
-
-       rc = vfs_readlink(dentry, buffer, buflen, symname);
- out:
-       ptlrpc_req_finished(request);
-       ll_inode_size_unlock(inode);
-       return rc;
-}
-
 static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct inode *inode = dentry->d_inode;
@@ -175,7 +154,7 @@ static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cooki
 }
 
 struct inode_operations ll_fast_symlink_inode_operations = {
-       .readlink       = ll_readlink,
+       .readlink       = generic_readlink,
        .setattr        = ll_setattr,
        .follow_link    = ll_follow_link,
        .put_link       = ll_put_link,
index ac43bae101021c74208347dc38349df888085d08..bd0b93cb6c539b9079b106f2e1c7caa509ed7166 100644 (file)
@@ -201,7 +201,7 @@ static int msi001_set_tuner(struct msi001 *s)
        dev_dbg(&s->spi->dev, "%s: bandwidth selected=%d\n",
                        __func__, bandwidth_lut[i].freq);
 
-       f_vco = (f_rf + f_if + f_if1) * lo_div;
+       f_vco = (u64) (f_rf + f_if + f_if1) * lo_div;
        tmp64 = f_vco;
        m = do_div(tmp64, F_REF * R_REF);
        n = (unsigned int) tmp64;
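
The (u64) cast added above matters because the operands are (presumably) 32-bit: without widening, the product (f_rf + f_if + f_if1) * lo_div wraps modulo 2^32 before it ever reaches the 64-bit f_vco. A standalone demonstration with made-up but plausible values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t f_rf = 700000000u, f_if = 0, f_if1 = 0, lo_div = 16;

        uint64_t wrapped = (f_rf + f_if + f_if1) * lo_div;            /* 32-bit multiply */
        uint64_t correct = (uint64_t)(f_rf + f_if + f_if1) * lo_div;  /* widened first */

        printf("wrapped=%llu correct=%llu\n",
               (unsigned long long)wrapped, (unsigned long long)correct);
        return 0;
}

Here the uncast form yields 2610065408 instead of the intended 11200000000.
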
index 260d1b7367212637a70e79ed4628f2025777b263..65d351f99da2e48607d3f8e1c0d6ff84ba8af17f 100644 (file)
@@ -913,7 +913,6 @@ static int msi3101_set_usb_adc(struct msi3101_state *s)
 
        /* set tuner, subdev, filters according to sampling rate */
        bandwidth_auto = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH_AUTO);
-       bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
        if (v4l2_ctrl_g_ctrl(bandwidth_auto)) {
                bandwidth = v4l2_ctrl_find(&s->hdl, V4L2_CID_RF_TUNER_BANDWIDTH);
                v4l2_ctrl_s_ctrl(bandwidth, s->f_adc);
@@ -1078,6 +1077,7 @@ static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
 static int msi3101_stop_streaming(struct vb2_queue *vq)
 {
        struct msi3101_state *s = vb2_get_drv_priv(vq);
+       int ret;
        dev_dbg(&s->udev->dev, "%s:\n", __func__);
 
        if (mutex_lock_interruptible(&s->v4l2_lock))
@@ -1090,17 +1090,22 @@ static int msi3101_stop_streaming(struct vb2_queue *vq)
 
        /* according to tests, at least 700us delay is required  */
        msleep(20);
-       msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+       ret = msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+       if (ret)
+               goto err_sleep_tuner;
 
        /* sleep USB IF / ADC */
-       msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+       ret = msi3101_ctrl_msg(s, CMD_WREG, 0x01000003);
+       if (ret)
+               goto err_sleep_tuner;
 
+err_sleep_tuner:
        /* sleep tuner */
-       v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
+       ret = v4l2_subdev_call(s->v4l2_subdev, core, s_power, 0);
 
        mutex_unlock(&s->v4l2_lock);
 
-       return 0;
+       return ret;
 }
 
 static struct vb2_ops msi3101_vb2_ops = {
index 773d8ca07a004c6ec7972ae62f9be9d266c366a4..de692d7011a5cc4937298e5d3d697f519f0c9d04 100644 (file)
@@ -86,7 +86,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
        struct stub_device *sdev = dev_get_drvdata(dev);
        int sockfd = 0;
        struct socket *socket;
-       ssize_t err = -EINVAL;
        int rv;
 
        if (!sdev) {
@@ -99,6 +98,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        if (sockfd != -1) {
+               int err;
                dev_info(dev, "stub up\n");
 
                spin_lock_irq(&sdev->ud.lock);
@@ -108,7 +108,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
                        goto err;
                }
 
-               socket = sockfd_to_socket(sockfd);
+               socket = sockfd_lookup(sockfd, &err);
                if (!socket)
                        goto err;
 
@@ -141,7 +141,7 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
 
 err:
        spin_unlock_irq(&sdev->ud.lock);
-       return err;
+       return -EINVAL;
 }
 static DEVICE_ATTR(usbip_sockfd, S_IWUSR, NULL, store_sockfd);
 
@@ -211,7 +211,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
         * not touch NULL socket.
         */
        if (ud->tcp_socket) {
-               fput(ud->tcp_socket->file);
+               sockfd_put(ud->tcp_socket);
                ud->tcp_socket = NULL;
        }
 
index 184fa70365db3e32a37f55efc819858b431578d6..facaaf003f19931b2f15603568bb565f3de40607 100644 (file)
@@ -382,31 +382,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(usbip_recv);
 
-struct socket *sockfd_to_socket(unsigned int sockfd)
-{
-       struct socket *socket;
-       struct file *file;
-       struct inode *inode;
-
-       file = fget(sockfd);
-       if (!file) {
-               pr_err("invalid sockfd\n");
-               return NULL;
-       }
-
-       inode = file_inode(file);
-
-       if (!inode || !S_ISSOCK(inode->i_mode)) {
-               fput(file);
-               return NULL;
-       }
-
-       socket = SOCKET_I(inode);
-
-       return socket;
-}
-EXPORT_SYMBOL_GPL(sockfd_to_socket);
-
 /* there may be more cases to tweak the flags. */
 static unsigned int tweak_transfer_flags(unsigned int flags)
 {
index 732fb636a1e5b26c19b816448f7faa08391f5a35..f555d834f134a8a72e52751de62d3305bde27ecc 100644 (file)
@@ -299,7 +299,6 @@ void usbip_dump_urb(struct urb *purb);
 void usbip_dump_header(struct usbip_header *pdu);
 
 int usbip_recv(struct socket *sock, void *buf, int size);
-struct socket *sockfd_to_socket(unsigned int sockfd);
 
 void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
                    int pack);
index 1e84577230ef4120f4847b6ca67af1e028a102cb..70e17551943dc45bb49edfd4f3ba9e38a87df8fe 100644 (file)
@@ -788,7 +788,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 
        /* active connection is closed */
        if (vdev->ud.tcp_socket) {
-               fput(vdev->ud.tcp_socket->file);
+               sockfd_put(vdev->ud.tcp_socket);
                vdev->ud.tcp_socket = NULL;
        }
        pr_info("release socket\n");
@@ -835,7 +835,7 @@ static void vhci_device_reset(struct usbip_device *ud)
        vdev->udev = NULL;
 
        if (ud->tcp_socket) {
-               fput(ud->tcp_socket->file);
+               sockfd_put(ud->tcp_socket);
                ud->tcp_socket = NULL;
        }
        ud->status = VDEV_ST_NULL;
index e0980324fb0366e06a39b84ca26c109b36381e35..47bddcdde0a621330a063d297b132f8da8396b72 100644 (file)
@@ -176,6 +176,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
        struct socket *socket;
        int sockfd = 0;
        __u32 rhport = 0, devid = 0, speed = 0;
+       int err;
 
        /*
         * @rhport: port number of vhci_hcd
@@ -194,8 +195,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        /* Extract socket from fd. */
-       /* The correct way to clean this up is to fput(socket->file). */
-       socket = sockfd_to_socket(sockfd);
+       socket = sockfd_lookup(sockfd, &err);
        if (!socket)
                return -EINVAL;
 
@@ -211,7 +211,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
                spin_unlock(&vdev->ud.lock);
                spin_unlock(&the_controller->lock);
 
-               fput(socket->file);
+               sockfd_put(socket);
 
                dev_err(dev, "port %d already used\n", rhport);
                return -EINVAL;
index b83ec378d04f8f1ed5fbc820429c6f1657b4d7a0..78cab13bbb1be3796b0e00af4a0667329ed4a2d8 100644 (file)
@@ -499,6 +499,23 @@ static int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
        return 0;
 }
 
+static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
+{
+       bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
+
+       spin_lock_bh(&conn->cmd_lock);
+       if (!list_empty(&cmd->i_conn_node))
+               list_del_init(&cmd->i_conn_node);
+       spin_unlock_bh(&conn->cmd_lock);
+
+       __iscsit_free_cmd(cmd, scsi_cmd, true);
+}
+
+static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
+{
+       return TARGET_PROT_NORMAL;
+}
+
 static struct iscsit_transport iscsi_target_transport = {
        .name                   = "iSCSI/TCP",
        .transport_type         = ISCSI_TCP,
@@ -513,6 +530,8 @@ static struct iscsit_transport iscsi_target_transport = {
        .iscsit_response_queue  = iscsit_response_queue,
        .iscsit_queue_data_in   = iscsit_queue_rsp,
        .iscsit_queue_status    = iscsit_queue_rsp,
+       .iscsit_aborted_task    = iscsit_aborted_task,
+       .iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
 };
 
 static int __init iscsi_target_init_module(void)
@@ -1503,6 +1522,16 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 {
        u32 payload_length = ntoh24(hdr->dlength);
 
+       if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
+               pr_err("NopOUT flags: Final bit not set, protocol error.\n");
+               if (!cmd)
+                       return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+                                                (unsigned char *)hdr);
+
+               return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
+                                        (unsigned char *)hdr);
+       }
+
        if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
                pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
                        " not set, protocol error.\n");
@@ -2468,6 +2497,7 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
 {
        struct iscsi_cmd *cmd;
        struct iscsi_conn *conn_p;
+       bool found = false;
 
        /*
         * Only send a Asynchronous Message on connections whos network
@@ -2476,11 +2506,12 @@ static void iscsit_build_conn_drop_async_message(struct iscsi_conn *conn)
        list_for_each_entry(conn_p, &conn->sess->sess_conn_list, conn_list) {
                if (conn_p->conn_state == TARG_CONN_STATE_LOGGED_IN) {
                        iscsit_inc_conn_usage_count(conn_p);
+                       found = true;
                        break;
                }
        }
 
-       if (!conn_p)
+       if (!found)
                return;
 
        cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
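
The 'found' flag introduced above addresses a classic list_for_each_entry() pitfall: after a full pass the cursor never compares equal to NULL, it ends up pointing at the container of the list head, so the old "if (!conn_p)" test could never detect the no-match case. A standalone sketch of the same pitfall with a hand-rolled circular list (hypothetical types):

#include <stdio.h>
#include <stdbool.h>

struct conn { int state; struct conn *next; };   /* hypothetical */

int main(void)
{
        struct conn head, c1 = { .state = 0 }, c2 = { .state = 0 };
        struct conn *cur;
        bool found = false;

        /* Circular list with a dummy head, like the kernel's list_head. */
        head.next = &c1;
        c1.next = &c2;
        c2.next = &head;

        for (cur = head.next; cur != &head; cur = cur->next) {
                if (cur->state == 1) {        /* nobody is "logged in" */
                        found = true;
                        break;
                }
        }

        /* cur == &head (non-NULL) after a full pass, so only the flag can
         * report that nothing matched. */
        printf("cur == &head: %d, found: %d\n", cur == &head, found);
        return 0;
}
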
index 1c0088fe9e99368c2dfb50b378dee6033cd42cab..ae03f3e5de1e47f8ce9bb43ae134361d8e9e325a 100644 (file)
@@ -1052,6 +1052,11 @@ TPG_ATTR(demo_mode_discovery, S_IRUGO | S_IWUSR);
  */
 DEF_TPG_ATTRIB(default_erl);
 TPG_ATTR(default_erl, S_IRUGO | S_IWUSR);
+/*
+ * Define iscsi_tpg_attrib_s_t10_pi
+ */
+DEF_TPG_ATTRIB(t10_pi);
+TPG_ATTR(t10_pi, S_IRUGO | S_IWUSR);
 
 static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
        &iscsi_tpg_attrib_authentication.attr,
@@ -1064,6 +1069,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
        &iscsi_tpg_attrib_prod_mode_write_protect.attr,
        &iscsi_tpg_attrib_demo_mode_discovery.attr,
        &iscsi_tpg_attrib_default_erl.attr,
+       &iscsi_tpg_attrib_t10_pi.attr,
        NULL,
 };
 
@@ -1815,6 +1821,13 @@ static void lio_queue_tm_rsp(struct se_cmd *se_cmd)
        iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
 }
 
+static void lio_aborted_task(struct se_cmd *se_cmd)
+{
+       struct iscsi_cmd *cmd = container_of(se_cmd, struct iscsi_cmd, se_cmd);
+
+       cmd->conn->conn_transport->iscsit_aborted_task(cmd->conn, cmd);
+}
+
 static char *lio_tpg_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
        struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
@@ -1999,6 +2012,7 @@ int iscsi_target_register_configfs(void)
        fabric->tf_ops.queue_data_in = &lio_queue_data_in;
        fabric->tf_ops.queue_status = &lio_queue_status;
        fabric->tf_ops.queue_tm_rsp = &lio_queue_tm_rsp;
+       fabric->tf_ops.aborted_task = &lio_aborted_task;
        /*
         * Setup function pointers for generic logic in target_core_fabric_configfs.c
         */
index 48f7b3bf4e8c3d2eec3bb413d33d99b6760ce3be..886d74d6f3d4669b6050d2872ea38d924ac3e484 100644 (file)
@@ -58,7 +58,8 @@
 #define TA_DEMO_MODE_DISCOVERY         1
 #define TA_DEFAULT_ERL                 0
 #define TA_CACHE_CORE_NPS              0
-
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI              0
 
 #define ISCSI_IOV_DATA_BUFFER          5
 
@@ -765,6 +766,7 @@ struct iscsi_tpg_attrib {
        u32                     prod_mode_write_protect;
        u32                     demo_mode_discovery;
        u32                     default_erl;
+       u8                      t10_pi;
        struct iscsi_portal_group *tpg;
 };
 
@@ -787,6 +789,7 @@ struct iscsi_np {
        void                    *np_context;
        struct iscsit_transport *np_transport;
        struct list_head        np_list;
+       struct iscsi_tpg_np     *tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
index e29279e6b577dd564e8271f95c171838ead5ca39..8739b98f6f93539b8c6eb95f27d7fde3601b40d7 100644 (file)
@@ -259,6 +259,7 @@ static int iscsi_login_zero_tsih_s1(
 {
        struct iscsi_session *sess = NULL;
        struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
+       enum target_prot_op sup_pro_ops;
        int ret;
 
        sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -320,8 +321,9 @@ static int iscsi_login_zero_tsih_s1(
                kfree(sess);
                return -ENOMEM;
        }
+       sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-       sess->se_sess = transport_init_session();
+       sess->se_sess = transport_init_session(sup_pro_ops);
        if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
index 44a5471de00ffe95c5fccdf5462931efa0acc361..eb96b20dc09e13ffe32e226df38b73241f176a63 100644 (file)
@@ -225,6 +225,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg)
        a->prod_mode_write_protect = TA_PROD_MODE_WRITE_PROTECT;
        a->demo_mode_discovery = TA_DEMO_MODE_DISCOVERY;
        a->default_erl = TA_DEFAULT_ERL;
+       a->t10_pi = TA_DEFAULT_T10_PI;
 }
 
 int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -500,6 +501,7 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
        init_completion(&tpg_np->tpg_np_comp);
        kref_init(&tpg_np->tpg_np_kref);
        tpg_np->tpg_np          = np;
+       np->tpg_np              = tpg_np;
        tpg_np->tpg             = tpg;
 
        spin_lock(&tpg->tpg_np_lock);
@@ -858,3 +860,22 @@ int iscsit_ta_default_erl(
 
        return 0;
 }
+
+int iscsit_ta_t10_pi(
+       struct iscsi_portal_group *tpg,
+       u32 flag)
+{
+       struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+       if ((flag != 0) && (flag != 1)) {
+               pr_err("Illegal value %d\n", flag);
+               return -EINVAL;
+       }
+
+       a->t10_pi = flag;
+       pr_debug("iSCSI_TPG[%hu] - T10 Protection information bit:"
+               " %s\n", tpg->tpgt, (a->t10_pi) ?
+               "ON" : "OFF");
+
+       return 0;
+}
index 213c0fc7fdc9058913bc3b0da38a7d7ef20455e2..0a182f2aa8a25ea07cad39ac58a0d954010abd8b 100644 (file)
@@ -39,5 +39,6 @@ extern int iscsit_ta_demo_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_prod_mode_write_protect(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_demo_mode_discovery(struct iscsi_portal_group *, u32);
 extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
 
 #endif /* ISCSI_TARGET_TPG_H */
index e655b042ed1895cbb2390dfc2bc13adeed89d517..53e157cb8c547e7776717ffab50137b204bdef90 100644 (file)
@@ -705,8 +705,8 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)
 }
 EXPORT_SYMBOL(iscsit_release_cmd);
 
-static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
-                             bool check_queues)
+void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd,
+                      bool check_queues)
 {
        struct iscsi_conn *conn = cmd->conn;
 
index 561a424d19800f4540a97ce92312c04ff0f0bc12..a68508c4fec862b325c6a5015b9493871f166e08 100644 (file)
@@ -30,6 +30,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co
 extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *);
 extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
 extern void iscsit_release_cmd(struct iscsi_cmd *);
+extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool);
 extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
 extern int iscsit_check_session_usage_count(struct iscsi_session *);
 extern void iscsit_dec_session_usage_count(struct iscsi_session *);
index fadad7c5f635f01496acdc79d56a678a4e4fb1a6..c886ad1c39fb357ddb6e72a384474bdc89b01278 100644 (file)
@@ -212,6 +212,10 @@ static void tcm_loop_submission_work(struct work_struct *work)
                se_cmd->se_cmd_flags |= SCF_BIDI;
 
        }
+
+       if (!scsi_prot_sg_count(sc) && scsi_get_prot_op(sc) != SCSI_PROT_NORMAL)
+               se_cmd->prot_pto = true;
+
        rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                        &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
                        scsi_bufflen(sc), tcm_loop_sam_attr(sc),
@@ -915,6 +919,11 @@ static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
        wake_up(&tl_tmr->tl_tmr_wait);
 }
 
+static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
+{
+       return;
+}
+
 static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
 {
        switch (tl_hba->tl_proto_id) {
@@ -1009,7 +1018,7 @@ static int tcm_loop_make_nexus(
        /*
         * Initialize the struct se_session pointer
         */
-       tl_nexus->se_sess = transport_init_session();
+       tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
        if (IS_ERR(tl_nexus->se_sess)) {
                ret = PTR_ERR(tl_nexus->se_sess);
                goto out;
@@ -1483,6 +1492,7 @@ static int tcm_loop_register_configfs(void)
        fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
        fabric->tf_ops.queue_status = &tcm_loop_queue_status;
        fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
+       fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
 
        /*
         * Setup function pointers for generic logic in target_core_fabric_configfs.c
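
Not patch content, only illustration: aborted_task() becomes a mandatory fabric callback (enforced later in target_fabric_tf_ops_check()), and loopback now advertises TARGET_PROT_ALL at session setup. A standalone sketch of the ops-table pattern — a no-op callback is enough for a fabric with nothing to unmap on abort, but leaving the pointer NULL fails registration. All names here are illustrative stand-ins.

#include <stdio.h>
#include <errno.h>

/* Illustrative stand-ins; not the kernel's target_core_fabric_ops. */
struct demo_cmd { int tag; };

struct demo_fabric_ops {
	void (*queue_tm_rsp)(struct demo_cmd *);
	void (*aborted_task)(struct demo_cmd *);   /* now mandatory */
};

/* A no-op is sufficient when the fabric has nothing extra to release. */
static void demo_aborted_task(struct demo_cmd *cmd) { (void)cmd; }
static void demo_queue_tm_rsp(struct demo_cmd *cmd) { (void)cmd; }

/* Registration-time check, in the spirit of target_fabric_tf_ops_check(). */
static int demo_register(const struct demo_fabric_ops *ops)
{
	if (!ops->aborted_task) {
		fprintf(stderr, "Missing aborted_task()\n");
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	struct demo_fabric_ops ops = {
		.queue_tm_rsp = demo_queue_tm_rsp,
		.aborted_task = demo_aborted_task,
	};

	printf("register: %d\n", demo_register(&ops));
	return 0;
}
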
index 24884cac19ced8f9e6c0cae03787f7d8cb771880..e7e93727553cef8d3503201f381bd64b3f02d6b3 100644 (file)
@@ -210,7 +210,7 @@ static struct sbp_session *sbp_session_create(
                return ERR_PTR(-ENOMEM);
        }
 
-       sess->se_sess = transport_init_session();
+       sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(sess->se_sess)) {
                pr_err("failed to init se_session\n");
 
@@ -1846,6 +1846,11 @@ static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 }
 
+static void sbp_aborted_task(struct se_cmd *se_cmd)
+{
+       return;
+}
+
 static int sbp_check_stop_free(struct se_cmd *se_cmd)
 {
        struct sbp_target_request *req = container_of(se_cmd,
@@ -2526,6 +2531,7 @@ static struct target_core_fabric_ops sbp_ops = {
        .queue_data_in                  = sbp_queue_data_in,
        .queue_status                   = sbp_queue_status,
        .queue_tm_rsp                   = sbp_queue_tm_rsp,
+       .aborted_task                   = sbp_aborted_task,
        .check_stop_free                = sbp_check_stop_free,
 
        .fabric_make_wwn                = sbp_make_tport,
index c3d9df6aaf5f35bc4665355bf7cfe18c9e522aa7..fcbe6125b73ee51289ffe93f9d7951d3ff4485e2 100644 (file)
@@ -455,11 +455,26 @@ out:
        return rc;
 }
 
-static inline int core_alua_state_nonoptimized(
+static inline void set_ascq(struct se_cmd *cmd, u8 alua_ascq)
+{
+       /*
+        * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
+        * The ALUA additional sense code qualifier (ASCQ) is determined
+        * by the ALUA primary or secondary access state..
+        */
+       pr_debug("[%s]: ALUA TG Port not available, "
+               "SenseKey: NOT_READY, ASC/ASCQ: "
+               "0x04/0x%02x\n",
+               cmd->se_tfo->get_fabric_name(), alua_ascq);
+
+       cmd->scsi_asc = 0x04;
+       cmd->scsi_ascq = alua_ascq;
+}
+
+static inline void core_alua_state_nonoptimized(
        struct se_cmd *cmd,
        unsigned char *cdb,
-       int nonop_delay_msecs,
-       u8 *alua_ascq)
+       int nonop_delay_msecs)
 {
        /*
         * Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
@@ -468,13 +483,11 @@ static inline int core_alua_state_nonoptimized(
         */
        cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
        cmd->alua_nonop_delay = nonop_delay_msecs;
-       return 0;
 }
 
 static inline int core_alua_state_lba_dependent(
        struct se_cmd *cmd,
-       struct t10_alua_tg_pt_gp *tg_pt_gp,
-       u8 *alua_ascq)
+       struct t10_alua_tg_pt_gp *tg_pt_gp)
 {
        struct se_device *dev = cmd->se_dev;
        u64 segment_size, segment_mult, sectors, lba;
@@ -520,7 +533,7 @@ static inline int core_alua_state_lba_dependent(
                }
                if (!cur_map) {
                        spin_unlock(&dev->t10_alua.lba_map_lock);
-                       *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
                list_for_each_entry(map_mem, &cur_map->lba_map_mem_list,
@@ -531,11 +544,11 @@ static inline int core_alua_state_lba_dependent(
                        switch(map_mem->lba_map_mem_alua_state) {
                        case ALUA_ACCESS_STATE_STANDBY:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
-                               *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+                               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                                return 1;
                        case ALUA_ACCESS_STATE_UNAVAILABLE:
                                spin_unlock(&dev->t10_alua.lba_map_lock);
-                               *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+                               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                                return 1;
                        default:
                                break;
@@ -548,8 +561,7 @@ static inline int core_alua_state_lba_dependent(
 
 static inline int core_alua_state_standby(
        struct se_cmd *cmd,
-       unsigned char *cdb,
-       u8 *alua_ascq)
+       unsigned char *cdb)
 {
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
@@ -570,7 +582,7 @@ static inline int core_alua_state_standby(
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
-                       *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case MAINTENANCE_OUT:
@@ -578,7 +590,7 @@ static inline int core_alua_state_standby(
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
-                       *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                        return 1;
                }
        case REQUEST_SENSE:
@@ -588,7 +600,7 @@ static inline int core_alua_state_standby(
        case WRITE_BUFFER:
                return 0;
        default:
-               *alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
+               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_STANDBY);
                return 1;
        }
 
@@ -597,8 +609,7 @@ static inline int core_alua_state_standby(
 
 static inline int core_alua_state_unavailable(
        struct se_cmd *cmd,
-       unsigned char *cdb,
-       u8 *alua_ascq)
+       unsigned char *cdb)
 {
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
@@ -613,7 +624,7 @@ static inline int core_alua_state_unavailable(
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
-                       *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case MAINTENANCE_OUT:
@@ -621,7 +632,7 @@ static inline int core_alua_state_unavailable(
                case MO_SET_TARGET_PGS:
                        return 0;
                default:
-                       *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+                       set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                        return 1;
                }
        case REQUEST_SENSE:
@@ -629,7 +640,7 @@ static inline int core_alua_state_unavailable(
        case WRITE_BUFFER:
                return 0;
        default:
-               *alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
+               set_ascq(cmd, ASCQ_04H_ALUA_TG_PT_UNAVAILABLE);
                return 1;
        }
 
@@ -638,8 +649,7 @@ static inline int core_alua_state_unavailable(
 
 static inline int core_alua_state_transition(
        struct se_cmd *cmd,
-       unsigned char *cdb,
-       u8 *alua_ascq)
+       unsigned char *cdb)
 {
        /*
         * Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
@@ -654,7 +664,7 @@ static inline int core_alua_state_transition(
                case MI_REPORT_TARGET_PGS:
                        return 0;
                default:
-                       *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+                       set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                        return 1;
                }
        case REQUEST_SENSE:
@@ -662,7 +672,7 @@ static inline int core_alua_state_transition(
        case WRITE_BUFFER:
                return 0;
        default:
-               *alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
+               set_ascq(cmd, ASCQ_04H_ALUA_STATE_TRANSITION);
                return 1;
        }
 
@@ -684,8 +694,6 @@ target_alua_state_check(struct se_cmd *cmd)
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
        int out_alua_state, nonop_delay_msecs;
-       u8 alua_ascq;
-       int ret;
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
@@ -701,9 +709,8 @@ target_alua_state_check(struct se_cmd *cmd)
        if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
                pr_debug("ALUA: Got secondary offline status for local"
                                " target port\n");
-               alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-               ret = 1;
-               goto out;
+               set_ascq(cmd, ASCQ_04H_ALUA_OFFLINE);
+               return TCM_CHECK_CONDITION_NOT_READY;
        }
         /*
         * Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
@@ -731,20 +738,23 @@ target_alua_state_check(struct se_cmd *cmd)
 
        switch (out_alua_state) {
        case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
-               ret = core_alua_state_nonoptimized(cmd, cdb,
-                                       nonop_delay_msecs, &alua_ascq);
+               core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs);
                break;
        case ALUA_ACCESS_STATE_STANDBY:
-               ret = core_alua_state_standby(cmd, cdb, &alua_ascq);
+               if (core_alua_state_standby(cmd, cdb))
+                       return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_UNAVAILABLE:
-               ret = core_alua_state_unavailable(cmd, cdb, &alua_ascq);
+               if (core_alua_state_unavailable(cmd, cdb))
+                       return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_TRANSITION:
-               ret = core_alua_state_transition(cmd, cdb, &alua_ascq);
+               if (core_alua_state_transition(cmd, cdb))
+                       return TCM_CHECK_CONDITION_NOT_READY;
                break;
        case ALUA_ACCESS_STATE_LBA_DEPENDENT:
-               ret = core_alua_state_lba_dependent(cmd, tg_pt_gp, &alua_ascq);
+               if (core_alua_state_lba_dependent(cmd, tg_pt_gp))
+                       return TCM_CHECK_CONDITION_NOT_READY;
                break;
        /*
         * OFFLINE is a secondary ALUA target port group access state, that is
@@ -757,23 +767,6 @@ target_alua_state_check(struct se_cmd *cmd)
                return TCM_INVALID_CDB_FIELD;
        }
 
-out:
-       if (ret > 0) {
-               /*
-                * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';
-                * The ALUA additional sense code qualifier (ASCQ) is determined
-                * by the ALUA primary or secondary access state..
-                */
-               pr_debug("[%s]: ALUA TG Port not available, "
-                       "SenseKey: NOT_READY, ASC/ASCQ: "
-                       "0x04/0x%02x\n",
-                       cmd->se_tfo->get_fabric_name(), alua_ascq);
-
-               cmd->scsi_asc = 0x04;
-               cmd->scsi_ascq = alua_ascq;
-               return TCM_CHECK_CONDITION_NOT_READY;
-       }
-
        return 0;
 }
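
For illustration only: the ALUA rework replaces the u8 *alua_ascq out-parameter with a set_ascq() helper that records the ASC/ASCQ pair directly on the command, so each state checker can return non-zero and let target_alua_state_check() map that straight to TCM_CHECK_CONDITION_NOT_READY. A standalone sketch of the same out-parameter-to-setter refactor, using stand-in structs and values, not kernel types.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for struct se_cmd's sense fields. */
struct demo_cmd {
	uint8_t scsi_asc;
	uint8_t scsi_ascq;
};

/* After the refactor the checker records sense data itself... */
static void demo_set_ascq(struct demo_cmd *cmd, uint8_t ascq)
{
	cmd->scsi_asc = 0x04;            /* LUN not accessible */
	cmd->scsi_ascq = ascq;
}

/* ...so a state check returns a plain status, with no out-parameter
 * threaded through every helper. */
static int demo_state_standby(struct demo_cmd *cmd, uint8_t opcode)
{
	if (opcode == 0x12)              /* INQUIRY-like, always allowed */
		return 0;
	demo_set_ascq(cmd, 0x0b);        /* stand-in for the STANDBY ASCQ */
	return 1;                        /* caller maps this to CHECK CONDITION */
}

int main(void)
{
	struct demo_cmd cmd = { 0 };
	int blocked = demo_state_standby(&cmd, 0x28 /* READ(10) */);

	printf("blocked=%d asc/ascq=0x%02x/0x%02x\n",
	       blocked, cmd.scsi_asc, cmd.scsi_ascq);
	return 0;
}
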
 
index f0e85b1196926383149854c8e373104f1344ea59..60a9ae6df763d4928c4077ee210db39480872daa 100644 (file)
@@ -457,6 +457,10 @@ static int target_fabric_tf_ops_check(
                pr_err("Missing tfo->queue_tm_rsp()\n");
                return -EINVAL;
        }
+       if (!tfo->aborted_task) {
+               pr_err("Missing tfo->aborted_task()\n");
+               return -EINVAL;
+       }
        /*
         * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
         * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
index cf991a91a8a9699f655fd325f654cb16ed3046d5..7d6cddaec5250cf28239c49b22eeb269a30e69d0 100644 (file)
@@ -854,25 +854,6 @@ static int fd_init_prot(struct se_device *dev)
        return 0;
 }
 
-static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
-                              u32 unit_size, u32 *ref_tag, u16 app_tag,
-                              bool inc_reftag)
-{
-       unsigned char *p = buf;
-       int i;
-
-       for (i = 0; i < unit_size; i += dev->prot_length) {
-               *((u16 *)&p[0]) = 0xffff;
-               *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
-               *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
-
-               if (inc_reftag)
-                       (*ref_tag)++;
-
-               p += dev->prot_length;
-       }
-}
-
 static int fd_format_prot(struct se_device *dev)
 {
        struct fd_dev *fd_dev = FD_DEV(dev);
@@ -880,10 +861,8 @@ static int fd_format_prot(struct se_device *dev)
        sector_t prot_length, prot;
        unsigned char *buf;
        loff_t pos = 0;
-       u32 ref_tag = 0;
        int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
        int rc, ret = 0, size, len;
-       bool inc_reftag = false;
 
        if (!dev->dev_attrib.pi_prot_type) {
                pr_err("Unable to format_prot while pi_prot_type == 0\n");
@@ -894,37 +873,20 @@ static int fd_format_prot(struct se_device *dev)
                return -ENODEV;
        }
 
-       switch (dev->dev_attrib.pi_prot_type) {
-       case TARGET_DIF_TYPE3_PROT:
-               ref_tag = 0xffffffff;
-               break;
-       case TARGET_DIF_TYPE2_PROT:
-       case TARGET_DIF_TYPE1_PROT:
-               inc_reftag = true;
-               break;
-       default:
-               break;
-       }
-
        buf = vzalloc(unit_size);
        if (!buf) {
                pr_err("Unable to allocate FILEIO prot buf\n");
                return -ENOMEM;
        }
-
        prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
        size = prot_length;
 
        pr_debug("Using FILEIO prot_length: %llu\n",
                 (unsigned long long)prot_length);
 
+       memset(buf, 0xff, unit_size);
        for (prot = 0; prot < prot_length; prot += unit_size) {
-
-               fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
-                                  inc_reftag);
-
                len = min(unit_size, size);
-
                rc = kernel_write(prot_fd, buf, len, pos);
                if (rc != len) {
                        pr_err("vfs_write to prot file failed: %d\n", rc);
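
Illustration, not patch content: fd_format_prot() no longer seeds per-block app/ref tags; it simply fills the protection file with a constant 0xff pattern in unit_size chunks. A userspace sketch of the same loop, with fwrite() standing in for kernel_write() and tmpfile() for the backing protection file.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Write an all-0xff protection area in fixed-size chunks, as the
 * simplified format helper does. */
static int demo_format_prot(FILE *f, long long prot_length, size_t unit_size)
{
	unsigned char *buf = malloc(unit_size);
	long long done;

	if (!buf)
		return -1;
	memset(buf, 0xff, unit_size);            /* constant fill pattern */
	for (done = 0; done < prot_length; done += unit_size) {
		size_t len = unit_size;

		if (prot_length - done < (long long)unit_size)
			len = (size_t)(prot_length - done);
		if (fwrite(buf, 1, len, f) != len) {
			free(buf);
			return -1;
		}
	}
	free(buf);
	return 0;
}

int main(void)
{
	FILE *f = tmpfile();
	int rc = f ? demo_format_prot(f, 4096 * 8, 2048) : -1;

	printf("format rc=%d\n", rc);
	if (f)
		fclose(f);
	return 0;
}
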
index 554d4f75a75a6263ac7fb731804f787af64731ac..9e0232cca92e03356c768e3dada2a27f53427452 100644 (file)
@@ -203,10 +203,9 @@ static void iblock_free_device(struct se_device *dev)
 
        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-       if (ib_dev->ibd_bio_set != NULL) {
-               bioset_integrity_free(ib_dev->ibd_bio_set);
+       if (ib_dev->ibd_bio_set != NULL)
                bioset_free(ib_dev->ibd_bio_set);
-       }
+
        kfree(ib_dev);
 }
 
index 66a5aba5a0d9c6a6708582a026b25b0f6a17075e..b920db3388cdc19d4bef721a6c6d55680cfd2d8c 100644 (file)
@@ -242,7 +242,7 @@ static void rd_release_prot_space(struct rd_dev *rd_dev)
        rd_dev->sg_prot_count = 0;
 }
 
-static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
+static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length, int block_size)
 {
        struct rd_dev_sg_table *sg_table;
        u32 total_sg_needed, sg_tables;
@@ -252,8 +252,13 @@ static int rd_build_prot_space(struct rd_dev *rd_dev, int prot_length)
 
        if (rd_dev->rd_flags & RDF_NULLIO)
                return 0;
-
-       total_sg_needed = rd_dev->rd_page_count / prot_length;
+       /*
+        * prot_length=8byte dif data
+        * tot sg needed = rd_page_count * (PGSZ/block_size) *
+        *                 (prot_length/block_size) + pad
+        * PGSZ canceled each other.
+        */
+       total_sg_needed = (rd_dev->rd_page_count * prot_length / block_size) + 1;
 
        sg_tables = (total_sg_needed / max_sg_per_table) + 1;
 
@@ -606,7 +611,8 @@ static int rd_init_prot(struct se_device *dev)
         if (!dev->dev_attrib.pi_prot_type)
                return 0;
 
-       return rd_build_prot_space(rd_dev, dev->prot_length);
+       return rd_build_prot_space(rd_dev, dev->prot_length,
+                                  dev->dev_attrib.block_size);
 }
 
 static void rd_free_prot(struct se_device *dev)
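
The new sizing comment compresses a short derivation; spelled out: the ramdisk holds rd_page_count pages of data, each page holds PAGE_SIZE/block_size blocks, and each block needs prot_length bytes of PI, so the PI area needs rd_page_count * (PAGE_SIZE/block_size) * prot_length bytes, i.e. rd_page_count * prot_length / block_size pages, plus one page of padding. A standalone check of the arithmetic (values below are hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Protection sg entries (one page each) needed for a ramdisk backstore:
 * data bytes  = rd_page_count * PAGE_SIZE
 * blocks      = data bytes / block_size
 * prot bytes  = blocks * prot_length
 * prot pages  = prot bytes / PAGE_SIZE
 *             = rd_page_count * prot_length / block_size
 * The PAGE_SIZE terms cancel; +1 pads the result. */
static uint32_t prot_sg_needed(uint32_t rd_page_count, uint32_t prot_length,
			       uint32_t block_size)
{
	return rd_page_count * prot_length / block_size + 1;
}

int main(void)
{
	/* e.g. 8 MiB of 512-byte blocks with 8-byte DIF tuples -> 33 pages */
	printf("%u\n", (unsigned)prot_sg_needed(2048, 8, 512));
	return 0;
}
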
index 77e6531fb0a1c0a25ed16b71d1936ec37c2ed50a..e0229592ec5509656aed292970af719be1f9111d 100644 (file)
@@ -89,6 +89,7 @@ static sense_reason_t
 sbc_emulate_readcapacity_16(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
+       struct se_session *sess = cmd->se_sess;
        unsigned char *rbuf;
        unsigned char buf[32];
        unsigned long long blocks = dev->transport->get_blocks(dev);
@@ -109,8 +110,10 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
        /*
         * Set P_TYPE and PROT_EN bits for DIF support
         */
-       if (dev->dev_attrib.pi_prot_type)
-               buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+       if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+               if (dev->dev_attrib.pi_prot_type)
+                       buf[12] = (dev->dev_attrib.pi_prot_type - 1) << 1 | 0x1;
+       }
 
        if (dev->transport->get_lbppbe)
                buf[13] = dev->transport->get_lbppbe(dev) & 0x0f;
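
For illustration, a standalone sketch of how byte 12 of the READ CAPACITY(16) payload is built once the session check above is applied: P_TYPE occupies bits 3..1, PROT_EN is bit 0, and the whole byte stays zero when the fabric cannot pass protection information. The helper name and the sess_supports_pass flag are illustrative, not kernel API.

#include <stdio.h>
#include <stdint.h>

/* prot_type is the DIF type (1..3), 0 = disabled. */
static uint8_t readcap16_byte12(int prot_type, int sess_supports_pass)
{
	if (!sess_supports_pass || !prot_type)
		return 0;
	return (uint8_t)(((prot_type - 1) << 1) | 0x1);   /* P_TYPE | PROT_EN */
}

int main(void)
{
	printf("type1: 0x%02x\n", readcap16_byte12(1, 1));   /* 0x01 */
	printf("type3: 0x%02x\n", readcap16_byte12(3, 1));   /* 0x05 */
	printf("no PASS support: 0x%02x\n", readcap16_byte12(1, 0));
	return 0;
}
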
@@ -425,13 +428,14 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
                goto out;
        }
 
-       write_sg = kzalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+       write_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
                           GFP_KERNEL);
        if (!write_sg) {
                pr_err("Unable to allocate compare_and_write sg\n");
                ret = TCM_OUT_OF_RESOURCES;
                goto out;
        }
+       sg_init_table(write_sg, cmd->t_data_nents);
        /*
         * Setup verify and write data payloads from total NumberLBAs.
         */
@@ -569,30 +573,85 @@ sbc_compare_and_write(struct se_cmd *cmd)
        return TCM_NO_SENSE;
 }
 
+static int
+sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
+                      bool is_write, struct se_cmd *cmd)
+{
+       if (is_write) {
+               cmd->prot_op = protect ? TARGET_PROT_DOUT_PASS :
+                                        TARGET_PROT_DOUT_INSERT;
+               switch (protect) {
+               case 0x0:
+               case 0x3:
+                       cmd->prot_checks = 0;
+                       break;
+               case 0x1:
+               case 0x5:
+                       cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+                       if (prot_type == TARGET_DIF_TYPE1_PROT)
+                               cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+                       break;
+               case 0x2:
+                       if (prot_type == TARGET_DIF_TYPE1_PROT)
+                               cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+                       break;
+               case 0x4:
+                       cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+                       break;
+               default:
+                       pr_err("Unsupported protect field %d\n", protect);
+                       return -EINVAL;
+               }
+       } else {
+               cmd->prot_op = protect ? TARGET_PROT_DIN_PASS :
+                                        TARGET_PROT_DIN_STRIP;
+               switch (protect) {
+               case 0x0:
+               case 0x1:
+               case 0x5:
+                       cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+                       if (prot_type == TARGET_DIF_TYPE1_PROT)
+                               cmd->prot_checks |= TARGET_DIF_CHECK_REFTAG;
+                       break;
+               case 0x2:
+                       if (prot_type == TARGET_DIF_TYPE1_PROT)
+                               cmd->prot_checks = TARGET_DIF_CHECK_REFTAG;
+                       break;
+               case 0x3:
+                       cmd->prot_checks = 0;
+                       break;
+               case 0x4:
+                       cmd->prot_checks = TARGET_DIF_CHECK_GUARD;
+                       break;
+               default:
+                       pr_err("Unsupported protect field %d\n", protect);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
 static bool
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
-              u32 sectors)
+              u32 sectors, bool is_write)
 {
-       if (!cmd->t_prot_sg || !cmd->t_prot_nents)
+       u8 protect = cdb[1] >> 5;
+
+       if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
                return true;
 
        switch (dev->dev_attrib.pi_prot_type) {
        case TARGET_DIF_TYPE3_PROT:
-               if (!(cdb[1] & 0xe0))
-                       return true;
-
                cmd->reftag_seed = 0xffffffff;
                break;
        case TARGET_DIF_TYPE2_PROT:
-               if (cdb[1] & 0xe0)
+               if (protect)
                        return false;
 
                cmd->reftag_seed = cmd->t_task_lba;
                break;
        case TARGET_DIF_TYPE1_PROT:
-               if (!(cdb[1] & 0xe0))
-                       return true;
-
                cmd->reftag_seed = cmd->t_task_lba;
                break;
        case TARGET_DIF_TYPE0_PROT:
@@ -600,9 +659,15 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                return true;
        }
 
+       if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
+                                  is_write, cmd))
+               return false;
+
        cmd->prot_type = dev->dev_attrib.pi_prot_type;
        cmd->prot_length = dev->prot_length * sectors;
-       cmd->prot_handover = PROT_SEPERATED;
+       pr_debug("%s: prot_type=%d, prot_length=%d prot_op=%d prot_checks=%d\n",
+                __func__, cmd->prot_type, cmd->prot_length,
+                cmd->prot_op, cmd->prot_checks);
 
        return true;
 }
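
For illustration only: sbc_set_prot_op_checks() maps the 3-bit RDPROTECT/WRPROTECT field taken from CDB byte 1 (protect = cdb[1] >> 5) onto prot_op and prot_checks. A standalone sketch of the same mapping for a Type 1 device on the write path; the flag names and values are illustrative, not the kernel enums.

#include <stdio.h>
#include <stdint.h>

enum { CHECK_GUARD = 1 << 0, CHECK_REFTAG = 1 << 1 };

/* WRPROTECT cases for a Type 1 device, following the patch's switch. */
static int wrprotect_to_checks(uint8_t cdb1, unsigned *checks)
{
	uint8_t protect = cdb1 >> 5;

	switch (protect) {
	case 0x0: case 0x3:
		*checks = 0;
		return 0;
	case 0x1: case 0x5:
		*checks = CHECK_GUARD | CHECK_REFTAG;
		return 0;
	case 0x2:
		*checks = CHECK_REFTAG;
		return 0;
	case 0x4:
		*checks = CHECK_GUARD;
		return 0;
	default:
		return -1;                     /* 0x6, 0x7: unsupported */
	}
}

int main(void)
{
	unsigned checks;
	uint8_t cdb1 = (uint8_t)(0x1 << 5);    /* WRPROTECT=001 */

	if (!wrprotect_to_checks(cdb1, &checks))
		printf("checks=0x%x\n", checks);
	return 0;
}
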
@@ -628,7 +693,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors))
+               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -639,7 +704,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors))
+               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -650,7 +715,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors))
+               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
@@ -669,7 +734,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors))
+               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                if (cdb[1] & 0x8)
@@ -682,7 +747,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors))
+               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                if (cdb[1] & 0x8)
@@ -695,7 +760,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors))
+               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
                if (cdb[1] & 0x8)
@@ -1031,6 +1096,50 @@ err:
 }
 EXPORT_SYMBOL(sbc_execute_unmap);
 
+void
+sbc_dif_generate(struct se_cmd *cmd)
+{
+       struct se_device *dev = cmd->se_dev;
+       struct se_dif_v1_tuple *sdt;
+       struct scatterlist *dsg, *psg = cmd->t_prot_sg;
+       sector_t sector = cmd->t_task_lba;
+       void *daddr, *paddr;
+       int i, j, offset = 0;
+
+       for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
+               daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
+               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+
+               for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
+
+                       if (offset >= psg->length) {
+                               kunmap_atomic(paddr);
+                               psg = sg_next(psg);
+                               paddr = kmap_atomic(sg_page(psg)) + psg->offset;
+                               offset = 0;
+                       }
+
+                       sdt = paddr + offset;
+                       sdt->guard_tag = cpu_to_be16(crc_t10dif(daddr + j,
+                                               dev->dev_attrib.block_size));
+                       if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+                               sdt->ref_tag = cpu_to_be32(sector & 0xffffffff);
+                       sdt->app_tag = 0;
+
+                       pr_debug("DIF WRITE INSERT sector: %llu guard_tag: 0x%04x"
+                                " app_tag: 0x%04x ref_tag: %u\n",
+                                (unsigned long long)sector, sdt->guard_tag,
+                                sdt->app_tag, be32_to_cpu(sdt->ref_tag));
+
+                       sector++;
+                       offset += sizeof(struct se_dif_v1_tuple);
+               }
+
+               kunmap_atomic(paddr);
+               kunmap_atomic(daddr);
+       }
+}
+
 static sense_reason_t
 sbc_dif_v1_verify(struct se_device *dev, struct se_dif_v1_tuple *sdt,
                  const void *p, sector_t sector, unsigned int ei_lba)
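
Not patch content, only illustration: sbc_dif_generate() walks the data and protection scatterlists and emits one 8-byte tuple per logical block — guard tag = CRC of the block data, reference tag = low 32 bits of the LBA for Type 1, application tag = 0. A simplified flat-buffer sketch of the same computation; the bit-at-a-time CRC below uses the T10-DIF polynomial 0x8BB7 and is only a stand-in for the kernel's crc_t10dif().

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons/htonl: tags are stored big-endian */

/* 8-byte DIF tuple as laid out on the wire. */
struct dif_tuple {
	uint16_t guard_tag;
	uint16_t app_tag;
	uint32_t ref_tag;
};

/* Bit-at-a-time CRC16 with polynomial 0x8BB7, initial value 0. */
static uint16_t crc_t10dif_sw(const uint8_t *buf, size_t len)
{
	uint16_t crc = 0;

	for (size_t i = 0; i < len; i++) {
		crc ^= (uint16_t)buf[i] << 8;
		for (int j = 0; j < 8; j++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x8BB7)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

/* Type 1 generation over a flat buffer: one tuple per block. */
static void dif_generate(const uint8_t *data, size_t blocks, size_t block_size,
			 uint64_t lba, struct dif_tuple *out)
{
	for (size_t i = 0; i < blocks; i++, lba++) {
		out[i].guard_tag = htons(crc_t10dif_sw(data + i * block_size,
						       block_size));
		out[i].app_tag = 0;
		out[i].ref_tag = htonl((uint32_t)lba);
	}
}

int main(void)
{
	uint8_t block[2 * 512];
	struct dif_tuple pi[2];

	memset(block, 0xab, sizeof(block));
	dif_generate(block, 2, 512, 100, pi);
	printf("guard=0x%04x ref=%u\n", (unsigned)ntohs(pi[0].guard_tag),
	       (unsigned)ntohl(pi[0].ref_tag));
	return 0;
}
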
@@ -1162,9 +1271,9 @@ sbc_dif_verify_write(struct se_cmd *cmd, sector_t start, unsigned int sectors,
 }
 EXPORT_SYMBOL(sbc_dif_verify_write);
 
-sense_reason_t
-sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
-                   unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+static sense_reason_t
+__sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+                     unsigned int ei_lba, struct scatterlist *sg, int sg_off)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_dif_v1_tuple *sdt;
@@ -1217,8 +1326,31 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
                kunmap_atomic(paddr);
                kunmap_atomic(daddr);
        }
-       sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
 
        return 0;
 }
+
+sense_reason_t
+sbc_dif_read_strip(struct se_cmd *cmd)
+{
+       struct se_device *dev = cmd->se_dev;
+       u32 sectors = cmd->prot_length / dev->prot_length;
+
+       return __sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors, 0,
+                                    cmd->t_prot_sg, 0);
+}
+
+sense_reason_t
+sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
+                   unsigned int ei_lba, struct scatterlist *sg, int sg_off)
+{
+       sense_reason_t rc;
+
+       rc = __sbc_dif_verify_read(cmd, start, sectors, ei_lba, sg, sg_off);
+       if (rc)
+               return rc;
+
+       sbc_dif_copy_prot(cmd, sectors, true, sg, sg_off);
+       return 0;
+}
 EXPORT_SYMBOL(sbc_dif_verify_read);
index 3bebc71ea033908e8e64843ba98b295b030a78ad..8653666612a802f5cbfcde2c4d82ac7d20ca9294 100644 (file)
@@ -71,6 +71,7 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_lun *lun = cmd->se_lun;
        struct se_device *dev = cmd->se_dev;
+       struct se_session *sess = cmd->se_sess;
 
        /* Set RMB (removable media) for tape devices */
        if (dev->transport->get_device_type(dev) == TYPE_TAPE)
@@ -101,10 +102,13 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
        if (dev->dev_attrib.emulate_3pc)
                buf[5] |= 0x8;
        /*
-        * Set Protection (PROTECT) bit when DIF has been enabled.
+        * Set Protection (PROTECT) bit when DIF has been enabled on the
+        * device, and the transport supports VERIFY + PASS.
         */
-       if (dev->dev_attrib.pi_prot_type)
-               buf[5] |= 0x1;
+       if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+               if (dev->dev_attrib.pi_prot_type)
+                       buf[5] |= 0x1;
+       }
 
        buf[7] = 0x2; /* CmdQue=1 */
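
The same session gate is applied to the standard INQUIRY data: the PROTECT bit in byte 5 is advertised only when the backend has PI enabled and the fabric reported a PASS protection op. A small illustrative sketch (bit values are stand-ins, not the kernel's enum target_prot_op):

#include <stdio.h>

enum { PROT_DIN_PASS = 1 << 0, PROT_DOUT_PASS = 1 << 1 };

static unsigned char inquiry_byte5(unsigned sup_prot_ops, int pi_prot_type,
				   unsigned char byte5)
{
	if ((sup_prot_ops & (PROT_DIN_PASS | PROT_DOUT_PASS)) && pi_prot_type)
		byte5 |= 0x1;               /* PROTECT */
	return byte5;
}

int main(void)
{
	printf("0x%02x\n", inquiry_byte5(PROT_DIN_PASS, 1, 0x08));  /* 0x09 */
	printf("0x%02x\n", inquiry_byte5(0, 1, 0x08));              /* PI hidden */
	return 0;
}
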
 
@@ -473,16 +477,19 @@ static sense_reason_t
 spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
+       struct se_session *sess = cmd->se_sess;
 
        buf[3] = 0x3c;
        /*
         * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
         * only for TYPE3 protection.
         */
-       if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
-               buf[4] = 0x5;
-       else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
-               buf[4] = 0x4;
+       if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+               if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT)
+                       buf[4] = 0x5;
+               else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT)
+                       buf[4] = 0x4;
+       }
 
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
@@ -762,7 +769,7 @@ out:
        return ret;
 }
 
-static int spc_modesense_rwrecovery(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
 {
        p[0] = 0x01;
        p[1] = 0x0a;
@@ -775,8 +782,11 @@ out:
        return 12;
 }
 
-static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+       struct se_device *dev = cmd->se_dev;
+       struct se_session *sess = cmd->se_sess;
+
        p[0] = 0x0a;
        p[1] = 0x0a;
 
@@ -868,8 +878,10 @@ static int spc_modesense_control(struct se_device *dev, u8 pc, u8 *p)
         * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
         * TAG field.
         */
-       if (dev->dev_attrib.pi_prot_type)
-               p[5] |= 0x80;
+       if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
+               if (dev->dev_attrib.pi_prot_type)
+                       p[5] |= 0x80;
+       }
 
        p[8] = 0xff;
        p[9] = 0xff;
@@ -879,8 +891,10 @@ out:
        return 12;
 }
 
-static int spc_modesense_caching(struct se_device *dev, u8 pc, u8 *p)
+static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
 {
+       struct se_device *dev = cmd->se_dev;
+
        p[0] = 0x08;
        p[1] = 0x12;
 
@@ -896,7 +910,7 @@ out:
        return 20;
 }
 
-static int spc_modesense_informational_exceptions(struct se_device *dev, u8 pc, unsigned char *p)
+static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
 {
        p[0] = 0x1c;
        p[1] = 0x0a;
@@ -912,7 +926,7 @@ out:
 static struct {
        uint8_t         page;
        uint8_t         subpage;
-       int             (*emulate)(struct se_device *, u8, unsigned char *);
+       int             (*emulate)(struct se_cmd *, u8, unsigned char *);
 } modesense_handlers[] = {
        { .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
        { .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
@@ -1050,7 +1064,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
                         * the only two possibilities).
                         */
                        if ((modesense_handlers[i].subpage & ~subpage) == 0) {
-                               ret = modesense_handlers[i].emulate(dev, pc, &buf[length]);
+                               ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
                                if (!ten && length + ret >= 255)
                                        break;
                                length += ret;
@@ -1063,7 +1077,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
        for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
                if (modesense_handlers[i].page == page &&
                    modesense_handlers[i].subpage == subpage) {
-                       length += modesense_handlers[i].emulate(dev, pc, &buf[length]);
+                       length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
                        goto set_length;
                }
 
@@ -1095,7 +1109,6 @@ set_length:
 
 static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
 {
-       struct se_device *dev = cmd->se_dev;
        char *cdb = cmd->t_task_cdb;
        bool ten = cdb[0] == MODE_SELECT_10;
        int off = ten ? 8 : 4;
@@ -1131,7 +1144,7 @@ static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
                if (modesense_handlers[i].page == page &&
                    modesense_handlers[i].subpage == subpage) {
                        memset(tbuf, 0, SE_MODE_PAGE_BUF);
-                       length = modesense_handlers[i].emulate(dev, 0, tbuf);
+                       length = modesense_handlers[i].emulate(cmd, 0, tbuf);
                        goto check_contents;
                }
 
index 70c638f730af078e25e4ca75b45372e3d16ac24a..f7cd95e8111a7e496cf198c9f6a7ec18f854942b 100644 (file)
@@ -87,14 +87,17 @@ static void core_tmr_handle_tas_abort(
        struct se_cmd *cmd,
        int tas)
 {
+       bool remove = true;
        /*
         * TASK ABORTED status (TAS) bit support
        */
        if ((tmr_nacl &&
-            (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
+            (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+               remove = false;
                transport_send_task_abort(cmd);
+       }
 
-       transport_cmd_finish_abort(cmd, 0);
+       transport_cmd_finish_abort(cmd, remove);
 }
 
 static int target_check_cdb_and_preempt(struct list_head *list,
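
For illustration only: core_tmr_handle_tas_abort() now sends TASK ABORTED status only to an initiator other than the one that issued the TMR, and only when TAS is enabled; in every other case the command is finished with remove=true. A small truth-table sketch of the corrected condition, using stand-in pointers rather than se_node_acl.

#include <stdio.h>
#include <stdbool.h>

/* TAS goes only to a *different* initiator, and only when TAS is on. */
static bool send_task_aborted(const void *tmr_nacl, const void *cmd_nacl,
			      bool tas)
{
	return tmr_nacl && tmr_nacl != cmd_nacl && tas;
}

int main(void)
{
	int a, b;   /* stand-ins for two node ACLs */

	printf("other initiator, TAS on : %d\n", send_task_aborted(&a, &b, true));
	printf("same initiator,  TAS on : %d\n", send_task_aborted(&a, &a, true));
	printf("other initiator, TAS off: %d\n", send_task_aborted(&a, &b, false));
	return 0;   /* remove = !send in the patched helper */
}
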
@@ -127,6 +130,11 @@ void core_tmr_abort_task(
 
                if (dev != se_cmd->se_dev)
                        continue;
+
+               /* skip se_cmd associated with tmr */
+               if (tmr->task_cmd == se_cmd)
+                       continue;
+
                ref_tag = se_cmd->se_tfo->get_task_tag(se_cmd);
                if (tmr->ref_task_tag != ref_tag)
                        continue;
@@ -150,18 +158,9 @@ void core_tmr_abort_task(
 
                cancel_work_sync(&se_cmd->work);
                transport_wait_for_tasks(se_cmd);
-               /*
-                * Now send SAM_STAT_TASK_ABORTED status for the referenced
-                * se_cmd descriptor..
-                */
-               transport_send_task_abort(se_cmd);
-               /*
-                * Also deal with possible extra acknowledge reference..
-                */
-               if (se_cmd->se_cmd_flags & SCF_ACK_KREF)
-                       target_put_sess_cmd(se_sess, se_cmd);
 
                target_put_sess_cmd(se_sess, se_cmd);
+               transport_cmd_finish_abort(se_cmd, true);
 
                printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                                " ref_tag: %d\n", ref_tag);
index 2956250b7225c99d77a7475b27d1bf881231539f..d4b98690a73680244676b6e608ede6c85ff724cb 100644 (file)
@@ -235,7 +235,7 @@ void transport_subsystem_check_init(void)
        sub_api_initialized = 1;
 }
 
-struct se_session *transport_init_session(void)
+struct se_session *transport_init_session(enum target_prot_op sup_prot_ops)
 {
        struct se_session *se_sess;
 
@@ -251,6 +251,7 @@ struct se_session *transport_init_session(void)
        INIT_LIST_HEAD(&se_sess->sess_wait_list);
        spin_lock_init(&se_sess->sess_cmd_lock);
        kref_init(&se_sess->sess_kref);
+       se_sess->sup_prot_ops = sup_prot_ops;
 
        return se_sess;
 }
@@ -288,12 +289,13 @@ int transport_alloc_session_tags(struct se_session *se_sess,
 EXPORT_SYMBOL(transport_alloc_session_tags);
 
 struct se_session *transport_init_session_tags(unsigned int tag_num,
-                                              unsigned int tag_size)
+                                              unsigned int tag_size,
+                                              enum target_prot_op sup_prot_ops)
 {
        struct se_session *se_sess;
        int rc;
 
-       se_sess = transport_init_session();
+       se_sess = transport_init_session(sup_prot_ops);
        if (IS_ERR(se_sess))
                return se_sess;
 
@@ -603,6 +605,15 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
 
 void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
 {
+       if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
+               transport_lun_remove_cmd(cmd);
+       /*
+        * Allow the fabric driver to unmap any resources before
+        * releasing the descriptor via TFO->release_cmd()
+        */
+       if (remove)
+               cmd->se_tfo->aborted_task(cmd);
+
        if (transport_cmd_check_stop_to_fabric(cmd))
                return;
        if (remove)
@@ -1365,6 +1376,13 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                target_put_sess_cmd(se_sess, se_cmd);
                return 0;
        }
+
+       rc = target_setup_cmd_from_cdb(se_cmd, cdb);
+       if (rc != 0) {
+               transport_generic_request_failure(se_cmd, rc);
+               return 0;
+       }
+
        /*
         * Save pointers for SGLs containing protection information,
         * if present.
@@ -1374,11 +1392,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                se_cmd->t_prot_nents = sgl_prot_count;
        }
 
-       rc = target_setup_cmd_from_cdb(se_cmd, cdb);
-       if (rc != 0) {
-               transport_generic_request_failure(se_cmd, rc);
-               return 0;
-       }
        /*
         * When a non zero sgl_count has been passed perform SGL passthrough
         * mapping for pre-allocated fabric memory instead of having target
@@ -1754,6 +1767,15 @@ void target_execute_cmd(struct se_cmd *cmd)
        cmd->t_state = TRANSPORT_PROCESSING;
        cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT;
        spin_unlock_irq(&cmd->t_state_lock);
+       /*
+        * Perform WRITE_INSERT of PI using software emulation when backend
+        * device has PI enabled, if the transport has not already generated
+        * PI using hardware WRITE_INSERT offload.
+        */
+       if (cmd->prot_op == TARGET_PROT_DOUT_INSERT) {
+               if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
+                       sbc_dif_generate(cmd);
+       }
 
        if (target_handle_task_attr(cmd)) {
                spin_lock_irq(&cmd->t_state_lock);
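
Illustration, not patch content: target_execute_cmd() now falls back to software PI generation only when the command needs DOUT_INSERT and the session did not advertise hardware insert support. A minimal sketch of that gating; the enum values are illustrative, not the kernel's.

#include <stdio.h>

enum { OP_NORMAL = 0, OP_DOUT_INSERT = 1 };
enum { HW_DOUT_INSERT = 1 << 0 };

static void sw_generate_pi(void) { puts("software WRITE_INSERT"); }

/* Emulate in software only when the transport has not offloaded it. */
static void maybe_generate_pi(int prot_op, unsigned sup_prot_ops)
{
	if (prot_op == OP_DOUT_INSERT && !(sup_prot_ops & HW_DOUT_INSERT))
		sw_generate_pi();
}

int main(void)
{
	maybe_generate_pi(OP_DOUT_INSERT, 0);              /* emulated */
	maybe_generate_pi(OP_DOUT_INSERT, HW_DOUT_INSERT); /* offloaded */
	return 0;
}
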
@@ -1883,6 +1905,21 @@ static void transport_handle_queue_full(
        schedule_work(&cmd->se_dev->qf_work_queue);
 }
 
+static bool target_check_read_strip(struct se_cmd *cmd)
+{
+       sense_reason_t rc;
+
+       if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
+               rc = sbc_dif_read_strip(cmd);
+               if (rc) {
+                       cmd->pi_err = rc;
+                       return true;
+               }
+       }
+
+       return false;
+}
+
 static void target_complete_ok_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -1947,6 +1984,22 @@ static void target_complete_ok_work(struct work_struct *work)
                                        cmd->data_length;
                }
                spin_unlock(&cmd->se_lun->lun_sep_lock);
+               /*
+                * Perform READ_STRIP of PI using software emulation when
+                * backend had PI enabled, if the transport will not be
+                * performing hardware READ_STRIP offload.
+                */
+               if (cmd->prot_op == TARGET_PROT_DIN_STRIP &&
+                   target_check_read_strip(cmd)) {
+                       ret = transport_send_check_condition_and_sense(cmd,
+                                               cmd->pi_err, 0);
+                       if (ret == -EAGAIN || ret == -ENOMEM)
+                               goto queue_full;
+
+                       transport_lun_remove_cmd(cmd);
+                       transport_cmd_check_stop_to_fabric(cmd);
+                       return;
+               }
 
                trace_target_cmd_complete(cmd);
                ret = cmd->se_tfo->queue_data_in(cmd);
@@ -2039,6 +2092,10 @@ static inline void transport_free_pages(struct se_cmd *cmd)
        transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
        cmd->t_bidi_data_sg = NULL;
        cmd->t_bidi_data_nents = 0;
+
+       transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+       cmd->t_prot_sg = NULL;
+       cmd->t_prot_nents = 0;
 }
 
 /**
@@ -2202,6 +2259,14 @@ transport_generic_new_cmd(struct se_cmd *cmd)
                                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
 
+               if (cmd->prot_op != TARGET_PROT_NORMAL) {
+                       ret = target_alloc_sgl(&cmd->t_prot_sg,
+                                              &cmd->t_prot_nents,
+                                              cmd->prot_length, true);
+                       if (ret < 0)
+                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
+
                ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
                                       cmd->data_length, zero_flag);
                if (ret < 0)
@@ -2770,13 +2835,17 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
        if (!(cmd->transport_state & CMD_T_ABORTED))
                return 0;
 
-       if (!send_status || (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
+       /*
+        * If cmd has been aborted but either no status is to be sent or it has
+        * already been sent, just return
+        */
+       if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
                return 1;
 
        pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08x\n",
                 cmd->t_task_cdb[0], cmd->se_tfo->get_task_tag(cmd));
 
-       cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
+       cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
        trace_target_cmd_complete(cmd);
        cmd->se_tfo->queue_status(cmd);
@@ -2790,7 +2859,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
        unsigned long flags;
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
-       if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) {
+       if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION)) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }
@@ -2805,6 +2874,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
        if (cmd->data_direction == DMA_TO_DEVICE) {
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        cmd->transport_state |= CMD_T_ABORTED;
+                       cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
                        smp_mb__after_atomic_inc();
                        return;
                }
index 752863acecb8ce5716d99fc81172f52d8568937a..a0bcfd3e7e7d5924b06c56928d8c51989c6c66da 100644 (file)
@@ -94,20 +94,19 @@ struct ft_lun {
  */
 struct ft_tpg {
        u32 index;
-       struct ft_lport_acl *lport_acl;
+       struct ft_lport_wwn *lport_wwn;
        struct ft_tport *tport;         /* active tport or NULL */
-       struct list_head list;          /* linkage in ft_lport_acl tpg_list */
        struct list_head lun_list;      /* head of LUNs */
        struct se_portal_group se_tpg;
        struct workqueue_struct *workqueue;
 };
 
-struct ft_lport_acl {
+struct ft_lport_wwn {
        u64 wwpn;
        char name[FT_NAMELEN];
-       struct list_head list;
-       struct list_head tpg_list;
-       struct se_wwn fc_lport_wwn;
+       struct list_head ft_wwn_node;
+       struct ft_tpg *tpg;
+       struct se_wwn se_wwn;
 };
 
 /*
@@ -128,7 +127,6 @@ struct ft_cmd {
        u32 sg_cnt;                     /* No. of item in scatterlist */
 };
 
-extern struct list_head ft_lport_list;
 extern struct mutex ft_lport_lock;
 extern struct fc4_prov ft_prov;
 extern struct target_fabric_configfs *ft_configfs;
@@ -163,6 +161,7 @@ int ft_write_pending_status(struct se_cmd *);
 u32 ft_get_task_tag(struct se_cmd *);
 int ft_get_cmd_state(struct se_cmd *);
 void ft_queue_tm_resp(struct se_cmd *);
+void ft_aborted_task(struct se_cmd *);
 
 /*
  * other internal functions.
index 8b2c1aaf81dede06ebe9263f5c2d010ad4f8a997..01cf37f212c30724ed6a0addbe8c7cbe69dfd6a3 100644 (file)
@@ -426,6 +426,11 @@ void ft_queue_tm_resp(struct se_cmd *se_cmd)
        ft_send_resp_code(cmd, code);
 }
 
+void ft_aborted_task(struct se_cmd *se_cmd)
+{
+       return;
+}
+
 static void ft_send_work(struct work_struct *work);
 
 /*
index e879da81ad9303c2ad1d0d4d0487662523bc1b1c..efdcb9663a1a699520d3543b544ebac5c34ad860 100644 (file)
@@ -50,7 +50,7 @@
 
 struct target_fabric_configfs *ft_configfs;
 
-LIST_HEAD(ft_lport_list);
+static LIST_HEAD(ft_wwn_list);
 DEFINE_MUTEX(ft_lport_lock);
 
 unsigned int ft_debug_logging;
@@ -298,7 +298,7 @@ static struct se_portal_group *ft_add_tpg(
        struct config_group *group,
        const char *name)
 {
-       struct ft_lport_acl *lacl;
+       struct ft_lport_wwn *ft_wwn;
        struct ft_tpg *tpg;
        struct workqueue_struct *wq;
        unsigned long index;
@@ -318,12 +318,17 @@ static struct se_portal_group *ft_add_tpg(
        if (index > UINT_MAX)
                return NULL;
 
-       lacl = container_of(wwn, struct ft_lport_acl, fc_lport_wwn);
+       if ((index != 1)) {
+               pr_err("Error, a single TPG=1 is used for HW port mappings\n");
+               return ERR_PTR(-ENOSYS);
+       }
+
+       ft_wwn = container_of(wwn, struct ft_lport_wwn, se_wwn);
        tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
        if (!tpg)
                return NULL;
        tpg->index = index;
-       tpg->lport_acl = lacl;
+       tpg->lport_wwn = ft_wwn;
        INIT_LIST_HEAD(&tpg->lun_list);
 
        wq = alloc_workqueue("tcm_fc", 0, 1);
@@ -342,7 +347,7 @@ static struct se_portal_group *ft_add_tpg(
        tpg->workqueue = wq;
 
        mutex_lock(&ft_lport_lock);
-       list_add_tail(&tpg->list, &lacl->tpg_list);
+       ft_wwn->tpg = tpg;
        mutex_unlock(&ft_lport_lock);
 
        return &tpg->se_tpg;
@@ -351,6 +356,7 @@ static struct se_portal_group *ft_add_tpg(
 static void ft_del_tpg(struct se_portal_group *se_tpg)
 {
        struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
+       struct ft_lport_wwn *ft_wwn = tpg->lport_wwn;
 
        pr_debug("del tpg %s\n",
                    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
@@ -361,7 +367,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
        synchronize_rcu();
 
        mutex_lock(&ft_lport_lock);
-       list_del(&tpg->list);
+       ft_wwn->tpg = NULL;
        if (tpg->tport) {
                tpg->tport->tpg = NULL;
                tpg->tport = NULL;
@@ -380,15 +386,11 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
  */
 struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
 {
-       struct ft_lport_acl *lacl;
-       struct ft_tpg *tpg;
+       struct ft_lport_wwn *ft_wwn;
 
-       list_for_each_entry(lacl, &ft_lport_list, list) {
-               if (lacl->wwpn == lport->wwpn) {
-                       list_for_each_entry(tpg, &lacl->tpg_list, list)
-                               return tpg; /* XXX for now return first entry */
-                       return NULL;
-               }
+       list_for_each_entry(ft_wwn, &ft_wwn_list, ft_wwn_node) {
+               if (ft_wwn->wwpn == lport->wwpn)
+                       return ft_wwn->tpg;
        }
        return NULL;
 }
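
For illustration only: with a single TPG per WWN the lport-to-TPG lookup collapses to a scan that returns ft_wwn->tpg directly, removing the old "return the first list entry" workaround. A standalone sketch of the simplified lookup over stand-in structs.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-ins: one portal group per WWN after the rework. */
struct demo_tpg { int index; };
struct demo_wwn {
	uint64_t wwpn;
	struct demo_tpg *tpg;           /* was a list of TPGs before */
};

static struct demo_tpg *find_tpg(struct demo_wwn *wwns, size_t n, uint64_t wwpn)
{
	for (size_t i = 0; i < n; i++)
		if (wwns[i].wwpn == wwpn)
			return wwns[i].tpg;
	return NULL;
}

int main(void)
{
	struct demo_tpg t = { .index = 1 };
	struct demo_wwn w[] = { { 0x2000000000000001ULL, &t } };
	struct demo_tpg *found = find_tpg(w, 1, 0x2000000000000001ULL);

	printf("tpg index: %d\n", found ? found->index : -1);
	return 0;
}
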
@@ -401,50 +403,49 @@ struct ft_tpg *ft_lport_find_tpg(struct fc_lport *lport)
  * Add lport to allowed config.
  * The name is the WWPN in lower-case ASCII, colon-separated bytes.
  */
-static struct se_wwn *ft_add_lport(
+static struct se_wwn *ft_add_wwn(
        struct target_fabric_configfs *tf,
        struct config_group *group,
        const char *name)
 {
-       struct ft_lport_acl *lacl;
-       struct ft_lport_acl *old_lacl;
+       struct ft_lport_wwn *ft_wwn;
+       struct ft_lport_wwn *old_ft_wwn;
        u64 wwpn;
 
-       pr_debug("add lport %s\n", name);
+       pr_debug("add wwn %s\n", name);
        if (ft_parse_wwn(name, &wwpn, 1) < 0)
                return NULL;
-       lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
-       if (!lacl)
+       ft_wwn = kzalloc(sizeof(*ft_wwn), GFP_KERNEL);
+       if (!ft_wwn)
                return NULL;
-       lacl->wwpn = wwpn;
-       INIT_LIST_HEAD(&lacl->tpg_list);
+       ft_wwn->wwpn = wwpn;
 
        mutex_lock(&ft_lport_lock);
-       list_for_each_entry(old_lacl, &ft_lport_list, list) {
-               if (old_lacl->wwpn == wwpn) {
+       list_for_each_entry(old_ft_wwn, &ft_wwn_list, ft_wwn_node) {
+               if (old_ft_wwn->wwpn == wwpn) {
                        mutex_unlock(&ft_lport_lock);
-                       kfree(lacl);
+                       kfree(ft_wwn);
                        return NULL;
                }
        }
-       list_add_tail(&lacl->list, &ft_lport_list);
-       ft_format_wwn(lacl->name, sizeof(lacl->name), wwpn);
+       list_add_tail(&ft_wwn->ft_wwn_node, &ft_wwn_list);
+       ft_format_wwn(ft_wwn->name, sizeof(ft_wwn->name), wwpn);
        mutex_unlock(&ft_lport_lock);
 
-       return &lacl->fc_lport_wwn;
+       return &ft_wwn->se_wwn;
 }
 
-static void ft_del_lport(struct se_wwn *wwn)
+static void ft_del_wwn(struct se_wwn *wwn)
 {
-       struct ft_lport_acl *lacl = container_of(wwn,
-                               struct ft_lport_acl, fc_lport_wwn);
+       struct ft_lport_wwn *ft_wwn = container_of(wwn,
+                               struct ft_lport_wwn, se_wwn);
 
-       pr_debug("del lport %s\n", lacl->name);
+       pr_debug("del wwn %s\n", ft_wwn->name);
        mutex_lock(&ft_lport_lock);
-       list_del(&lacl->list);
+       list_del(&ft_wwn->ft_wwn_node);
        mutex_unlock(&ft_lport_lock);
 
-       kfree(lacl);
+       kfree(ft_wwn);
 }
 
 static ssize_t ft_wwn_show_attr_version(
@@ -471,7 +472,7 @@ static char *ft_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
        struct ft_tpg *tpg = se_tpg->se_tpg_fabric_ptr;
 
-       return tpg->lport_acl->name;
+       return tpg->lport_wwn->name;
 }
 
 static u16 ft_get_tag(struct se_portal_group *se_tpg)
@@ -536,12 +537,13 @@ static struct target_core_fabric_ops ft_fabric_ops = {
        .queue_data_in =                ft_queue_data_in,
        .queue_status =                 ft_queue_status,
        .queue_tm_rsp =                 ft_queue_tm_resp,
+       .aborted_task =                 ft_aborted_task,
        /*
         * Setup function pointers for generic logic in
         * target_core_fabric_configfs.c
         */
-       .fabric_make_wwn =              &ft_add_lport,
-       .fabric_drop_wwn =              &ft_del_lport,
+       .fabric_make_wwn =              &ft_add_wwn,
+       .fabric_drop_wwn =              &ft_del_wwn,
        .fabric_make_tpg =              &ft_add_tpg,
        .fabric_drop_tpg =              &ft_del_tpg,
        .fabric_post_link =             NULL,
index ae52c08dad09071114e730c44b142f49bde1924e..21ce50880c79d1480166b08b52b4bb81cc85d4d6 100644 (file)
@@ -51,7 +51,7 @@ static void ft_sess_delete_all(struct ft_tport *);
  * Lookup or allocate target local port.
  * Caller holds ft_lport_lock.
  */
-static struct ft_tport *ft_tport_create(struct fc_lport *lport)
+static struct ft_tport *ft_tport_get(struct fc_lport *lport)
 {
        struct ft_tpg *tpg;
        struct ft_tport *tport;
@@ -68,6 +68,7 @@ static struct ft_tport *ft_tport_create(struct fc_lport *lport)
 
        if (tport) {
                tport->tpg = tpg;
+               tpg->tport = tport;
                return tport;
        }
 
@@ -114,7 +115,7 @@ static void ft_tport_delete(struct ft_tport *tport)
 void ft_lport_add(struct fc_lport *lport, void *arg)
 {
        mutex_lock(&ft_lport_lock);
-       ft_tport_create(lport);
+       ft_tport_get(lport);
        mutex_unlock(&ft_lport_lock);
 }
 
@@ -211,7 +212,8 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
                return NULL;
 
        sess->se_sess = transport_init_session_tags(TCM_FC_DEFAULT_TAGS,
-                                                   sizeof(struct ft_cmd));
+                                                   sizeof(struct ft_cmd),
+                                                   TARGET_PROT_NORMAL);
        if (IS_ERR(sess->se_sess)) {
                kfree(sess);
                return NULL;
@@ -350,7 +352,7 @@ static int ft_prli_locked(struct fc_rport_priv *rdata, u32 spp_len,
        struct ft_node_acl *acl;
        u32 fcp_parm;
 
-       tport = ft_tport_create(rdata->local_port);
+       tport = ft_tport_get(rdata->local_port);
        if (!tport)
                goto not_target;        /* not a target for this local port */
 
index 45af765a3198c4b0e55e96c878d609f8b6b34d8e..a99c63152b8dc4f9aa73b6d4ff26e3867721e9b0 100644 (file)
@@ -62,12 +62,16 @@ enum imx_thermal_trip {
 #define IMX_POLLING_DELAY              2000 /* millisecond */
 #define IMX_PASSIVE_DELAY              1000
 
+#define FACTOR0                                10000000
+#define FACTOR1                                15976
+#define FACTOR2                                4297157
+
 struct imx_thermal_data {
        struct thermal_zone_device *tz;
        struct thermal_cooling_device *cdev;
        enum thermal_device_mode mode;
        struct regmap *tempmon;
-       int c1, c2; /* See formula in imx_get_sensor_data() */
+       u32 c1, c2; /* See formula in imx_get_sensor_data() */
        unsigned long temp_passive;
        unsigned long temp_critical;
        unsigned long alarm_temp;
@@ -84,7 +88,7 @@ static void imx_set_alarm_temp(struct imx_thermal_data *data,
        int alarm_value;
 
        data->alarm_temp = alarm_temp;
-       alarm_value = (alarm_temp - data->c2) / data->c1;
+       alarm_value = (data->c2 - alarm_temp) / data->c1;
        regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_ALARM_VALUE_MASK);
        regmap_write(map, TEMPSENSE0 + REG_SET, alarm_value <<
                        TEMPSENSE0_ALARM_VALUE_SHIFT);
@@ -136,7 +140,7 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
        n_meas = (val & TEMPSENSE0_TEMP_CNT_MASK) >> TEMPSENSE0_TEMP_CNT_SHIFT;
 
        /* See imx_get_sensor_data() for formula derivation */
-       *temp = data->c2 + data->c1 * n_meas;
+       *temp = data->c2 - n_meas * data->c1;
 
        /* Update alarm value to next higher trip point */
        if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive)
@@ -305,6 +309,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        int t1, t2, n1, n2;
        int ret;
        u32 val;
+       u64 temp64;
 
        map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                              "fsl,tempmon-data");
@@ -330,6 +335,8 @@ static int imx_get_sensor_data(struct platform_device *pdev)
         *   [31:20] - sensor value @ 25C
         *    [19:8] - sensor value of hot
         *     [7:0] - hot temperature value
+        * Use the universal formula now; it only needs the sensor value @ 25C:
+        * slope = 0.4297157 - (0.0015976 * 25C fuse)
         */
        n1 = val >> 20;
        n2 = (val & 0xfff00) >> 8;
@@ -337,20 +344,26 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        t1 = 25; /* t1 always 25C */
 
        /*
-        * Derived from linear interpolation,
-        * Tmeas = T2 + (Nmeas - N2) * (T1 - T2) / (N1 - N2)
+        * Derived from linear interpolation:
+        * slope = 0.4297157 - (0.0015976 * 25C fuse)
+        * slope = (FACTOR2 - FACTOR1 * n1) / FACTOR0
+        * (Nmeas - n1) / (Tmeas - t1) = slope
         * We want to reduce this down to the minimum computation necessary
         * for each temperature read.  Also, we want Tmeas in millicelsius
         * and we don't want to lose precision from integer division. So...
-        * milli_Tmeas = 1000 * T2 + 1000 * (Nmeas - N2) * (T1 - T2) / (N1 - N2)
-        * Let constant c1 = 1000 * (T1 - T2) / (N1 - N2)
-        * milli_Tmeas = (1000 * T2) + c1 * (Nmeas - N2)
-        * milli_Tmeas = (1000 * T2) + (c1 * Nmeas) - (c1 * N2)
-        * Let constant c2 = (1000 * T2) - (c1 * N2)
-        * milli_Tmeas = c2 + (c1 * Nmeas)
+        * Tmeas = (Nmeas - n1) / slope + t1
+        * milli_Tmeas = 1000 * (Nmeas - n1) / slope + 1000 * t1
+        * milli_Tmeas = -1000 * (n1 - Nmeas) / slope + 1000 * t1
+        * Let constant c1 = (-1000 / slope)
+        * milli_Tmeas = (n1 - Nmeas) * c1 + 1000 * t1
+        * Let constant c2 = n1 * c1 + 1000 * t1
+        * milli_Tmeas = c2 - Nmeas * c1
         */
-       data->c1 = 1000 * (t1 - t2) / (n1 - n2);
-       data->c2 = 1000 * t2 - data->c1 * n2;
+       temp64 = FACTOR0;
+       temp64 *= 1000;
+       do_div(temp64, FACTOR1 * n1 - FACTOR2);
+       data->c1 = temp64;
+       data->c2 = n1 * data->c1 + 1000 * t1;
 
        /*
         * Set the default passive cooling trip point to 20 °C below the
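
For anyone checking the fixed-point math in the comment above, here is a small standalone C sketch of the same computation. The fuse readings are invented for illustration (n1 = 1600 and n_meas = 1550 are not real calibration data); only the FACTOR* constants come from the patch.

#include <stdint.h>
#include <stdio.h>

#define FACTOR0 10000000ULL
#define FACTOR1 15976ULL
#define FACTOR2 4297157ULL

int main(void)
{
	uint32_t n1 = 1600;	/* hypothetical sensor count at 25C */
	uint32_t n_meas = 1550;	/* hypothetical current sensor count */
	int t1 = 25;

	/* c1 = -1000 / slope; slope is negative, so the divisor below is positive */
	uint64_t temp64 = FACTOR0 * 1000ULL;
	uint32_t c1 = (uint32_t)(temp64 / (FACTOR1 * n1 - FACTOR2));
	uint32_t c2 = n1 * c1 + 1000 * t1;

	/* milli_Tmeas = c2 - Nmeas * c1 -> 48500 here, i.e. 48.5 C */
	printf("c1=%u c2=%u milli_T=%u\n", c1, c2, c2 - n_meas * c1);
	return 0;
}
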
index 79a09d02bbca04ce97c1bf621508f3ae46d7d93d..5a37940b02c99df447a0fc024177edccabd96635 100644 (file)
@@ -299,12 +299,17 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
 static void rcar_thermal_work(struct work_struct *work)
 {
        struct rcar_thermal_priv *priv;
+       unsigned long cctemp, nctemp;
 
        priv = container_of(work, struct rcar_thermal_priv, work.work);
 
+       rcar_thermal_get_temp(priv->zone, &cctemp);
        rcar_thermal_update_temp(priv);
        rcar_thermal_irq_enable(priv);
-       thermal_zone_device_update(priv->zone);
+
+       rcar_thermal_get_temp(priv->zone, &nctemp);
+       if (nctemp != cctemp)
+               thermal_zone_device_update(priv->zone);
 }
 
 static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -313,7 +318,7 @@ static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
 
        status = (status >> rcar_id_to_shift(priv)) & 0x3;
 
-       if (status & 0x3) {
+       if (status) {
                dev_dbg(dev, "thermal%d %s%s\n",
                        priv->id,
                        (status & 0x2) ? "Rising " : "",
index 74c0e3474d6e935a7d16bf29e6304e59b9a01a1f..3ab12ee359b79325751d6d1671dda2850ec8b019 100644 (file)
@@ -1500,10 +1500,8 @@ static int ti_bandgap_resume(struct device *dev)
 
        return ti_bandgap_restore_ctxt(bgp);
 }
-static const struct dev_pm_ops ti_bandgap_dev_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ti_bandgap_suspend,
-                               ti_bandgap_resume)
-};
+static SIMPLE_DEV_PM_OPS(ti_bandgap_dev_pm_ops, ti_bandgap_suspend,
+                        ti_bandgap_resume);
 
 #define DEV_PM_OPS     (&ti_bandgap_dev_pm_ops)
 #else
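
On the ti-bandgap hunk above: SIMPLE_DEV_PM_OPS() is the stock helper from <linux/pm.h>, and in kernels of this era it expands to essentially the struct being removed (expansion quoted from memory, so treat it as approximate rather than authoritative):

#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn)	\
const struct dev_pm_ops name = {			\
	SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)	\
}

With the leading "static" kept at the call site, the result is the same static const ops table as before, so this is a purely mechanical cleanup.
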
index b0e540137e398cf0ae2ef643949bf1250cc02399..90ca082935f635236a5c2ca5f4da48b99297b48f 100644 (file)
@@ -65,6 +65,7 @@ static void tty_audit_log(const char *description, int major, int minor,
 {
        struct audit_buffer *ab;
        struct task_struct *tsk = current;
+       pid_t pid = task_pid_nr(tsk);
        uid_t uid = from_kuid(&init_user_ns, task_uid(tsk));
        uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk));
        unsigned int sessionid = audit_get_sessionid(tsk);
@@ -74,7 +75,7 @@ static void tty_audit_log(const char *description, int major, int minor,
                char name[sizeof(tsk->comm)];
 
                audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
-                                " minor=%d comm=", description, tsk->pid, uid,
+                                " minor=%d comm=", description, pid, uid,
                                 loginuid, sessionid, major, minor);
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
index 460c266b8e24bf2bf3c4b8459f2cb5443b9694e2..f058c0368d61a4b4663bca3bd58a3558e6c5ec04 100644 (file)
@@ -1471,6 +1471,11 @@ static void usbg_queue_tm_rsp(struct se_cmd *se_cmd)
 {
 }
 
+static void usbg_aborted_task(struct se_cmd *se_cmd)
+{
+       return;
+}
+
 static const char *usbg_check_wwn(const char *name)
 {
        const char *n;
@@ -1726,7 +1731,7 @@ static int tcm_usbg_make_nexus(struct usbg_tpg *tpg, char *name)
                pr_err("Unable to allocate struct tcm_vhost_nexus\n");
                goto err_unlock;
        }
-       tv_nexus->tvn_se_sess = transport_init_session();
+       tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(tv_nexus->tvn_se_sess))
                goto err_free;
 
@@ -1897,6 +1902,7 @@ static struct target_core_fabric_ops usbg_ops = {
        .queue_data_in                  = usbg_send_read_response,
        .queue_status                   = usbg_send_status_response,
        .queue_tm_rsp                   = usbg_queue_tm_rsp,
+       .aborted_task                   = usbg_aborted_task,
        .check_stop_free                = usbg_check_stop_free,
 
        .fabric_make_wwn                = usbg_make_tport,
index e1e22e0f01e881fe2961dbf1c43cb56f4e22cade..be414d2b2b22d8005723b0c1c45674c3c33aa41b 100644 (file)
@@ -818,9 +818,9 @@ static int vhost_net_release(struct inode *inode, struct file *f)
        vhost_dev_cleanup(&n->dev, false);
        vhost_net_vq_reset(n);
        if (tx_sock)
-               fput(tx_sock->file);
+               sockfd_put(tx_sock);
        if (rx_sock)
-               fput(rx_sock->file);
+               sockfd_put(rx_sock);
        /* Make sure no callbacks are outstanding */
        synchronize_rcu_bh();
        /* We do an extra flush before freeing memory,
@@ -860,7 +860,7 @@ static struct socket *get_raw_socket(int fd)
        }
        return sock;
 err:
-       fput(sock->file);
+       sockfd_put(sock);
        return ERR_PTR(r);
 }
 
@@ -966,7 +966,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 
        if (oldsock) {
                vhost_net_flush_vq(n, index);
-               fput(oldsock->file);
+               sockfd_put(oldsock);
        }
 
        mutex_unlock(&n->dev.mutex);
@@ -978,7 +978,7 @@ err_used:
        if (ubufs)
                vhost_net_ubuf_put_wait_and_free(ubufs);
 err_ubufs:
-       fput(sock->file);
+       sockfd_put(sock);
 err_vq:
        mutex_unlock(&vq->mutex);
 err:
@@ -1009,9 +1009,9 @@ static long vhost_net_reset_owner(struct vhost_net *n)
 done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
-               fput(tx_sock->file);
+               sockfd_put(tx_sock);
        if (rx_sock)
-               fput(rx_sock->file);
+               sockfd_put(rx_sock);
        return err;
 }
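
For context on the vhost-net hunk above: sockfd_put() is the existing helper that pairs with sockfd_lookup(), so the change removes open-coded reference dropping rather than altering behaviour. If memory serves it is a static inline in net/sock.h along these lines, meaning the generated code is still the same fput():

static inline void sockfd_put(struct socket *sock)
{
	fput(sock->file);
}
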
 
index e48d4a672580cd5eefaf741946435073a4e58e8c..cf50ce93975bcddb240c3356b4326aac0fc8c3cb 100644 (file)
@@ -539,6 +539,11 @@ static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
        return;
 }
 
+static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+{
+       return;
+}
+
 static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
 {
        vs->vs_events_nr--;
@@ -1740,7 +1745,8 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
         */
        tv_nexus->tvn_se_sess = transport_init_session_tags(
                                        TCM_VHOST_DEFAULT_TAGS,
-                                       sizeof(struct tcm_vhost_cmd));
+                                       sizeof(struct tcm_vhost_cmd),
+                                       TARGET_PROT_NORMAL);
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
                mutex_unlock(&tpg->tv_tpg_mutex);
                kfree(tv_nexus);
@@ -2131,6 +2137,7 @@ static struct target_core_fabric_ops tcm_vhost_ops = {
        .queue_data_in                  = tcm_vhost_queue_data_in,
        .queue_status                   = tcm_vhost_queue_status,
        .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
+       .aborted_task                   = tcm_vhost_aborted_task,
        /*
         * Setup callers for generic logic in target_core_fabric_configfs.c
         */
index 27d3cf255e78f3045a3190cd0c1451e1a216d0f3..bd2172c2d650705a5e9e03fe4a251aefccc7b789 100644 (file)
@@ -347,7 +347,7 @@ struct backlight_device *backlight_device_register(const char *name,
 
        rc = device_register(&new_bd->dev);
        if (rc) {
-               kfree(new_bd);
+               put_device(&new_bd->dev);
                return ERR_PTR(rc);
        }
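
The backlight fix above follows the usual driver-core rule: once device_register() has been called, the embedded struct device is owned by the kobject refcount, so the error path must drop it with put_device() (which ends up in the ->release() callback) rather than kfree(), which could leak the kobject name or free memory that still has references. A minimal sketch of that pattern with an invented foo_dev type, not the backlight driver's real code:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>

struct foo_dev {
	struct device dev;		/* hypothetical wrapper embedding a device */
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_dev, dev));
}

static struct foo_dev *foo_create(struct device *parent)
{
	struct foo_dev *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
	int rc;

	if (!fd)
		return ERR_PTR(-ENOMEM);

	device_initialize(&fd->dev);
	fd->dev.parent = parent;
	fd->dev.release = foo_release;
	dev_set_name(&fd->dev, "foo0");

	rc = device_add(&fd->dev);	/* device_register() == initialize + add */
	if (rc) {
		put_device(&fd->dev);	/* drops the last ref; foo_release() frees */
		return ERR_PTR(rc);
	}
	return fd;
}
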
 
index 81fb12770c2a259f1ff2a3cc45103b6bc85b5d8a..a2eba12e1cb78b36fcfd96a0f38ce98bce2406a2 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
 #include <linux/platform_data/gpio_backlight.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -23,6 +25,7 @@ struct gpio_backlight {
 
        int gpio;
        int active;
+       int def_value;
 };
 
 static int gpio_backlight_update_status(struct backlight_device *bl)
@@ -60,6 +63,29 @@ static const struct backlight_ops gpio_backlight_ops = {
        .check_fb       = gpio_backlight_check_fb,
 };
 
+static int gpio_backlight_probe_dt(struct platform_device *pdev,
+                                  struct gpio_backlight *gbl)
+{
+       struct device_node *np = pdev->dev.of_node;
+       enum of_gpio_flags gpio_flags;
+
+       gbl->gpio = of_get_gpio_flags(np, 0, &gpio_flags);
+
+       if (!gpio_is_valid(gbl->gpio)) {
+               if (gbl->gpio != -EPROBE_DEFER) {
+                       dev_err(&pdev->dev,
+                               "Error: The gpios parameter is missing or invalid.\n");
+               }
+               return gbl->gpio;
+       }
+
+       gbl->active = (gpio_flags & OF_GPIO_ACTIVE_LOW) ? 0 : 1;
+
+       gbl->def_value = of_property_read_bool(np, "default-on");
+
+       return 0;
+}
+
 static int gpio_backlight_probe(struct platform_device *pdev)
 {
        struct gpio_backlight_platform_data *pdata =
@@ -67,10 +93,12 @@ static int gpio_backlight_probe(struct platform_device *pdev)
        struct backlight_properties props;
        struct backlight_device *bl;
        struct gpio_backlight *gbl;
+       struct device_node *np = pdev->dev.of_node;
        int ret;
 
-       if (!pdata) {
-               dev_err(&pdev->dev, "failed to find platform data\n");
+       if (!pdata && !np) {
+               dev_err(&pdev->dev,
+                       "failed to find platform data or device tree node.\n");
                return -ENODEV;
        }
 
@@ -79,14 +107,22 @@ static int gpio_backlight_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        gbl->dev = &pdev->dev;
-       gbl->fbdev = pdata->fbdev;
-       gbl->gpio = pdata->gpio;
-       gbl->active = pdata->active_low ? 0 : 1;
+
+       if (np) {
+               ret = gpio_backlight_probe_dt(pdev, gbl);
+               if (ret)
+                       return ret;
+       } else {
+               gbl->fbdev = pdata->fbdev;
+               gbl->gpio = pdata->gpio;
+               gbl->active = pdata->active_low ? 0 : 1;
+               gbl->def_value = pdata->def_value;
+       }
 
        ret = devm_gpio_request_one(gbl->dev, gbl->gpio, GPIOF_DIR_OUT |
                                    (gbl->active ? GPIOF_INIT_LOW
                                                 : GPIOF_INIT_HIGH),
-                                   pdata->name);
+                                   pdata ? pdata->name : "backlight");
        if (ret < 0) {
                dev_err(&pdev->dev, "unable to request GPIO\n");
                return ret;
@@ -103,17 +139,25 @@ static int gpio_backlight_probe(struct platform_device *pdev)
                return PTR_ERR(bl);
        }
 
-       bl->props.brightness = pdata->def_value;
+       bl->props.brightness = gbl->def_value;
        backlight_update_status(bl);
 
        platform_set_drvdata(pdev, bl);
        return 0;
 }
 
+#ifdef CONFIG_OF
+static struct of_device_id gpio_backlight_of_match[] = {
+       { .compatible = "gpio-backlight" },
+       { /* sentinel */ }
+};
+#endif
+
 static struct platform_driver gpio_backlight_driver = {
        .driver         = {
                .name           = "gpio-backlight",
                .owner          = THIS_MODULE,
+               .of_match_table = of_match_ptr(gpio_backlight_of_match),
        },
        .probe          = gpio_backlight_probe,
 };
index 6fd60adf922ede6393bcd153776bfeebebc998af..5f36808d214f0d27665ced0baaf87e4f9a95b6ff 100644 (file)
@@ -349,8 +349,9 @@ static int lm3639_probe(struct i2c_client *client,
        props.brightness = pdata->init_brt_led;
        props.max_brightness = pdata->max_brt_led;
        pchip->bled =
-           backlight_device_register("lm3639_bled", pchip->dev, pchip,
-                                     &lm3639_bled_ops, &props);
+           devm_backlight_device_register(pchip->dev, "lm3639_bled",
+                                          pchip->dev, pchip, &lm3639_bled_ops,
+                                          &props);
        if (IS_ERR(pchip->bled)) {
                dev_err(&client->dev, "fail : backlight register\n");
                ret = PTR_ERR(pchip->bled);
@@ -360,7 +361,7 @@ static int lm3639_probe(struct i2c_client *client,
        ret = device_create_file(&(pchip->bled->dev), &dev_attr_bled_mode);
        if (ret < 0) {
                dev_err(&client->dev, "failed : add sysfs entries\n");
-               goto err_bled_mode;
+               goto err_out;
        }
 
        /* flash */
@@ -391,8 +392,6 @@ err_torch:
        led_classdev_unregister(&pchip->cdev_flash);
 err_flash:
        device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
-err_bled_mode:
-       backlight_device_unregister(pchip->bled);
 err_out:
        return ret;
 }
@@ -407,10 +406,8 @@ static int lm3639_remove(struct i2c_client *client)
                led_classdev_unregister(&pchip->cdev_torch);
        if (&pchip->cdev_flash)
                led_classdev_unregister(&pchip->cdev_flash);
-       if (pchip->bled) {
+       if (pchip->bled)
                device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
-               backlight_device_unregister(pchip->bled);
-       }
        return 0;
 }
 
@@ -432,6 +429,6 @@ static struct i2c_driver lm3639_i2c_driver = {
 module_i2c_driver(lm3639_i2c_driver);
 
 MODULE_DESCRIPTION("Texas Instruments Backlight+Flash LED driver for LM3639");
-MODULE_AUTHOR("Daniel Jeong <daniel.jeong@ti.com>");
-MODULE_AUTHOR("G.Shark Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Daniel Jeong <gshark.jeong@gmail.com>");
+MODULE_AUTHOR("Ldd Mlp <ldd-mlp@list.ti.com>");
 MODULE_LICENSE("GPL v2");
index 062a5f6a1448c6cff1cd1dc09e84db3f0d59cc27..12a3de0ee6dacbdea873ec9ea28bdd88d1ea999d 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -52,7 +52,8 @@
 struct aio_ring {
        unsigned        id;     /* kernel internal index number */
        unsigned        nr;     /* number of io_events */
-       unsigned        head;
+       unsigned        head;   /* Written to by userland or under ring_lock
+                                * mutex by aio_read_events_ring(). */
        unsigned        tail;
 
        unsigned        magic;
@@ -243,6 +244,11 @@ static void aio_free_ring(struct kioctx *ctx)
 {
        int i;
 
+       /* Disconnect the kioctx from the ring file.  This prevents future
+        * accesses to the kioctx from page migration.
+        */
+       put_aio_ring_file(ctx);
+
        for (i = 0; i < ctx->nr_pages; i++) {
                struct page *page;
                pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
@@ -254,8 +260,6 @@ static void aio_free_ring(struct kioctx *ctx)
                put_page(page);
        }
 
-       put_aio_ring_file(ctx);
-
        if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
                kfree(ctx->ring_pages);
                ctx->ring_pages = NULL;
@@ -283,29 +287,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
 {
        struct kioctx *ctx;
        unsigned long flags;
+       pgoff_t idx;
        int rc;
 
        rc = 0;
 
-       /* Make sure the old page hasn't already been changed */
+       /* mapping->private_lock here protects against the kioctx teardown.  */
        spin_lock(&mapping->private_lock);
        ctx = mapping->private_data;
-       if (ctx) {
-               pgoff_t idx;
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               idx = old->index;
-               if (idx < (pgoff_t)ctx->nr_pages) {
-                       if (ctx->ring_pages[idx] != old)
-                               rc = -EAGAIN;
-               } else
-                       rc = -EINVAL;
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       if (!ctx) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       /* The ring_lock mutex.  This prevents aio_read_events() from writing
+        * to the ring's head, and prevents page migration from mucking in
+        * a partially initialized kioctx.
+        */
+       if (!mutex_trylock(&ctx->ring_lock)) {
+               rc = -EAGAIN;
+               goto out;
+       }
+
+       idx = old->index;
+       if (idx < (pgoff_t)ctx->nr_pages) {
+               /* Make sure the old page hasn't already been changed */
+               if (ctx->ring_pages[idx] != old)
+                       rc = -EAGAIN;
        } else
                rc = -EINVAL;
-       spin_unlock(&mapping->private_lock);
 
        if (rc != 0)
-               return rc;
+               goto out_unlock;
 
        /* Writeback must be complete */
        BUG_ON(PageWriteback(old));
@@ -314,38 +327,26 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
        rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
        if (rc != MIGRATEPAGE_SUCCESS) {
                put_page(new);
-               return rc;
+               goto out_unlock;
        }
 
-       /* We can potentially race against kioctx teardown here.  Use the
-        * address_space's private data lock to protect the mapping's
-        * private_data.
+       /* Take completion_lock to prevent other writes to the ring buffer
+        * while the old page is copied to the new.  This prevents new
+        * events from being lost.
         */
-       spin_lock(&mapping->private_lock);
-       ctx = mapping->private_data;
-       if (ctx) {
-               pgoff_t idx;
-               spin_lock_irqsave(&ctx->completion_lock, flags);
-               migrate_page_copy(new, old);
-               idx = old->index;
-               if (idx < (pgoff_t)ctx->nr_pages) {
-                       /* And only do the move if things haven't changed */
-                       if (ctx->ring_pages[idx] == old)
-                               ctx->ring_pages[idx] = new;
-                       else
-                               rc = -EAGAIN;
-               } else
-                       rc = -EINVAL;
-               spin_unlock_irqrestore(&ctx->completion_lock, flags);
-       } else
-               rc = -EBUSY;
-       spin_unlock(&mapping->private_lock);
+       spin_lock_irqsave(&ctx->completion_lock, flags);
+       migrate_page_copy(new, old);
+       BUG_ON(ctx->ring_pages[idx] != old);
+       ctx->ring_pages[idx] = new;
+       spin_unlock_irqrestore(&ctx->completion_lock, flags);
 
-       if (rc == MIGRATEPAGE_SUCCESS)
-               put_page(old);
-       else
-               put_page(new);
+       /* The old page is no longer accessible. */
+       put_page(old);
 
+out_unlock:
+       mutex_unlock(&ctx->ring_lock);
+out:
+       spin_unlock(&mapping->private_lock);
        return rc;
 }
 #endif
@@ -380,7 +381,7 @@ static int aio_setup_ring(struct kioctx *ctx)
        file = aio_private_file(ctx, nr_pages);
        if (IS_ERR(file)) {
                ctx->aio_ring_file = NULL;
-               return -EAGAIN;
+               return -ENOMEM;
        }
 
        ctx->aio_ring_file = file;
@@ -415,7 +416,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
        if (unlikely(i != nr_pages)) {
                aio_free_ring(ctx);
-               return -EAGAIN;
+               return -ENOMEM;
        }
 
        ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -429,7 +430,7 @@ static int aio_setup_ring(struct kioctx *ctx)
        if (IS_ERR((void *)ctx->mmap_base)) {
                ctx->mmap_size = 0;
                aio_free_ring(ctx);
-               return -EAGAIN;
+               return -ENOMEM;
        }
 
        pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
@@ -556,6 +557,10 @@ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
                                        rcu_read_unlock();
                                        spin_unlock(&mm->ioctx_lock);
 
+                                       /* While kioctx setup is in progress,
+                                        * we are protected from page migration
+                                        * changing ring_pages by ->ring_lock.
+                                        */
                                        ring = kmap_atomic(ctx->ring_pages[0]);
                                        ring->id = ctx->id;
                                        kunmap_atomic(ring);
@@ -640,24 +645,28 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 
        ctx->max_reqs = nr_events;
 
-       if (percpu_ref_init(&ctx->users, free_ioctx_users))
-               goto err;
-
-       if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
-               goto err;
-
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->completion_lock);
        mutex_init(&ctx->ring_lock);
+       /* Protect against page migration throughout kioctx setup by keeping
+        * the ring_lock mutex held until setup is complete. */
+       mutex_lock(&ctx->ring_lock);
        init_waitqueue_head(&ctx->wait);
 
        INIT_LIST_HEAD(&ctx->active_reqs);
 
+       if (percpu_ref_init(&ctx->users, free_ioctx_users))
+               goto err;
+
+       if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
+               goto err;
+
        ctx->cpu = alloc_percpu(struct kioctx_cpu);
        if (!ctx->cpu)
                goto err;
 
-       if (aio_setup_ring(ctx) < 0)
+       err = aio_setup_ring(ctx);
+       if (err < 0)
                goto err;
 
        atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
@@ -683,6 +692,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        if (err)
                goto err_cleanup;
 
+       /* Release the ring_lock mutex now that all setup is complete. */
+       mutex_unlock(&ctx->ring_lock);
+
        pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
                 ctx, ctx->user_id, mm, ctx->nr_events);
        return ctx;
@@ -692,6 +704,7 @@ err_cleanup:
 err_ctx:
        aio_free_ring(ctx);
 err:
+       mutex_unlock(&ctx->ring_lock);
        free_percpu(ctx->cpu);
        free_percpu(ctx->reqs.pcpu_count);
        free_percpu(ctx->users.pcpu_count);
@@ -1024,6 +1037,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
        mutex_lock(&ctx->ring_lock);
 
+       /* Access to ->ring_pages here is protected by ctx->ring_lock. */
        ring = kmap_atomic(ctx->ring_pages[0]);
        head = ring->head;
        tail = ring->tail;
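
The aio rework above moves the migration path to a trylock: if ring_lock is contended (setup still running, or a reader inside aio_read_events_ring()), aio_migratepage() now bails out with -EAGAIN while holding only mapping->private_lock, and the migration core simply retries the page later. A tiny userspace analogue of that trylock-and-defer shape, purely illustrative (the names and the pthread API are not the kernel's):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static int try_migrate_page(void)
{
	if (pthread_mutex_trylock(&ring_lock) != 0)
		return -EAGAIN;		/* contended: defer, caller retries later */

	/* ... move the page while the ring cannot change underneath us ... */

	pthread_mutex_unlock(&ring_lock);
	return 0;
}

int main(void)
{
	printf("migrate attempt: %d\n", try_migrate_page());
	return 0;
}
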
index 29696b78d1f49f105914dca5ce0a981fafb63a9c..1c2ce0c8771133194ecb9d91517a7dd67c571765 100644 (file)
@@ -182,6 +182,9 @@ static int bdev_integrity_enabled(struct block_device *bdev, int rw)
  */
 int bio_integrity_enabled(struct bio *bio)
 {
+       if (!bio_is_rw(bio))
+               return 0;
+
        /* Already protected? */
        if (bio_integrity(bio))
                return 0;
@@ -309,10 +312,9 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
 {
        struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
        struct blk_integrity_exchg bix;
-       struct bio_vec bv;
-       struct bvec_iter iter;
+       struct bio_vec *bv;
        sector_t sector;
-       unsigned int sectors, ret = 0;
+       unsigned int sectors, ret = 0, i;
        void *prot_buf = bio->bi_integrity->bip_buf;
 
        if (operate)
@@ -323,16 +325,16 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
        bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
        bix.sector_size = bi->sector_size;
 
-       bio_for_each_segment(bv, bio, iter) {
-               void *kaddr = kmap_atomic(bv.bv_page);
-               bix.data_buf = kaddr + bv.bv_offset;
-               bix.data_size = bv.bv_len;
+       bio_for_each_segment_all(bv, bio, i) {
+               void *kaddr = kmap_atomic(bv->bv_page);
+               bix.data_buf = kaddr + bv->bv_offset;
+               bix.data_size = bv->bv_len;
                bix.prot_buf = prot_buf;
                bix.sector = sector;
 
-               if (operate) {
+               if (operate)
                        bi->generate_fn(&bix);
-               } else {
+               else {
                        ret = bi->verify_fn(&bix);
                        if (ret) {
                                kunmap_atomic(kaddr);
@@ -340,7 +342,7 @@ static int bio_integrity_generate_verify(struct bio *bio, int operate)
                        }
                }
 
-               sectors = bv.bv_len / bi->sector_size;
+               sectors = bv->bv_len / bi->sector_size;
                sector += sectors;
                prot_buf += sectors * bi->tuple_size;
 
index b1bc722b89aa6b99a6e2c8dcf64aa4dbbd0d82ff..6f0362b77806c61909aa37433a9e77eb77476cff 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1002,7 +1002,7 @@ struct bio_map_data {
 };
 
 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
-                            struct sg_iovec *iov, int iov_count,
+                            const struct sg_iovec *iov, int iov_count,
                             int is_our_pages)
 {
        memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
@@ -1022,7 +1022,7 @@ static struct bio_map_data *bio_alloc_map_data(int nr_segs,
                       sizeof(struct sg_iovec) * iov_count, gfp_mask);
 }
 
-static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
+static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
                          int to_user, int from_user, int do_free_page)
 {
        int ret = 0, i;
@@ -1120,7 +1120,7 @@ EXPORT_SYMBOL(bio_uncopy_user);
  */
 struct bio *bio_copy_user_iov(struct request_queue *q,
                              struct rq_map_data *map_data,
-                             struct sg_iovec *iov, int iov_count,
+                             const struct sg_iovec *iov, int iov_count,
                              int write_to_vm, gfp_t gfp_mask)
 {
        struct bio_map_data *bmd;
@@ -1259,7 +1259,7 @@ EXPORT_SYMBOL(bio_copy_user);
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
                                      struct block_device *bdev,
-                                     struct sg_iovec *iov, int iov_count,
+                                     const struct sg_iovec *iov, int iov_count,
                                      int write_to_vm, gfp_t gfp_mask)
 {
        int i, j;
@@ -1407,7 +1407,7 @@ EXPORT_SYMBOL(bio_map_user);
  *     device. Returns an error pointer in case of error.
  */
 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
-                            struct sg_iovec *iov, int iov_count,
+                            const struct sg_iovec *iov, int iov_count,
                             int write_to_vm, gfp_t gfp_mask)
 {
        struct bio *bio;
index ba0d2b05bb787a28e59629a05586441e8b9386af..552a8d13bc321f4d1cf64fb9b3171893e28e73e9 100644 (file)
@@ -1518,7 +1518,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
        BUG_ON(iocb->ki_pos != pos);
 
        blk_start_plug(&plug);
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_aio_write(iocb, iov, nr_segs);
        if (ret > 0) {
                ssize_t err;
 
index ecb5832c0967e96ffcb0084435f3a37f69f0d661..5a201d81049c09fcb280f26539d0ad5c6c5c5b51 100644 (file)
@@ -323,6 +323,8 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
 
 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
 {
+       if (!wq)
+               return;
        wq->normal->max_active = max;
        if (wq->high)
                wq->high->max_active = max;
index aad7201ad11bb767cbe760df38f88572fe788cf5..10db21fa09263786db6d094c64ee7b16a1c2c2ae 100644 (file)
@@ -330,7 +330,10 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
        }
 
-       root_level = btrfs_old_root_level(root, time_seq);
+       if (path->search_commit_root)
+               root_level = btrfs_header_level(root->commit_root);
+       else
+               root_level = btrfs_old_root_level(root, time_seq);
 
        if (root_level + 1 == level) {
                srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -1099,9 +1102,9 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
  *
  * returns 0 on success, < 0 on error.
  */
-int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
-                               struct btrfs_fs_info *fs_info, u64 bytenr,
-                               u64 time_seq, struct ulist **roots)
+static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+                                 struct btrfs_fs_info *fs_info, u64 bytenr,
+                                 u64 time_seq, struct ulist **roots)
 {
        struct ulist *tmp;
        struct ulist_node *node = NULL;
@@ -1137,6 +1140,20 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+                        struct btrfs_fs_info *fs_info, u64 bytenr,
+                        u64 time_seq, struct ulist **roots)
+{
+       int ret;
+
+       if (!trans)
+               down_read(&fs_info->commit_root_sem);
+       ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
+       if (!trans)
+               up_read(&fs_info->commit_root_sem);
+       return ret;
+}
+
 /*
  * this makes the path point to (inum INODE_ITEM ioff)
  */
@@ -1516,6 +1533,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                if (IS_ERR(trans))
                        return PTR_ERR(trans);
                btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
+       } else {
+               down_read(&fs_info->commit_root_sem);
        }
 
        ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
@@ -1526,8 +1545,8 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
 
        ULIST_ITER_INIT(&ref_uiter);
        while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
-               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
-                                          tree_mod_seq_elem.seq, &roots);
+               ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
+                                            tree_mod_seq_elem.seq, &roots);
                if (ret)
                        break;
                ULIST_ITER_INIT(&root_uiter);
@@ -1549,6 +1568,8 @@ out:
        if (!search_commit_root) {
                btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
                btrfs_end_transaction(trans, fs_info->extent_root);
+       } else {
+               up_read(&fs_info->commit_root_sem);
        }
 
        return ret;
index 88d1b1eedc9cd3a9679758dc1d209e0bf88767aa..1bcfcdb23cf4c4b999e75bc0896466938aaaaeb7 100644 (file)
@@ -2769,9 +2769,13 @@ again:
                 * the commit roots are read only
                 * so we always do read locks
                 */
+               if (p->need_commit_sem)
+                       down_read(&root->fs_info->commit_root_sem);
                b = root->commit_root;
                extent_buffer_get(b);
                level = btrfs_header_level(b);
+               if (p->need_commit_sem)
+                       up_read(&root->fs_info->commit_root_sem);
                if (!p->skip_locking)
                        btrfs_tree_read_lock(b);
        } else {
@@ -5360,7 +5364,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 {
        int ret;
        int cmp;
-       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_path *left_path = NULL;
        struct btrfs_path *right_path = NULL;
        struct btrfs_key left_key;
@@ -5378,9 +5381,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
        u64 right_blockptr;
        u64 left_gen;
        u64 right_gen;
-       u64 left_start_ctransid;
-       u64 right_start_ctransid;
-       u64 ctransid;
 
        left_path = btrfs_alloc_path();
        if (!left_path) {
@@ -5404,21 +5404,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
        right_path->search_commit_root = 1;
        right_path->skip_locking = 1;
 
-       spin_lock(&left_root->root_item_lock);
-       left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
-       spin_unlock(&left_root->root_item_lock);
-
-       spin_lock(&right_root->root_item_lock);
-       right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
-       spin_unlock(&right_root->root_item_lock);
-
-       trans = btrfs_join_transaction(left_root);
-       if (IS_ERR(trans)) {
-               ret = PTR_ERR(trans);
-               trans = NULL;
-               goto out;
-       }
-
        /*
         * Strategy: Go to the first items of both trees. Then do
         *
@@ -5455,6 +5440,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
         *   the right if possible or go up and right.
         */
 
+       down_read(&left_root->fs_info->commit_root_sem);
        left_level = btrfs_header_level(left_root->commit_root);
        left_root_level = left_level;
        left_path->nodes[left_level] = left_root->commit_root;
@@ -5464,6 +5450,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
        right_root_level = right_level;
        right_path->nodes[right_level] = right_root->commit_root;
        extent_buffer_get(right_path->nodes[right_level]);
+       up_read(&left_root->fs_info->commit_root_sem);
 
        if (left_level == 0)
                btrfs_item_key_to_cpu(left_path->nodes[left_level],
@@ -5482,67 +5469,6 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
        advance_left = advance_right = 0;
 
        while (1) {
-               /*
-                * We need to make sure the transaction does not get committed
-                * while we do anything on commit roots. This means, we need to
-                * join and leave transactions for every item that we process.
-                */
-               if (trans && btrfs_should_end_transaction(trans, left_root)) {
-                       btrfs_release_path(left_path);
-                       btrfs_release_path(right_path);
-
-                       ret = btrfs_end_transaction(trans, left_root);
-                       trans = NULL;
-                       if (ret < 0)
-                               goto out;
-               }
-               /* now rejoin the transaction */
-               if (!trans) {
-                       trans = btrfs_join_transaction(left_root);
-                       if (IS_ERR(trans)) {
-                               ret = PTR_ERR(trans);
-                               trans = NULL;
-                               goto out;
-                       }
-
-                       spin_lock(&left_root->root_item_lock);
-                       ctransid = btrfs_root_ctransid(&left_root->root_item);
-                       spin_unlock(&left_root->root_item_lock);
-                       if (ctransid != left_start_ctransid)
-                               left_start_ctransid = 0;
-
-                       spin_lock(&right_root->root_item_lock);
-                       ctransid = btrfs_root_ctransid(&right_root->root_item);
-                       spin_unlock(&right_root->root_item_lock);
-                       if (ctransid != right_start_ctransid)
-                               right_start_ctransid = 0;
-
-                       if (!left_start_ctransid || !right_start_ctransid) {
-                               WARN(1, KERN_WARNING
-                                       "BTRFS: btrfs_compare_tree detected "
-                                       "a change in one of the trees while "
-                                       "iterating. This is probably a "
-                                       "bug.\n");
-                               ret = -EIO;
-                               goto out;
-                       }
-
-                       /*
-                        * the commit root may have changed, so start again
-                        * where we stopped
-                        */
-                       left_path->lowest_level = left_level;
-                       right_path->lowest_level = right_level;
-                       ret = btrfs_search_slot(NULL, left_root,
-                                       &left_key, left_path, 0, 0);
-                       if (ret < 0)
-                               goto out;
-                       ret = btrfs_search_slot(NULL, right_root,
-                                       &right_key, right_path, 0, 0);
-                       if (ret < 0)
-                               goto out;
-               }
-
                if (advance_left && !left_end_reached) {
                        ret = tree_advance(left_root, left_path, &left_level,
                                        left_root_level,
@@ -5672,14 +5598,6 @@ out:
        btrfs_free_path(left_path);
        btrfs_free_path(right_path);
        kfree(tmp_buf);
-
-       if (trans) {
-               if (!ret)
-                       ret = btrfs_end_transaction(trans, left_root);
-               else
-                       btrfs_end_transaction(trans, left_root);
-       }
-
        return ret;
 }
 
index bc96c03dd259836de717f2ccaabc7b9032d33a34..4c48df572bd65d74636df643c77acda204af5418 100644 (file)
@@ -609,6 +609,7 @@ struct btrfs_path {
        unsigned int skip_locking:1;
        unsigned int leave_spinning:1;
        unsigned int search_commit_root:1;
+       unsigned int need_commit_sem:1;
 };
 
 /*
@@ -986,7 +987,8 @@ struct btrfs_dev_replace_item {
 #define BTRFS_BLOCK_GROUP_RAID10       (1ULL << 6)
 #define BTRFS_BLOCK_GROUP_RAID5         (1ULL << 7)
 #define BTRFS_BLOCK_GROUP_RAID6         (1ULL << 8)
-#define BTRFS_BLOCK_GROUP_RESERVED     BTRFS_AVAIL_ALLOC_BIT_SINGLE
+#define BTRFS_BLOCK_GROUP_RESERVED     (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
+                                        BTRFS_SPACE_INFO_GLOBAL_RSV)
 
 enum btrfs_raid_types {
        BTRFS_RAID_RAID10,
@@ -1018,6 +1020,12 @@ enum btrfs_raid_types {
  */
 #define BTRFS_AVAIL_ALLOC_BIT_SINGLE   (1ULL << 48)
 
+/*
+ * A fake block group type that is used to communicate global block reserve
+ * size to userspace via the SPACE_INFO ioctl.
+ */
+#define BTRFS_SPACE_INFO_GLOBAL_RSV    (1ULL << 49)
+
 #define BTRFS_EXTENDED_PROFILE_MASK    (BTRFS_BLOCK_GROUP_PROFILE_MASK | \
                                         BTRFS_AVAIL_ALLOC_BIT_SINGLE)
 
@@ -1440,7 +1448,7 @@ struct btrfs_fs_info {
         */
        struct mutex ordered_extent_flush_mutex;
 
-       struct rw_semaphore extent_commit_sem;
+       struct rw_semaphore commit_root_sem;
 
        struct rw_semaphore cleanup_work_sem;
 
@@ -1711,7 +1719,6 @@ struct btrfs_root {
        struct btrfs_block_rsv *block_rsv;
 
        /* free ino cache stuff */
-       struct mutex fs_commit_mutex;
        struct btrfs_free_space_ctl *free_ino_ctl;
        enum btrfs_caching_type cached;
        spinlock_t cache_lock;
index bd0f752b797ba3c3f9f5eedb9186b304b0ac1433..029d46c2e17048a20a02e7cb34955d22ff80e752 100644 (file)
@@ -329,6 +329,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 {
        struct extent_state *cached_state = NULL;
        int ret;
+       bool need_lock = (current->journal_info ==
+                         (void *)BTRFS_SEND_TRANS_STUB);
 
        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;
@@ -336,6 +338,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
        if (atomic)
                return -EAGAIN;
 
+       if (need_lock) {
+               btrfs_tree_read_lock(eb);
+               btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+       }
+
        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state);
        if (extent_buffer_uptodate(eb) &&
@@ -347,10 +354,21 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
                       "found %llu\n",
                       eb->start, parent_transid, btrfs_header_generation(eb));
        ret = 1;
-       clear_extent_buffer_uptodate(eb);
+
+       /*
+        * Things reading via commit roots that don't have normal protection,
+        * like send, can have a really old block in cache that may point at a
+        * block that has been free'd and re-allocated.  So don't clear uptodate
+        * if we find an eb that is under IO (dirty/writeback) because we could
+        * end up reading in the stale data and then writing it back out and
+        * making everybody very sad.
+        */
+       if (!extent_buffer_under_io(eb))
+               clear_extent_buffer_uptodate(eb);
 out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
+       btrfs_tree_read_unlock_blocking(eb);
        return ret;
 }
 
@@ -1546,7 +1564,6 @@ int btrfs_init_fs_root(struct btrfs_root *root)
        root->subv_writers = writers;
 
        btrfs_init_free_ino_ctl(root);
-       mutex_init(&root->fs_commit_mutex);
        spin_lock_init(&root->cache_lock);
        init_waitqueue_head(&root->cache_wait);
 
@@ -2324,7 +2341,7 @@ int open_ctree(struct super_block *sb,
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
-       init_rwsem(&fs_info->extent_commit_sem);
+       init_rwsem(&fs_info->commit_root_sem);
        init_rwsem(&fs_info->cleanup_work_sem);
        init_rwsem(&fs_info->subvol_sem);
        sema_init(&fs_info->uuid_tree_rescan_sem, 1);
index c6b6a6e3e735ce73bf06a735a9ee85533d94e4bf..1306487c82cf6a05c8c528f8851fbe70d84c1f80 100644 (file)
@@ -419,7 +419,7 @@ static noinline void caching_thread(struct btrfs_work *work)
 again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
-       down_read(&fs_info->extent_commit_sem);
+       down_read(&fs_info->commit_root_sem);
 
 next:
        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
@@ -443,10 +443,10 @@ next:
                                break;
 
                        if (need_resched() ||
-                           rwsem_is_contended(&fs_info->extent_commit_sem)) {
+                           rwsem_is_contended(&fs_info->commit_root_sem)) {
                                caching_ctl->progress = last;
                                btrfs_release_path(path);
-                               up_read(&fs_info->extent_commit_sem);
+                               up_read(&fs_info->commit_root_sem);
                                mutex_unlock(&caching_ctl->mutex);
                                cond_resched();
                                goto again;
@@ -513,7 +513,7 @@ next:
 
 err:
        btrfs_free_path(path);
-       up_read(&fs_info->extent_commit_sem);
+       up_read(&fs_info->commit_root_sem);
 
        free_excluded_extents(extent_root, block_group);
 
@@ -633,10 +633,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
                return 0;
        }
 
-       down_write(&fs_info->extent_commit_sem);
+       down_write(&fs_info->commit_root_sem);
        atomic_inc(&caching_ctl->count);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
-       up_write(&fs_info->extent_commit_sem);
+       up_write(&fs_info->commit_root_sem);
 
        btrfs_get_block_group(cache);
 
@@ -2444,7 +2444,8 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                        spin_unlock(&locked_ref->lock);
                        spin_lock(&delayed_refs->lock);
                        spin_lock(&locked_ref->lock);
-                       if (rb_first(&locked_ref->ref_root)) {
+                       if (rb_first(&locked_ref->ref_root) ||
+                           locked_ref->extent_op) {
                                spin_unlock(&locked_ref->lock);
                                spin_unlock(&delayed_refs->lock);
                                continue;
@@ -5470,7 +5471,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        struct btrfs_block_group_cache *cache;
        struct btrfs_space_info *space_info;
 
-       down_write(&fs_info->extent_commit_sem);
+       down_write(&fs_info->commit_root_sem);
 
        list_for_each_entry_safe(caching_ctl, next,
                                 &fs_info->caching_block_groups, list) {
@@ -5489,7 +5490,7 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        else
                fs_info->pinned_extents = &fs_info->freed_extents[0];
 
-       up_write(&fs_info->extent_commit_sem);
+       up_write(&fs_info->commit_root_sem);
 
        list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
                percpu_counter_set(&space_info->total_bytes_pinned, 0);
@@ -5744,6 +5745,8 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                        "unable to find ref byte nr %llu parent %llu root %llu  owner %llu offset %llu",
                        bytenr, parent, root_objectid, owner_objectid,
                        owner_offset);
+               btrfs_abort_transaction(trans, extent_root, ret);
+               goto out;
        } else {
                btrfs_abort_transaction(trans, extent_root, ret);
                goto out;
@@ -8255,14 +8258,14 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
        struct btrfs_caching_control *caching_ctl;
        struct rb_node *n;
 
-       down_write(&info->extent_commit_sem);
+       down_write(&info->commit_root_sem);
        while (!list_empty(&info->caching_block_groups)) {
                caching_ctl = list_entry(info->caching_block_groups.next,
                                         struct btrfs_caching_control, list);
                list_del(&caching_ctl->list);
                put_caching_control(caching_ctl);
        }
-       up_write(&info->extent_commit_sem);
+       up_write(&info->commit_root_sem);
 
        spin_lock(&info->block_group_cache_lock);
        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
@@ -8336,9 +8339,15 @@ static void __link_block_group(struct btrfs_space_info *space_info,
                               struct btrfs_block_group_cache *cache)
 {
        int index = get_block_group_index(cache);
+       bool first = false;
 
        down_write(&space_info->groups_sem);
-       if (list_empty(&space_info->block_groups[index])) {
+       if (list_empty(&space_info->block_groups[index]))
+               first = true;
+       list_add_tail(&cache->list, &space_info->block_groups[index]);
+       up_write(&space_info->groups_sem);
+
+       if (first) {
                struct kobject *kobj = &space_info->block_group_kobjs[index];
                int ret;
 
@@ -8350,8 +8359,6 @@ static void __link_block_group(struct btrfs_space_info *space_info,
                        kobject_put(&space_info->kobj);
                }
        }
-       list_add_tail(&cache->list, &space_info->block_groups[index]);
-       up_write(&space_info->groups_sem);
 }
 
 static struct btrfs_block_group_cache *
index ae69a00387e75149f2bca5565787ec2440b5275b..3955e475ceece295ea9db55def2178a319a817cc 100644 (file)
@@ -749,6 +749,7 @@ again:
                 * our range starts
                 */
                node = tree_search(tree, start);
+process_node:
                if (!node)
                        break;
 
@@ -769,7 +770,10 @@ again:
                if (start > end)
                        break;
 
-               cond_resched_lock(&tree->lock);
+               if (!cond_resched_lock(&tree->lock)) {
+                       node = rb_next(node);
+                       goto process_node;
+               }
        }
 out:
        spin_unlock(&tree->lock);
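
A note on the wait-loop change above: cond_resched_lock() returns nonzero only when it actually dropped the lock and rescheduled. In that case the rbtree may have changed and the walk restarts from tree_search(); when it returns zero the lock was never released, so the current node is still valid and the loop can simply advance with rb_next(). Condensed, the shape is as follows (process_state() is a placeholder, not a btrfs function):

	while (1) {
		node = tree_search(tree, start);	/* fresh lookup after a lock drop */
process_node:
		if (!node)
			break;
		process_state(node);			/* placeholder for the real work */
		if (!cond_resched_lock(&tree->lock)) {
			node = rb_next(node);		/* lock held throughout: node still valid */
			goto process_node;
		}
	}
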
@@ -4306,7 +4310,7 @@ static void __free_extent_buffer(struct extent_buffer *eb)
        kmem_cache_free(extent_buffer_cache, eb);
 }
 
-static int extent_buffer_under_io(struct extent_buffer *eb)
+int extent_buffer_under_io(struct extent_buffer *eb)
 {
        return (atomic_read(&eb->io_pages) ||
                test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
index 58b27e5ab52158a2d03a1bf7d7d612878489a9ac..c488b45237bf82c544471338d3f13ba6a1ab41d0 100644 (file)
@@ -320,6 +320,7 @@ int set_extent_buffer_dirty(struct extent_buffer *eb);
 int set_extent_buffer_uptodate(struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_buffer *eb);
 int extent_buffer_uptodate(struct extent_buffer *eb);
+int extent_buffer_under_io(struct extent_buffer *eb);
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
                      unsigned long min_len, char **map,
                      unsigned long *map_start,
index c660527af83880e56877de2a703526c3f4d18622..eb742c07e7a41aacdb595b0252a12b3584bbee83 100644 (file)
@@ -425,13 +425,8 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
                struct page *page = prepared_pages[pg];
                /*
                 * Copy data from userspace to the current page
-                *
-                * Disable pagefault to avoid recursive lock since
-                * the pages are already locked
                 */
-               pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
-               pagefault_enable();
 
                /* Flush processor's dcache for this page */
                flush_dcache_page(page);
@@ -1665,7 +1660,7 @@ again:
 static ssize_t __btrfs_direct_write(struct kiocb *iocb,
                                    const struct iovec *iov,
                                    unsigned long nr_segs, loff_t pos,
-                                   loff_t *ppos, size_t count, size_t ocount)
+                                   size_t count, size_t ocount)
 {
        struct file *file = iocb->ki_filp;
        struct iov_iter i;
@@ -1674,7 +1669,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        loff_t endbyte;
        int err;
 
-       written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
+       written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
                                            count, ocount);
 
        if (written < 0 || written == count)
@@ -1693,7 +1688,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
        if (err)
                goto out;
        written += written_buffered;
-       *ppos = pos + written_buffered;
+       iocb->ki_pos = pos + written_buffered;
        invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
                                 endbyte >> PAGE_CACHE_SHIFT);
 out:
@@ -1725,8 +1720,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       loff_t *ppos = &iocb->ki_pos;
        u64 start_pos;
+       u64 end_pos;
        ssize_t num_written = 0;
        ssize_t err = 0;
        size_t count, ocount;
@@ -1781,7 +1776,9 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 
        start_pos = round_down(pos, root->sectorsize);
        if (start_pos > i_size_read(inode)) {
-               err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
+               /* Expand hole size to cover write data, preventing empty gap */
+               end_pos = round_up(pos + iov->iov_len, root->sectorsize);
+               err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
                if (err) {
                        mutex_unlock(&inode->i_mutex);
                        goto out;
@@ -1793,7 +1790,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 
        if (unlikely(file->f_flags & O_DIRECT)) {
                num_written = __btrfs_direct_write(iocb, iov, nr_segs,
-                                                  pos, ppos, count, ocount);
+                                                  pos, count, ocount);
        } else {
                struct iov_iter i;
 
@@ -1801,7 +1798,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
 
                num_written = __btrfs_buffered_write(file, &i, pos);
                if (num_written > 0)
-                       *ppos = pos + num_written;
+                       iocb->ki_pos = pos + num_written;
        }
 
        mutex_unlock(&inode->i_mutex);
index ab485e57b6fe6c7ea14f580976dd46c7efd88ffb..cc8ca193d830f62ec5202933f49d9867b1427969 100644 (file)
@@ -55,7 +55,7 @@ static int caching_kthread(void *data)
        key.type = BTRFS_INODE_ITEM_KEY;
 again:
        /* need to make sure the commit_root doesn't disappear */
-       mutex_lock(&root->fs_commit_mutex);
+       down_read(&fs_info->commit_root_sem);
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
@@ -88,7 +88,7 @@ again:
                                btrfs_item_key_to_cpu(leaf, &key, 0);
                                btrfs_release_path(path);
                                root->cache_progress = last;
-                               mutex_unlock(&root->fs_commit_mutex);
+                               up_read(&fs_info->commit_root_sem);
                                schedule_timeout(1);
                                goto again;
                        } else
@@ -127,7 +127,7 @@ next:
        btrfs_unpin_free_ino(root);
 out:
        wake_up(&root->cache_wait);
-       mutex_unlock(&root->fs_commit_mutex);
+       up_read(&fs_info->commit_root_sem);
 
        btrfs_free_path(path);
 
@@ -223,11 +223,11 @@ again:
                 * or the caching work is done.
                 */
 
-               mutex_lock(&root->fs_commit_mutex);
+               down_write(&root->fs_info->commit_root_sem);
                spin_lock(&root->cache_lock);
                if (root->cached == BTRFS_CACHE_FINISHED) {
                        spin_unlock(&root->cache_lock);
-                       mutex_unlock(&root->fs_commit_mutex);
+                       up_write(&root->fs_info->commit_root_sem);
                        goto again;
                }
                spin_unlock(&root->cache_lock);
@@ -240,7 +240,7 @@ again:
                else
                        __btrfs_add_free_space(pinned, objectid, 1);
 
-               mutex_unlock(&root->fs_commit_mutex);
+               up_write(&root->fs_info->commit_root_sem);
        }
 }
 
@@ -250,7 +250,7 @@ again:
  * and others will just be dropped, because the commit root we were
  * searching has changed.
  *
- * Must be called with root->fs_commit_mutex held
+ * Must be called with root->fs_info->commit_root_sem held
  */
 void btrfs_unpin_free_ino(struct btrfs_root *root)
 {
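
The fs_commit_mutex to commit_root_sem conversion above is the usual mutex-to-rwsem move: the inode-cache paths only need the commit root to stay stable, so they now take the semaphore shared, while the commit path takes it exclusive when it switches roots. A minimal userspace sketch of that locking pattern, using POSIX rwlocks and purely illustrative names (this is not btrfs API):

/* Userspace sketch of the mutex -> rwsem pattern above: readers that only
 * need the shared structure to stay stable take the lock shared, the
 * updater takes it exclusive.  Names and data are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t commit_root_sem = PTHREAD_RWLOCK_INITIALIZER;
static int commit_root_generation;

static void *cache_reader(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&commit_root_sem);	/* down_read() */
	printf("reader sees generation %d\n", commit_root_generation);
	pthread_rwlock_unlock(&commit_root_sem);	/* up_read() */
	return NULL;
}

static void switch_commit_roots(void)
{
	pthread_rwlock_wrlock(&commit_root_sem);	/* down_write() */
	commit_root_generation++;			/* swap the roots */
	pthread_rwlock_unlock(&commit_root_sem);	/* up_write() */
}

int main(void)
{
	pthread_t readers[2];

	for (int i = 0; i < 2; i++)
		pthread_create(&readers[i], NULL, cache_reader, NULL);
	switch_commit_roots();
	for (int i = 0; i < 2; i++)
		pthread_join(readers[i], NULL);
	return 0;
}

Build with -pthread; pthread_rwlock_rdlock()/wrlock() stand in for the down_read()/down_write() calls in the hunks above.
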
index 06e9a4152b1419c6e4afbd4fd3580615ffc4eadd..5f805bc944fad00127cf812a922e54ccd9f7b396 100644 (file)
@@ -394,6 +394,14 @@ static noinline int compress_file_range(struct inode *inode,
            (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
                btrfs_add_inode_defrag(NULL, inode);
 
+       /*
+        * skip compression for a small file range (<= blocksize) that
+        * isn't an inline extent, since it doesn't save disk space at all.
+        */
+       if ((end - start + 1) <= blocksize &&
+           (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
+               goto cleanup_and_bail_uncompressed;
+
        actual_end = min_t(u64, isize, end + 1);
 again:
        will_compress = 0;
@@ -1270,6 +1278,15 @@ next_slot:
                        disk_bytenr += extent_offset;
                        disk_bytenr += cur_offset - found_key.offset;
                        num_bytes = min(end + 1, extent_end) - cur_offset;
+                       /*
+                        * if there are pending snapshots for this root,
+                        * we fall back to the common COW path.
+                        */
+                       if (!nolock) {
+                               err = btrfs_start_nocow_write(root);
+                               if (!err)
+                                       goto out_check;
+                       }
                        /*
                         * force cow if csum exists in the range.
                         * this ensure that csum for a given extent are
@@ -1289,6 +1306,8 @@ next_slot:
 out_check:
                if (extent_end <= start) {
                        path->slots[0]++;
+                       if (!nolock && nocow)
+                               btrfs_end_nocow_write(root);
                        goto next_slot;
                }
                if (!nocow) {
@@ -1306,8 +1325,11 @@ out_check:
                        ret = cow_file_range(inode, locked_page,
                                             cow_start, found_key.offset - 1,
                                             page_started, nr_written, 1);
-                       if (ret)
+                       if (ret) {
+                               if (!nolock && nocow)
+                                       btrfs_end_nocow_write(root);
                                goto error;
+                       }
                        cow_start = (u64)-1;
                }
 
@@ -1354,8 +1376,11 @@ out_check:
                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                        ret = btrfs_reloc_clone_csums(inode, cur_offset,
                                                      num_bytes);
-                       if (ret)
+                       if (ret) {
+                               if (!nolock && nocow)
+                                       btrfs_end_nocow_write(root);
                                goto error;
+                       }
                }
 
                extent_clear_unlock_delalloc(inode, cur_offset,
@@ -1363,6 +1388,8 @@ out_check:
                                             locked_page, EXTENT_LOCKED |
                                             EXTENT_DELALLOC, PAGE_UNLOCK |
                                             PAGE_SET_PRIVATE2);
+               if (!nolock && nocow)
+                       btrfs_end_nocow_write(root);
                cur_offset = extent_end;
                if (cur_offset > end)
                        break;
@@ -8476,19 +8503,20 @@ static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
                        else
                                iput(inode);
                        ret = -ENOMEM;
-                       break;
+                       goto out;
                }
                list_add_tail(&work->list, &works);
                btrfs_queue_work(root->fs_info->flush_workers,
                                 &work->work);
                ret++;
                if (nr != -1 && ret >= nr)
-                       break;
+                       goto out;
                cond_resched();
                spin_lock(&root->delalloc_lock);
        }
        spin_unlock(&root->delalloc_lock);
 
+out:
        list_for_each_entry_safe(work, next, &works, list) {
                list_del_init(&work->list);
                btrfs_wait_and_free_delalloc_work(work);
index 0401397b5c92787eba43d9dbc3097a0dfc2dbdac..e79ff6b90cb71bb131426b97838c369ae0e6f48c 100644 (file)
@@ -1472,6 +1472,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device = NULL;
        char *sizestr;
+       char *retptr;
        char *devstr = NULL;
        int ret = 0;
        int mod = 0;
@@ -1539,8 +1540,8 @@ static noinline int btrfs_ioctl_resize(struct file *file,
                        mod = 1;
                        sizestr++;
                }
-               new_size = memparse(sizestr, NULL);
-               if (new_size == 0) {
+               new_size = memparse(sizestr, &retptr);
+               if (*retptr != '\0' || new_size == 0) {
                        ret = -EINVAL;
                        goto out_free;
                }
@@ -3140,8 +3141,9 @@ process_slot:
                                                         new_key.offset + datal,
                                                         1);
                                if (ret) {
-                                       btrfs_abort_transaction(trans, root,
-                                                               ret);
+                                       if (ret != -EINVAL)
+                                               btrfs_abort_transaction(trans,
+                                                       root, ret);
                                        btrfs_end_transaction(trans, root);
                                        goto out;
                                }
@@ -3538,6 +3540,11 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                up_read(&info->groups_sem);
        }
 
+       /*
+        * Global block reserve, exported as a space_info
+        */
+       slot_count++;
+
        /* space_slots == 0 means they are asking for a count */
        if (space_args.space_slots == 0) {
                space_args.total_spaces = slot_count;
@@ -3596,6 +3603,21 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
                up_read(&info->groups_sem);
        }
 
+       /*
+        * Add global block reserve
+        */
+       if (slot_count) {
+               struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
+
+               spin_lock(&block_rsv->lock);
+               space.total_bytes = block_rsv->size;
+               space.used_bytes = block_rsv->size - block_rsv->reserved;
+               spin_unlock(&block_rsv->lock);
+               space.flags = BTRFS_SPACE_INFO_GLOBAL_RSV;
+               memcpy(dest, &space, sizeof(space));
+               space_args.total_spaces++;
+       }
+
        user_dest = (struct btrfs_ioctl_space_info __user *)
                (arg + sizeof(struct btrfs_ioctl_space_args));
 
@@ -4531,9 +4553,8 @@ static long btrfs_ioctl_set_received_subvol_32(struct file *file,
        }
 
        args64 = kmalloc(sizeof(*args64), GFP_NOFS);
-       if (IS_ERR(args64)) {
-               ret = PTR_ERR(args64);
-               args64 = NULL;
+       if (!args64) {
+               ret = -ENOMEM;
                goto out;
        }
 
index def428a25b2ab915f1331fccf4d8e98d62509ec7..7f92ab1daa87d3dc80137488405769016f1929a0 100644 (file)
@@ -2317,7 +2317,6 @@ void free_reloc_roots(struct list_head *list)
 static noinline_for_stack
 int merge_reloc_roots(struct reloc_control *rc)
 {
-       struct btrfs_trans_handle *trans;
        struct btrfs_root *root;
        struct btrfs_root *reloc_root;
        u64 last_snap;
@@ -2375,26 +2374,6 @@ again:
                                list_add_tail(&reloc_root->root_list,
                                              &reloc_roots);
                        goto out;
-               } else if (!ret) {
-                       /*
-                        * recover the last snapshot tranid to avoid
-                        * the space balance break NOCOW.
-                        */
-                       root = read_fs_root(rc->extent_root->fs_info,
-                                           objectid);
-                       if (IS_ERR(root))
-                               continue;
-
-                       trans = btrfs_join_transaction(root);
-                       BUG_ON(IS_ERR(trans));
-
-                       /* Check if the fs/file tree was snapshoted or not. */
-                       if (btrfs_root_last_snapshot(&root->root_item) ==
-                           otransid - 1)
-                               btrfs_set_root_last_snapshot(&root->root_item,
-                                                            last_snap);
-                               
-                       btrfs_end_transaction(trans, root);
                }
        }
 
index 93e6d717284477d02b54c290dd6956af4a23038b..0be77993378e2b99cf4f44acff6b7dbe36f9d517 100644 (file)
@@ -2235,6 +2235,47 @@ behind_scrub_pages:
        return 0;
 }
 
+/*
+ * Given a physical address, this will calculate its
+ * logical offset. If this is a parity stripe, it will return
+ * the left-most data stripe's logical offset.
+ *
+ * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
+ */
+static int get_raid56_logic_offset(u64 physical, int num,
+                                  struct map_lookup *map, u64 *offset)
+{
+       int i;
+       int j = 0;
+       u64 stripe_nr;
+       u64 last_offset;
+       int stripe_index;
+       int rot;
+
+       last_offset = (physical - map->stripes[num].physical) *
+                     nr_data_stripes(map);
+       *offset = last_offset;
+       for (i = 0; i < nr_data_stripes(map); i++) {
+               *offset = last_offset + i * map->stripe_len;
+
+               stripe_nr = *offset;
+               do_div(stripe_nr, map->stripe_len);
+               do_div(stripe_nr, nr_data_stripes(map));
+
+               /* Work out the disk rotation on this stripe-set */
+               rot = do_div(stripe_nr, map->num_stripes);
+               /* calculate which stripe this data is located on */
+               rot += i;
+               stripe_index = rot % map->num_stripes;
+               if (stripe_index == num)
+                       return 0;
+               if (stripe_index < num)
+                       j++;
+       }
+       *offset = last_offset + j * map->stripe_len;
+       return 1;
+}
+
 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
                                           struct map_lookup *map,
                                           struct btrfs_device *scrub_dev,
@@ -2256,6 +2297,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        u64 physical;
        u64 logical;
        u64 logic_end;
+       u64 physical_end;
        u64 generation;
        int mirror_num;
        struct reada_control *reada1;
@@ -2269,16 +2311,10 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        u64 extent_len;
        struct btrfs_device *extent_dev;
        int extent_mirror_num;
-       int stop_loop;
-
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6)) {
-               if (num >= nr_data_stripes(map)) {
-                       return 0;
-               }
-       }
+       int stop_loop = 0;
 
        nstripes = length;
+       physical = map->stripes[num].physical;
        offset = 0;
        do_div(nstripes, map->stripe_len);
        if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
@@ -2296,6 +2332,11 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                increment = map->stripe_len;
                mirror_num = num % map->num_stripes + 1;
+       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6)) {
+               get_raid56_logic_offset(physical, num, map, &offset);
+               increment = map->stripe_len * nr_data_stripes(map);
+               mirror_num = 1;
        } else {
                increment = map->stripe_len;
                mirror_num = 1;
@@ -2319,7 +2360,15 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
         * to not hold off transaction commits
         */
        logical = base + offset;
-
+       physical_end = physical + nstripes * map->stripe_len;
+       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                        BTRFS_BLOCK_GROUP_RAID6)) {
+               get_raid56_logic_offset(physical_end, num,
+                                       map, &logic_end);
+               logic_end += base;
+       } else {
+               logic_end = logical + increment * nstripes;
+       }
        wait_event(sctx->list_wait,
                   atomic_read(&sctx->bios_in_flight) == 0);
        scrub_blocked_if_needed(fs_info);
@@ -2328,7 +2377,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        key_start.objectid = logical;
        key_start.type = BTRFS_EXTENT_ITEM_KEY;
        key_start.offset = (u64)0;
-       key_end.objectid = base + offset + nstripes * increment;
+       key_end.objectid = logic_end;
        key_end.type = BTRFS_METADATA_ITEM_KEY;
        key_end.offset = (u64)-1;
        reada1 = btrfs_reada_add(root, &key_start, &key_end);
@@ -2338,7 +2387,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        key_start.offset = logical;
        key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key_end.type = BTRFS_EXTENT_CSUM_KEY;
-       key_end.offset = base + offset + nstripes * increment;
+       key_end.offset = logic_end;
        reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
 
        if (!IS_ERR(reada1))
@@ -2356,11 +2405,17 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        /*
         * now find all extents for each stripe and scrub them
         */
-       logical = base + offset;
-       physical = map->stripes[num].physical;
-       logic_end = logical + increment * nstripes;
        ret = 0;
-       while (logical < logic_end) {
+       while (physical < physical_end) {
+               /* for raid56, we skip parity stripe */
+               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                               BTRFS_BLOCK_GROUP_RAID6)) {
+                       ret = get_raid56_logic_offset(physical, num,
+                                       map, &logical);
+                       logical += base;
+                       if (ret)
+                               goto skip;
+               }
                /*
                 * canceled?
                 */
@@ -2504,15 +2559,29 @@ again:
                        scrub_free_csums(sctx);
                        if (extent_logical + extent_len <
                            key.objectid + bytes) {
-                               logical += increment;
-                               physical += map->stripe_len;
-
+                               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+                                       BTRFS_BLOCK_GROUP_RAID6)) {
+                                       /*
+                                        * loop until we find next data stripe
+                                        * or we have finished all stripes.
+                                        */
+                                       do {
+                                               physical += map->stripe_len;
+                                               ret = get_raid56_logic_offset(
+                                                               physical, num,
+                                                               map, &logical);
+                                               logical += base;
+                                       } while (physical < physical_end && ret);
+                               } else {
+                                       physical += map->stripe_len;
+                                       logical += increment;
+                               }
                                if (logical < key.objectid + bytes) {
                                        cond_resched();
                                        goto again;
                                }
 
-                               if (logical >= logic_end) {
+                               if (physical >= physical_end) {
                                        stop_loop = 1;
                                        break;
                                }
@@ -2521,6 +2590,7 @@ next:
                        path->slots[0]++;
                }
                btrfs_release_path(path);
+skip:
                logical += increment;
                physical += map->stripe_len;
                spin_lock(&sctx->stat_lock);
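
get_raid56_logic_offset() above is pure stripe arithmetic, so it can be checked outside the kernel. Below is a minimal userspace sketch of the same rotation math; do_div() is replaced by plain 64-bit division, "physical" is taken as the offset into this device's slice of the chunk (physical - map->stripes[num].physical in the kernel), and the 4-disk RAID5 geometry in main() is only an example:

/*
 * Userspace sketch of the RAID5/6 physical->logical mapping used by scrub.
 * Geometry values are illustrative, not taken from a real chunk map.
 */
#include <stdint.h>
#include <stdio.h>

static int raid56_logic_offset(uint64_t physical, int num, int num_stripes,
			       int data_stripes, uint64_t stripe_len,
			       uint64_t *offset)
{
	uint64_t last_offset = physical * data_stripes;
	int j = 0;

	for (int i = 0; i < data_stripes; i++) {
		uint64_t off = last_offset + i * stripe_len;
		uint64_t stripe_nr = off / stripe_len / data_stripes;
		/* rotation of this stripe row across the disks */
		int rot = (int)(stripe_nr % num_stripes) + i;
		int stripe_index = rot % num_stripes;

		if (stripe_index == num) {	/* data stripe on this disk */
			*offset = off;
			return 0;
		}
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + j * stripe_len;	/* parity on this disk */
	return 1;
}

int main(void)
{
	/* 4-disk RAID5 chunk: 3 data stripes per row, 64 KiB stripe length */
	for (int num = 0; num < 4; num++) {
		uint64_t logical;
		int parity = raid56_logic_offset(0, num, 4, 3, 64 * 1024,
						 &logical);

		printf("disk %d: %s stripe, logical offset %llu\n", num,
		       parity ? "parity" : "data",
		       (unsigned long long)logical);
	}
	return 0;
}

Each stripe row shifts its data stripes by one disk, which is what the rot/stripe_index computation encodes; scrub uses the return value to skip parity stripes, as the while loop in the hunk above shows.
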
index 9b6da9d55f9a8778c47cf64c15a143951fc2ccf7..1ac3ca98c4294ae54781f97931278386e8bb4bcc 100644 (file)
@@ -493,6 +493,7 @@ static struct btrfs_path *alloc_path_for_send(void)
                return NULL;
        path->search_commit_root = 1;
        path->skip_locking = 1;
+       path->need_commit_sem = 1;
        return path;
 }
 
@@ -771,29 +772,22 @@ out:
 /*
  * Helper function to retrieve some fields from an inode item.
  */
-static int get_inode_info(struct btrfs_root *root,
-                         u64 ino, u64 *size, u64 *gen,
-                         u64 *mode, u64 *uid, u64 *gid,
-                         u64 *rdev)
+static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
+                         u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
+                         u64 *gid, u64 *rdev)
 {
        int ret;
        struct btrfs_inode_item *ii;
        struct btrfs_key key;
-       struct btrfs_path *path;
-
-       path = alloc_path_for_send();
-       if (!path)
-               return -ENOMEM;
 
        key.objectid = ino;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-       if (ret < 0)
-               goto out;
        if (ret) {
-               ret = -ENOENT;
-               goto out;
+               if (ret > 0)
+                       ret = -ENOENT;
+               return ret;
        }
 
        ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -811,7 +805,22 @@ static int get_inode_info(struct btrfs_root *root,
        if (rdev)
                *rdev = btrfs_inode_rdev(path->nodes[0], ii);
 
-out:
+       return ret;
+}
+
+static int get_inode_info(struct btrfs_root *root,
+                         u64 ino, u64 *size, u64 *gen,
+                         u64 *mode, u64 *uid, u64 *gid,
+                         u64 *rdev)
+{
+       struct btrfs_path *path;
+       int ret;
+
+       path = alloc_path_for_send();
+       if (!path)
+               return -ENOMEM;
+       ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
+                              rdev);
        btrfs_free_path(path);
        return ret;
 }
@@ -1085,6 +1094,7 @@ out:
 struct backref_ctx {
        struct send_ctx *sctx;
 
+       struct btrfs_path *path;
        /* number of total found references */
        u64 found;
 
@@ -1155,8 +1165,9 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
         * There are inodes that have extents that lie behind its i_size. Don't
         * accept clones from these extents.
         */
-       ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
-                       NULL);
+       ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
+                              NULL, NULL, NULL);
+       btrfs_release_path(bctx->path);
        if (ret < 0)
                return ret;
 
@@ -1235,12 +1246,17 @@ static int find_extent_clone(struct send_ctx *sctx,
        if (!tmp_path)
                return -ENOMEM;
 
+       /* We only use this path under the commit sem */
+       tmp_path->need_commit_sem = 0;
+
        backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
        if (!backref_ctx) {
                ret = -ENOMEM;
                goto out;
        }
 
+       backref_ctx->path = tmp_path;
+
        if (data_offset >= ino_size) {
                /*
                 * There may be extents that lie behind the file's size.
@@ -1268,8 +1284,10 @@ static int find_extent_clone(struct send_ctx *sctx,
        }
        logical = disk_byte + btrfs_file_extent_offset(eb, fi);
 
+       down_read(&sctx->send_root->fs_info->commit_root_sem);
        ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
                                  &found_key, &flags);
+       up_read(&sctx->send_root->fs_info->commit_root_sem);
        btrfs_release_path(tmp_path);
 
        if (ret < 0)
@@ -4418,14 +4436,14 @@ static int send_hole(struct send_ctx *sctx, u64 end)
        p = fs_path_alloc();
        if (!p)
                return -ENOMEM;
+       ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+       if (ret < 0)
+               goto tlv_put_failure;
        memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
        while (offset < end) {
                len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
 
                ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
-               if (ret < 0)
-                       break;
-               ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
                if (ret < 0)
                        break;
                TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
@@ -4968,7 +4986,9 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
 
        if (S_ISREG(sctx->cur_inode_mode)) {
                if (need_send_hole(sctx)) {
-                       if (sctx->cur_inode_last_extent == (u64)-1) {
+                       if (sctx->cur_inode_last_extent == (u64)-1 ||
+                           sctx->cur_inode_last_extent <
+                           sctx->cur_inode_size) {
                                ret = get_last_extent(sctx, (u64)-1);
                                if (ret)
                                        goto out;
@@ -5367,57 +5387,21 @@ out:
 static int full_send_tree(struct send_ctx *sctx)
 {
        int ret;
-       struct btrfs_trans_handle *trans = NULL;
        struct btrfs_root *send_root = sctx->send_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
        struct extent_buffer *eb;
        int slot;
-       u64 start_ctransid;
-       u64 ctransid;
 
        path = alloc_path_for_send();
        if (!path)
                return -ENOMEM;
 
-       spin_lock(&send_root->root_item_lock);
-       start_ctransid = btrfs_root_ctransid(&send_root->root_item);
-       spin_unlock(&send_root->root_item_lock);
-
        key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
 
-join_trans:
-       /*
-        * We need to make sure the transaction does not get committed
-        * while we do anything on commit roots. Join a transaction to prevent
-        * this.
-        */
-       trans = btrfs_join_transaction(send_root);
-       if (IS_ERR(trans)) {
-               ret = PTR_ERR(trans);
-               trans = NULL;
-               goto out;
-       }
-
-       /*
-        * Make sure the tree has not changed after re-joining. We detect this
-        * by comparing start_ctransid and ctransid. They should always match.
-        */
-       spin_lock(&send_root->root_item_lock);
-       ctransid = btrfs_root_ctransid(&send_root->root_item);
-       spin_unlock(&send_root->root_item_lock);
-
-       if (ctransid != start_ctransid) {
-               WARN(1, KERN_WARNING "BTRFS: the root that you're trying to "
-                                    "send was modified in between. This is "
-                                    "probably a bug.\n");
-               ret = -EIO;
-               goto out;
-       }
-
        ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
        if (ret < 0)
                goto out;
@@ -5425,19 +5409,6 @@ join_trans:
                goto out_finish;
 
        while (1) {
-               /*
-                * When someone want to commit while we iterate, end the
-                * joined transaction and rejoin.
-                */
-               if (btrfs_should_end_transaction(trans, send_root)) {
-                       ret = btrfs_end_transaction(trans, send_root);
-                       trans = NULL;
-                       if (ret < 0)
-                               goto out;
-                       btrfs_release_path(path);
-                       goto join_trans;
-               }
-
                eb = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(eb, &found_key, slot);
@@ -5465,12 +5436,6 @@ out_finish:
 
 out:
        btrfs_free_path(path);
-       if (trans) {
-               if (!ret)
-                       ret = btrfs_end_transaction(trans, send_root);
-               else
-                       btrfs_end_transaction(trans, send_root);
-       }
        return ret;
 }
 
@@ -5718,7 +5683,9 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
                        NULL);
        sort_clone_roots = 1;
 
+       current->journal_info = (void *)BTRFS_SEND_TRANS_STUB;
        ret = send_subvol(sctx);
+       current->journal_info = NULL;
        if (ret < 0)
                goto out;
 
index 9dbf4239515308a480b3cdb2cfc48b4b38812811..5011aadacab8e4cf1ac291f061f3c6ada24f8b9a 100644 (file)
@@ -66,6 +66,8 @@
 static const struct super_operations btrfs_super_ops;
 static struct file_system_type btrfs_fs_type;
 
+static int btrfs_remount(struct super_block *sb, int *flags, char *data);
+
 static const char *btrfs_decode_error(int errno)
 {
        char *errstr = "unknown";
@@ -1185,6 +1187,26 @@ static struct dentry *mount_subvol(const char *subvol_name, int flags,
        mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name,
                             newargs);
        kfree(newargs);
+
+       if (PTR_RET(mnt) == -EBUSY) {
+               if (flags & MS_RDONLY) {
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY, device_name,
+                                            newargs);
+               } else {
+                       int r;
+                       mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY, device_name,
+                                            newargs);
+                       if (IS_ERR(mnt))
+                               return ERR_CAST(mnt);
+
+                       r = btrfs_remount(mnt->mnt_sb, &flags, NULL);
+                       if (r < 0) {
+                               /* FIXME: release vfsmount mnt ??*/
+                               return ERR_PTR(r);
+                       }
+               }
+       }
+
        if (IS_ERR(mnt))
                return ERR_CAST(mnt);
 
index a04707f740d6aed809ccbf477cfc3305dfe5c44f..7579f6d0b8549525e6460f36af8666e1ca980a23 100644 (file)
@@ -75,10 +75,21 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
        }
 }
 
-static noinline void switch_commit_root(struct btrfs_root *root)
+static noinline void switch_commit_roots(struct btrfs_transaction *trans,
+                                        struct btrfs_fs_info *fs_info)
 {
-       free_extent_buffer(root->commit_root);
-       root->commit_root = btrfs_root_node(root);
+       struct btrfs_root *root, *tmp;
+
+       down_write(&fs_info->commit_root_sem);
+       list_for_each_entry_safe(root, tmp, &trans->switch_commits,
+                                dirty_list) {
+               list_del_init(&root->dirty_list);
+               free_extent_buffer(root->commit_root);
+               root->commit_root = btrfs_root_node(root);
+               if (is_fstree(root->objectid))
+                       btrfs_unpin_free_ino(root);
+       }
+       up_write(&fs_info->commit_root_sem);
 }
 
 static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
@@ -208,6 +219,7 @@ loop:
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->ordered_operations);
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
+       INIT_LIST_HEAD(&cur_trans->switch_commits);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
@@ -375,7 +387,8 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return ERR_PTR(-EROFS);
 
-       if (current->journal_info) {
+       if (current->journal_info &&
+           current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
@@ -919,9 +932,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                        return ret;
        }
 
-       if (root != root->fs_info->extent_root)
-               switch_commit_root(root);
-
        return 0;
 }
 
@@ -977,15 +987,16 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
 
+               if (root != fs_info->extent_root)
+                       list_add_tail(&root->dirty_list,
+                                     &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }
 
-       down_write(&fs_info->extent_commit_sem);
-       switch_commit_root(fs_info->extent_root);
-       up_write(&fs_info->extent_commit_sem);
-
+       list_add_tail(&fs_info->extent_root->dirty_list,
+                     &trans->transaction->switch_commits);
        btrfs_after_dev_replace_commit(fs_info);
 
        return 0;
@@ -1042,11 +1053,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                        smp_wmb();
 
                        if (root->commit_root != root->node) {
-                               mutex_lock(&root->fs_commit_mutex);
-                               switch_commit_root(root);
-                               btrfs_unpin_free_ino(root);
-                               mutex_unlock(&root->fs_commit_mutex);
-
+                               list_add_tail(&root->dirty_list,
+                                       &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }
@@ -1857,11 +1865,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
-       switch_commit_root(root->fs_info->tree_root);
+       list_add_tail(&root->fs_info->tree_root->dirty_list,
+                     &cur_trans->switch_commits);
 
        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
-       switch_commit_root(root->fs_info->chunk_root);
+       list_add_tail(&root->fs_info->chunk_root->dirty_list,
+                     &cur_trans->switch_commits);
+
+       switch_commit_roots(cur_trans, root->fs_info);
 
        assert_qgroups_uptodate(trans);
        update_super_roots(root);
index 6ac037e9f9f0557524b7ed29b6b72a768f7a23cf..b57b924e8e03ee66fd98bc24b0e53de672be5b3b 100644 (file)
@@ -57,6 +57,7 @@ struct btrfs_transaction {
        struct list_head pending_snapshots;
        struct list_head ordered_operations;
        struct list_head pending_chunks;
+       struct list_head switch_commits;
        struct btrfs_delayed_ref_root delayed_refs;
        int aborted;
 };
@@ -78,6 +79,8 @@ struct btrfs_transaction {
 #define TRANS_EXTWRITERS       (__TRANS_USERSPACE | __TRANS_START |    \
                                 __TRANS_ATTACH)
 
+#define BTRFS_SEND_TRANS_STUB  1
+
 struct btrfs_trans_handle {
        u64 transid;
        u64 bytes_reserved;
index d241130a32fddfd351237b8a14f0982f46b7e2e2..49d7fab73360ce53b6bf50555ad6a308495953fa 100644 (file)
@@ -448,6 +448,14 @@ static void pending_bios_fn(struct btrfs_work *work)
        run_scheduled_bios(device);
 }
 
+/*
+ * Add new device to list of registered devices
+ *
+ * Returns:
+ * 1   - first time device is seen
+ * 0   - device already known
+ * < 0 - error
+ */
 static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
@@ -455,6 +463,7 @@ static noinline int device_list_add(const char *path,
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        struct rcu_string *name;
+       int ret = 0;
        u64 found_transid = btrfs_super_generation(disk_super);
 
        fs_devices = find_fsid(disk_super->fsid);
@@ -495,6 +504,7 @@ static noinline int device_list_add(const char *path,
                fs_devices->num_devices++;
                mutex_unlock(&fs_devices->device_list_mutex);
 
+               ret = 1;
                device->fs_devices = fs_devices;
        } else if (!device->name || strcmp(device->name->str, path)) {
                name = rcu_string_strdup(path, GFP_NOFS);
@@ -513,7 +523,8 @@ static noinline int device_list_add(const char *path,
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
-       return 0;
+
+       return ret;
 }
 
 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
@@ -910,17 +921,19 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        transid = btrfs_super_generation(disk_super);
        total_devices = btrfs_super_num_devices(disk_super);
 
-       if (disk_super->label[0]) {
-               if (disk_super->label[BTRFS_LABEL_SIZE - 1])
-                       disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
-               printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
-       } else {
-               printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
-       }
-
-       printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
-
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
+       if (ret > 0) {
+               if (disk_super->label[0]) {
+                       if (disk_super->label[BTRFS_LABEL_SIZE - 1])
+                               disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
+                       printk(KERN_INFO "BTRFS: device label %s ", disk_super->label);
+               } else {
+                       printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid);
+               }
+
+               printk(KERN_CONT "devid %llu transid %llu %s\n", devid, transid, path);
+               ret = 0;
+       }
        if (!ret && fs_devices_ret)
                (*fs_devices_ret)->total_devices = total_devices;
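
With the new return convention (1 = first time the device is seen, 0 = already known, < 0 = error), btrfs_scan_one_device can log the label/fsid line only on first registration. A small userspace sketch of the same contract, with a toy in-memory registry standing in for the btrfs device list (names and sizes are illustrative):

/* Userspace sketch of the return-value contract documented above:
 * 1 on first registration, 0 if already known, <0 on error.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static char known[8][32];
static int nknown;

static int device_register(const char *path)
{
	for (int i = 0; i < nknown; i++)
		if (!strcmp(known[i], path))
			return 0;		/* already known */
	if (nknown == 8 || strlen(path) >= 32)
		return -ENOMEM;			/* error */
	strcpy(known[nknown++], path);
	return 1;				/* first time seen */
}

int main(void)
{
	const char *devs[] = { "/dev/sdb", "/dev/sdc", "/dev/sdb" };

	for (int i = 0; i < 3; i++) {
		int ret = device_register(devs[i]);

		if (ret > 0)	/* like btrfs_scan_one_device: log once */
			printf("new device %s\n", devs[i]);
		else if (ret < 0)
			printf("error %d for %s\n", ret, devs[i]);
	}
	return 0;
}

Running it prints the "new device" line once per unique path, mirroring the quieter repeated-scan output the hunk above is after.
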
 
index 8c53a2b15ecbaffcc19ab5f45b53c19174998c91..9ddb9fc7d923fa31299a8aba228f61973d3b429f 100644 (file)
@@ -2114,8 +2114,8 @@ EXPORT_SYMBOL(generic_write_end);
  * Returns true if all buffers which correspond to a file portion
  * we want to read are uptodate.
  */
-int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
-                                       unsigned long from)
+int block_is_partially_uptodate(struct page *page, unsigned long from,
+                                       unsigned long count)
 {
        unsigned block_start, block_end, blocksize;
        unsigned to;
@@ -2127,7 +2127,7 @@ int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
 
        head = page_buffers(page);
        blocksize = head->b_size;
-       to = min_t(unsigned, PAGE_CACHE_SIZE - from, desc->count);
+       to = min_t(unsigned, PAGE_CACHE_SIZE - from, count);
        to = from + to;
        if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize)
                return 0;
index 622f4696e48435f47759ab35869ce692cc697188..5b99bafc31d13011801bb1ab3002b59f6c41cad0 100644 (file)
@@ -124,7 +124,6 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
        /* check parameters */
        ret = -EOPNOTSUPP;
        if (!root->d_inode ||
-           !root->d_inode->i_op ||
            !root->d_inode->i_op->lookup ||
            !root->d_inode->i_op->mkdir ||
            !root->d_inode->i_op->setxattr ||
index 6494d9f673aa51490a59694d600bd8a7101559bf..c0a681705104fc7a8aae428169e3f342bb4cb8a4 100644 (file)
@@ -779,8 +779,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
        }
 
        ret = -EPERM;
-       if (!subdir->d_inode->i_op ||
-           !subdir->d_inode->i_op->setxattr ||
+       if (!subdir->d_inode->i_op->setxattr ||
            !subdir->d_inode->i_op->getxattr ||
            !subdir->d_inode->i_op->lookup ||
            !subdir->d_inode->i_op->mkdir ||
index 66075a4ad97900edbfaf98775d484c31c7496200..39da1c2efa5030216d18bc6bb3020a78afb4c5f6 100644 (file)
@@ -601,7 +601,7 @@ ceph_sync_direct_write(struct kiocb *iocb, const struct iovec *iov,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
-                       goto out;
+                       break;
                }
 
                num_pages = calc_pages_for(page_align, len);
@@ -719,7 +719,7 @@ static ssize_t ceph_sync_write(struct kiocb *iocb, const struct iovec *iov,
                                            false);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
-                       goto out;
+                       break;
                }
 
                /*
@@ -972,6 +972,7 @@ retry_snap:
                }
        } else {
                loff_t old_size = inode->i_size;
+               struct iov_iter from;
                /*
                 * No need to acquire the i_truncate_mutex. Because
                 * the MDS revokes Fwb caps before sending truncate
@@ -979,9 +980,10 @@ retry_snap:
                 * are pending vmtruncate. So write and vmtruncate
                 * can not run at the same time
                 */
-               written = generic_file_buffered_write(iocb, iov, nr_segs,
-                                                     pos, &iocb->ki_pos,
-                                                     count, 0);
+               iov_iter_init(&from, iov, nr_segs, count, 0);
+               written = generic_perform_write(file, &from, pos);
+               if (likely(written >= 0))
+                       iocb->ki_pos = pos + written;
                if (inode->i_size > old_size)
                        ceph_fscache_update_objectsize(inode);
                mutex_unlock(&inode->i_mutex);
index efbe082892920333e6d2c703fa58f0a275e3d87b..fdf941b44ff103a2590a3804aa850e468ec980d6 100644 (file)
@@ -1,9 +1,8 @@
+#include <linux/ceph/ceph_debug.h>
 #include <linux/in.h>
 
 #include "super.h"
 #include "mds_client.h"
-#include <linux/ceph/ceph_debug.h>
-
 #include "ioctl.h"
 
 
index 2c70cbe35d39c3b7df79dd0b7e2b3fde6401f8fe..df9c9141c0998383522b181664b9d57256bc99fa 100644 (file)
@@ -850,7 +850,6 @@ const struct inode_operations cifs_file_inode_ops = {
 /*     revalidate:cifs_revalidate, */
        .setattr = cifs_setattr,
        .getattr = cifs_getattr, /* do we need this anymore? */
-       .rename = cifs_rename,
        .permission = cifs_permission,
 #ifdef CONFIG_CIFS_XATTR
        .setxattr = cifs_setxattr,
index 216d7e99f9219317bd0f2567c898925df77ec68d..8807442c94dd3323cbb7f9f8283c4f1c1a2d1480 100644 (file)
@@ -2579,19 +2579,32 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
        struct cifsInodeInfo *cinode = CIFS_I(inode);
        struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
        ssize_t rc = -EACCES;
-       loff_t lock_pos = pos;
+       loff_t lock_pos = iocb->ki_pos;
 
-       if (file->f_flags & O_APPEND)
-               lock_pos = i_size_read(inode);
        /*
         * We need to hold the sem to be sure nobody modifies lock list
         * with a brlock that prevents writing.
         */
        down_read(&cinode->lock_sem);
+       mutex_lock(&inode->i_mutex);
+       if (file->f_flags & O_APPEND)
+               lock_pos = i_size_read(inode);
        if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
                                     server->vals->exclusive_lock_type, NULL,
-                                    CIFS_WRITE_OP))
-               rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
+                                    CIFS_WRITE_OP)) {
+               rc = __generic_file_aio_write(iocb, iov, nr_segs);
+               mutex_unlock(&inode->i_mutex);
+
+               if (rc > 0) {
+                       ssize_t err;
+
+                       err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+                       if (err < 0)
+                               rc = err;
+               }
+       } else {
+               mutex_unlock(&inode->i_mutex);
+       }
        up_read(&cinode->lock_sem);
        return rc;
 }
@@ -2727,56 +2740,27 @@ cifs_retry_async_readv(struct cifs_readdata *rdata)
 /**
  * cifs_readdata_to_iov - copy data from pages in response to an iovec
  * @rdata:     the readdata response with list of pages holding data
- * @iov:       vector in which we should copy the data
- * @nr_segs:   number of segments in vector
- * @offset:    offset into file of the first iovec
- * @copied:    used to return the amount of data copied to the iov
+ * @iter:      destination for our data
  *
  * This function copies data from a list of pages in a readdata response into
  * an array of iovecs. It will first calculate where the data should go
  * based on the info in the readdata and then copy the data into that spot.
  */
-static ssize_t
-cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t offset, ssize_t *copied)
+static int
+cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
 {
-       int rc = 0;
-       struct iov_iter ii;
-       size_t pos = rdata->offset - offset;
-       ssize_t remaining = rdata->bytes;
-       unsigned char *pdata;
+       size_t remaining = rdata->bytes;
        unsigned int i;
 
-       /* set up iov_iter and advance to the correct offset */
-       iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
-       iov_iter_advance(&ii, pos);
-
-       *copied = 0;
        for (i = 0; i < rdata->nr_pages; i++) {
-               ssize_t copy;
                struct page *page = rdata->pages[i];
-
-               /* copy a whole page or whatever's left */
-               copy = min_t(ssize_t, remaining, PAGE_SIZE);
-
-               /* ...but limit it to whatever space is left in the iov */
-               copy = min_t(ssize_t, copy, iov_iter_count(&ii));
-
-               /* go while there's data to be copied and no errors */
-               if (copy && !rc) {
-                       pdata = kmap(page);
-                       rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
-                                               (int)copy);
-                       kunmap(page);
-                       if (!rc) {
-                               *copied += copy;
-                               remaining -= copy;
-                               iov_iter_advance(&ii, copy);
-                       }
-               }
+               size_t copy = min(remaining, PAGE_SIZE);
+               size_t written = copy_page_to_iter(page, 0, copy, iter);
+               remaining -= written;
+               if (written < copy && iov_iter_count(iter) > 0)
+                       break;
        }
-
-       return rc;
+       return remaining ? -EFAULT : 0;
 }
 
 static void
@@ -2837,20 +2821,21 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
        return total_read > 0 ? total_read : result;
 }
 
-static ssize_t
-cifs_iovec_read(struct file *file, const struct iovec *iov,
-                unsigned long nr_segs, loff_t *poffset)
+ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
+                              unsigned long nr_segs, loff_t pos)
 {
+       struct file *file = iocb->ki_filp;
        ssize_t rc;
        size_t len, cur_len;
        ssize_t total_read = 0;
-       loff_t offset = *poffset;
+       loff_t offset = pos;
        unsigned int npages;
        struct cifs_sb_info *cifs_sb;
        struct cifs_tcon *tcon;
        struct cifsFileInfo *open_file;
        struct cifs_readdata *rdata, *tmp;
        struct list_head rdata_list;
+       struct iov_iter to;
        pid_t pid;
 
        if (!nr_segs)
@@ -2860,6 +2845,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
        if (!len)
                return 0;
 
+       iov_iter_init(&to, iov, nr_segs, len, 0);
+
        INIT_LIST_HEAD(&rdata_list);
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        open_file = file->private_data;
@@ -2917,55 +2904,44 @@ error:
        if (!list_empty(&rdata_list))
                rc = 0;
 
+       len = iov_iter_count(&to);
        /* the loop below should proceed in the order of increasing offsets */
-restart_loop:
        list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+       again:
                if (!rc) {
-                       ssize_t copied;
-
                        /* FIXME: freezable sleep too? */
                        rc = wait_for_completion_killable(&rdata->done);
                        if (rc)
                                rc = -EINTR;
-                       else if (rdata->result)
+                       else if (rdata->result) {
                                rc = rdata->result;
-                       else {
-                               rc = cifs_readdata_to_iov(rdata, iov,
-                                                       nr_segs, *poffset,
-                                                       &copied);
-                               total_read += copied;
+                               /* resend call if it's a retryable error */
+                               if (rc == -EAGAIN) {
+                                       rc = cifs_retry_async_readv(rdata);
+                                       goto again;
+                               }
+                       } else {
+                               rc = cifs_readdata_to_iov(rdata, &to);
                        }
 
-                       /* resend call if it's a retryable error */
-                       if (rc == -EAGAIN) {
-                               rc = cifs_retry_async_readv(rdata);
-                               goto restart_loop;
-                       }
                }
                list_del_init(&rdata->list);
                kref_put(&rdata->refcount, cifs_uncached_readdata_release);
        }
 
+       total_read = len - iov_iter_count(&to);
+
        cifs_stats_bytes_read(tcon, total_read);
-       *poffset += total_read;
 
        /* mask nodata case */
        if (rc == -ENODATA)
                rc = 0;
 
-       return total_read ? total_read : rc;
-}
-
-ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
-                              unsigned long nr_segs, loff_t pos)
-{
-       ssize_t read;
-
-       read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
-       if (read > 0)
-               iocb->ki_pos = pos;
-
-       return read;
+       if (total_read) {
+               iocb->ki_pos = pos + total_read;
+               return total_read;
+       }
+       return rc;
 }
 
 ssize_t
index 9e81c630dfa76469cdd452e179882403a93b03c8..476f3ebf437ef40ddd7432200080825b7e9e992c 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -813,7 +813,7 @@ EXPORT_SYMBOL(kernel_read);
 
 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
 {
-       ssize_t res = file->f_op->read(file, (void __user *)addr, len, &pos);
+       ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_range(addr, addr + len);
        return res;
index 7682b970d0f1352cd019c8076ea55cc470ecd523..4e2c032ab8a184f31334f3c6e3be915a258ff794 100644 (file)
 #undef ORE_DBGMSG2
 #define ORE_DBGMSG2 ORE_DBGMSG
 
-struct page *_raid_page_alloc(void)
+static struct page *_raid_page_alloc(void)
 {
        return alloc_page(GFP_KERNEL);
 }
 
-void _raid_page_free(struct page *p)
+static void _raid_page_free(struct page *p)
 {
        __free_page(p);
 }
index 9d9763328734e63e27ce6ef6f364518cb7f602c0..ed73ed8ebbeea878073c6e6f6043df9e11bda1c4 100644 (file)
@@ -543,7 +543,7 @@ static int exofs_devs_2_odi(struct exofs_dt_device_info *dt_dev,
        return !(odi->systemid_len || odi->osdname_len);
 }
 
-int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs,
+static int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs,
                      struct exofs_dev **peds)
 {
        struct __alloc_ore_devs_and_exofs_devs {
index 4e508fc83dcf1b0b9b2934e4cf69506e063b0f6e..ca7502d89fdee07b96585c768854375b207daaf6 100644 (file)
@@ -146,7 +146,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
                        overwrite = 1;
        }
 
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_aio_write(iocb, iov, nr_segs);
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0) {
index b61293badfb1a9c98742a5bcc790751979251741..8f294cfac69749024c2c2e19d4b156755130e9ed 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
 
 int sysctl_nr_open __read_mostly = 1024*1024;
 int sysctl_nr_open_min = BITS_PER_LONG;
-int sysctl_nr_open_max = 1024 * 1024; /* raised later */
+/* our max() is unusable in constant expressions ;-/ */
+#define __const_max(x, y) ((x) < (y) ? (x) : (y))
+int sysctl_nr_open_max = __const_max(INT_MAX, ~(size_t)0/sizeof(void *)) &
+                        -BITS_PER_LONG;
 
 static void *alloc_fdmem(size_t size)
 {
@@ -429,12 +432,6 @@ void exit_files(struct task_struct *tsk)
        }
 }
 
-void __init files_defer_init(void)
-{
-       sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
-                            -BITS_PER_LONG;
-}
-
 struct files_struct init_files = {
        .count          = ATOMIC_INIT(1),
        .fdt            = &init_files.fdtab,
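
The initializer above works because everything in it is an integer constant expression, whereas the kernel's min()/max() macros expand to statement expressions with temporaries and so cannot appear at file scope. A standalone userspace sketch of the same construction (the macro name follows the hunk above; despite the name, the ternary yields the smaller operand, and BITS_PER_LONG is assumed to be 8 * sizeof(long)):

/* Userspace sketch of the constant-expression cap above. */
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define BITS_PER_LONG ((int)(8 * sizeof(long)))
/* plain ternary: usable in a file-scope initializer, picks the smaller value */
#define __const_max(x, y) ((x) < (y) ? (x) : (y))

static const int nr_open_max =
	__const_max(INT_MAX, ~(size_t)0 / sizeof(void *)) & -BITS_PER_LONG;

int main(void)
{
	printf("nr_open_max = %d\n", nr_open_max);
	return 0;
}

On a 64-bit build this prints INT_MAX rounded down to a multiple of 64, matching what the removed files_defer_init() computed at boot.
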
index 01071c4d752e1e41099c8082a31a55305c5c647d..a374f5033e97bab814977f8375f5c8cb1c899228 100644 (file)
@@ -52,7 +52,6 @@ static void file_free_rcu(struct rcu_head *head)
 static inline void file_free(struct file *f)
 {
        percpu_counter_dec(&nr_files);
-       file_check_state(f);
        call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
 }
 
@@ -178,47 +177,12 @@ struct file *alloc_file(struct path *path, fmode_t mode,
        file->f_mapping = path->dentry->d_inode->i_mapping;
        file->f_mode = mode;
        file->f_op = fop;
-
-       /*
-        * These mounts don't really matter in practice
-        * for r/o bind mounts.  They aren't userspace-
-        * visible.  We do this for consistency, and so
-        * that we can do debugging checks at __fput()
-        */
-       if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
-               file_take_write(file);
-               WARN_ON(mnt_clone_write(path->mnt));
-       }
        if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_inc(path->dentry->d_inode);
        return file;
 }
 EXPORT_SYMBOL(alloc_file);
 
-/**
- * drop_file_write_access - give up ability to write to a file
- * @file: the file to which we will stop writing
- *
- * This is a central place which will give up the ability
- * to write to @file, along with access to write through
- * its vfsmount.
- */
-static void drop_file_write_access(struct file *file)
-{
-       struct vfsmount *mnt = file->f_path.mnt;
-       struct dentry *dentry = file->f_path.dentry;
-       struct inode *inode = dentry->d_inode;
-
-       put_write_access(inode);
-
-       if (special_file(inode->i_mode))
-               return;
-       if (file_check_writeable(file) != 0)
-               return;
-       __mnt_drop_write(mnt);
-       file_release_write(file);
-}
-
 /* the real guts of fput() - releasing the last reference to file
  */
 static void __fput(struct file *file)
@@ -253,8 +217,10 @@ static void __fput(struct file *file)
        put_pid(file->f_owner.pid);
        if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
                i_readcount_dec(inode);
-       if (file->f_mode & FMODE_WRITE)
-               drop_file_write_access(file);
+       if (file->f_mode & FMODE_WRITER) {
+               put_write_access(inode);
+               __mnt_drop_write(mnt);
+       }
        file->f_path.dentry = NULL;
        file->f_path.mnt = NULL;
        file->f_inode = NULL;
@@ -359,6 +325,5 @@ void __init files_init(unsigned long mempages)
 
        n = (mempages * (PAGE_SIZE / 1024)) / 10;
        files_stat.max_files = max_t(unsigned long, n, NR_FILE);
-       files_defer_init();
        percpu_counter_init(&nr_files, 0);
 } 
index 0a648bb455ae70b5a84e55ad9209c5894822d1be..aac71ce373e4494fa879c6cc268964a7f7641a37 100644 (file)
@@ -667,15 +667,15 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
                struct pipe_buffer *buf = cs->currbuf;
 
                if (!cs->write) {
-                       buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
+                       kunmap_atomic(cs->mapaddr);
                } else {
-                       kunmap(buf->page);
+                       kunmap_atomic(cs->mapaddr);
                        buf->len = PAGE_SIZE - cs->len;
                }
                cs->currbuf = NULL;
                cs->mapaddr = NULL;
        } else if (cs->mapaddr) {
-               kunmap(cs->pg);
+               kunmap_atomic(cs->mapaddr);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
@@ -706,7 +706,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
-                       cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
+                       cs->mapaddr = kmap_atomic(buf->page);
                        cs->len = buf->len;
                        cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
@@ -726,7 +726,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        buf->len = 0;
 
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap(page);
+                       cs->mapaddr = kmap_atomic(page);
                        cs->buf = cs->mapaddr;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
@@ -745,7 +745,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        return err;
                BUG_ON(err != 1);
                offset = cs->addr % PAGE_SIZE;
-               cs->mapaddr = kmap(cs->pg);
+               cs->mapaddr = kmap_atomic(cs->pg);
                cs->buf = cs->mapaddr + offset;
                cs->len = min(PAGE_SIZE - offset, cs->seglen);
                cs->seglen -= cs->len;
@@ -874,7 +874,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
        unlock_page(newpage);
 out_fallback:
-       cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
+       cs->mapaddr = kmap_atomic(buf->page);
        cs->buf = cs->mapaddr + buf->offset;
 
        err = lock_request(cs->fc, cs->req);
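The fuse_copy_state hunks above drop the pipe_buffer ->map/->unmap hooks and plain kmap() in favour of kmap_atomic()/kunmap_atomic(). A minimal sketch of that pairing, outside of fuse (the copy_into_page() helper is illustrative only, not part of the patch):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative only: copy a kernel buffer into a page the way the
 * fuse_copy_* hunks above do it -- map atomically, copy, unmap, and only
 * then touch anything that might sleep.
 */
static void copy_into_page(struct page *page, unsigned int offset,
                           const void *src, size_t len)
{
        void *addr = kmap_atomic(page);   /* no sleeping until kunmap_atomic() */

        memcpy(addr + offset, src, len);
        kunmap_atomic(addr);
        flush_dcache_page(page);          /* as the fuse write path does after unmapping */
}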
index 48992cac714b413f644dbdd7c97ba87355831677..13f8bdec5110d1a7db12b2a262bb5e2ecb0e4f82 100644 (file)
@@ -1086,9 +1086,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
 
-               pagefault_disable();
                tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
-               pagefault_enable();
                flush_dcache_page(page);
 
                mark_page_accessed(page);
@@ -1237,8 +1235,7 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                goto out;
 
        if (file->f_flags & O_DIRECT) {
-               written = generic_file_direct_write(iocb, iov, &nr_segs,
-                                                   pos, &iocb->ki_pos,
+               written = generic_file_direct_write(iocb, iov, &nr_segs, pos, 
                                                    count, ocount);
                if (written < 0 || written == count)
                        goto out;
index b29e42f05f3442b887e58ca1972a9333ec474bd7..d55297f2fa058c18512d0136b8a5437b29bc481c 100644 (file)
@@ -10,7 +10,7 @@ struct mnt_namespace {
        struct user_namespace   *user_ns;
        u64                     seq;    /* Sequence number to prevent loops */
        wait_queue_head_t poll;
-       int event;
+       u64 event;
 };
 
 struct mnt_pcp {
@@ -104,6 +104,9 @@ struct proc_mounts {
        struct mnt_namespace *ns;
        struct path root;
        int (*show)(struct seq_file *, struct vfsmount *);
+       void *cached_mount;
+       u64 cached_event;
+       loff_t cached_index;
 };
 
 #define proc_mounts(p) (container_of((p), struct proc_mounts, m))
index 88339f59efb5d9b3691f5ebbad7e5ef6eb59c4db..c6157c894fce234c333d5a2d787f81ee3e5e7ba9 100644 (file)
@@ -358,6 +358,7 @@ int generic_permission(struct inode *inode, int mask)
 
        return -EACCES;
 }
+EXPORT_SYMBOL(generic_permission);
 
 /*
  * We _really_ want to just do "generic_permission()" without
@@ -455,6 +456,7 @@ int inode_permission(struct inode *inode, int mask)
                return retval;
        return __inode_permission(inode, mask);
 }
+EXPORT_SYMBOL(inode_permission);
 
 /**
  * path_get - get a reference to a path
@@ -924,6 +926,7 @@ int follow_up(struct path *path)
        path->mnt = &parent->mnt;
        return 1;
 }
+EXPORT_SYMBOL(follow_up);
 
 /*
  * Perform an automount
@@ -1085,6 +1088,7 @@ int follow_down_one(struct path *path)
        }
        return 0;
 }
+EXPORT_SYMBOL(follow_down_one);
 
 static inline bool managed_dentry_might_block(struct dentry *dentry)
 {
@@ -1223,6 +1227,7 @@ int follow_down(struct path *path)
        }
        return 0;
 }
+EXPORT_SYMBOL(follow_down);
 
 /*
  * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
@@ -2025,6 +2030,7 @@ int kern_path(const char *name, unsigned int flags, struct path *path)
                *path = nd.path;
        return res;
 }
+EXPORT_SYMBOL(kern_path);
 
 /**
  * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
@@ -2049,6 +2055,7 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
                *path = nd.path;
        return err;
 }
+EXPORT_SYMBOL(vfs_path_lookup);
 
 /*
  * Restricted form of lookup. Doesn't follow links, single-component only,
@@ -2111,6 +2118,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
 
        return __lookup_hash(&this, base, 0);
 }
+EXPORT_SYMBOL(lookup_one_len);
 
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
                 struct path *path, int *empty)
@@ -2135,6 +2143,7 @@ int user_path_at(int dfd, const char __user *name, unsigned flags,
 {
        return user_path_at_empty(dfd, name, flags, path, NULL);
 }
+EXPORT_SYMBOL(user_path_at);
 
 /*
  * NB: most callers don't do anything directly with the reference to the
@@ -2477,6 +2486,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
        mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
        return NULL;
 }
+EXPORT_SYMBOL(lock_rename);
 
 void unlock_rename(struct dentry *p1, struct dentry *p2)
 {
@@ -2486,6 +2496,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2)
                mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
        }
 }
+EXPORT_SYMBOL(unlock_rename);
 
 int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                bool want_excl)
@@ -2506,6 +2517,7 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
                fsnotify_create(dir, dentry);
        return error;
 }
+EXPORT_SYMBOL(vfs_create);
 
 static int may_open(struct path *path, int acc_mode, int flag)
 {
@@ -3375,6 +3387,7 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
                fsnotify_create(dir, dentry);
        return error;
 }
+EXPORT_SYMBOL(vfs_mknod);
 
 static int may_mknod(umode_t mode)
 {
@@ -3464,6 +3477,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
                fsnotify_mkdir(dir, dentry);
        return error;
 }
+EXPORT_SYMBOL(vfs_mkdir);
 
 SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
 {
@@ -3518,6 +3532,7 @@ void dentry_unhash(struct dentry *dentry)
                __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
 }
+EXPORT_SYMBOL(dentry_unhash);
 
 int vfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -3555,6 +3570,7 @@ out:
                d_delete(dentry);
        return error;
 }
+EXPORT_SYMBOL(vfs_rmdir);
 
 static long do_rmdir(int dfd, const char __user *pathname)
 {
@@ -3672,6 +3688,7 @@ out:
 
        return error;
 }
+EXPORT_SYMBOL(vfs_unlink);
 
 /*
  * Make sure that the actual truncation of the file will occur outside its
@@ -3785,6 +3802,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
                fsnotify_create(dir, dentry);
        return error;
 }
+EXPORT_SYMBOL(vfs_symlink);
 
 SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
                int, newdfd, const char __user *, newname)
@@ -3893,6 +3911,7 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
                fsnotify_link(dir, inode, new_dentry);
        return error;
 }
+EXPORT_SYMBOL(vfs_link);
 
 /*
  * Hardlinks are often used in delicate situations.  We avoid
@@ -4152,6 +4171,7 @@ out:
 
        return error;
 }
+EXPORT_SYMBOL(vfs_rename);
 
 SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
                int, newdfd, const char __user *, newname, unsigned int, flags)
@@ -4304,11 +4324,9 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
        return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
 }
 
-int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
+int readlink_copy(char __user *buffer, int buflen, const char *link)
 {
-       int len;
-
-       len = PTR_ERR(link);
+       int len = PTR_ERR(link);
        if (IS_ERR(link))
                goto out;
 
@@ -4320,6 +4338,7 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
 out:
        return len;
 }
+EXPORT_SYMBOL(readlink_copy);
 
 /*
  * A helper for ->readlink().  This should be used *ONLY* for symlinks that
@@ -4337,11 +4356,12 @@ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
        if (IS_ERR(cookie))
                return PTR_ERR(cookie);
 
-       res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
+       res = readlink_copy(buffer, buflen, nd_get_link(&nd));
        if (dentry->d_inode->i_op->put_link)
                dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
        return res;
 }
+EXPORT_SYMBOL(generic_readlink);
 
 /* get the link contents into pagecache */
 static char *page_getlink(struct dentry * dentry, struct page **ppage)
@@ -4361,14 +4381,14 @@ static char *page_getlink(struct dentry * dentry, struct page **ppage)
 int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 {
        struct page *page = NULL;
-       char *s = page_getlink(dentry, &page);
-       int res = vfs_readlink(dentry,buffer,buflen,s);
+       int res = readlink_copy(buffer, buflen, page_getlink(dentry, &page));
        if (page) {
                kunmap(page);
                page_cache_release(page);
        }
        return res;
 }
+EXPORT_SYMBOL(page_readlink);
 
 void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
 {
@@ -4376,6 +4396,7 @@ void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
        nd_set_link(nd, page_getlink(dentry, &page));
        return page;
 }
+EXPORT_SYMBOL(page_follow_link_light);
 
 void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
 {
@@ -4386,6 +4407,7 @@ void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
                page_cache_release(page);
        }
 }
+EXPORT_SYMBOL(page_put_link);
 
 /*
  * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
@@ -4423,45 +4445,18 @@ retry:
 fail:
        return err;
 }
+EXPORT_SYMBOL(__page_symlink);
 
 int page_symlink(struct inode *inode, const char *symname, int len)
 {
        return __page_symlink(inode, symname, len,
                        !(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
 }
+EXPORT_SYMBOL(page_symlink);
 
 const struct inode_operations page_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
 };
-
-EXPORT_SYMBOL(user_path_at);
-EXPORT_SYMBOL(follow_down_one);
-EXPORT_SYMBOL(follow_down);
-EXPORT_SYMBOL(follow_up);
-EXPORT_SYMBOL(get_write_access); /* nfsd */
-EXPORT_SYMBOL(lock_rename);
-EXPORT_SYMBOL(lookup_one_len);
-EXPORT_SYMBOL(page_follow_link_light);
-EXPORT_SYMBOL(page_put_link);
-EXPORT_SYMBOL(page_readlink);
-EXPORT_SYMBOL(__page_symlink);
-EXPORT_SYMBOL(page_symlink);
 EXPORT_SYMBOL(page_symlink_inode_operations);
-EXPORT_SYMBOL(kern_path);
-EXPORT_SYMBOL(vfs_path_lookup);
-EXPORT_SYMBOL(inode_permission);
-EXPORT_SYMBOL(unlock_rename);
-EXPORT_SYMBOL(vfs_create);
-EXPORT_SYMBOL(vfs_link);
-EXPORT_SYMBOL(vfs_mkdir);
-EXPORT_SYMBOL(vfs_mknod);
-EXPORT_SYMBOL(generic_permission);
-EXPORT_SYMBOL(vfs_readlink);
-EXPORT_SYMBOL(vfs_rename);
-EXPORT_SYMBOL(vfs_rmdir);
-EXPORT_SYMBOL(vfs_symlink);
-EXPORT_SYMBOL(vfs_unlink);
-EXPORT_SYMBOL(dentry_unhash);
-EXPORT_SYMBOL(generic_readlink);
index 2ffc5a2905d463e828c387fbfb92a31f219f98bc..182bc41cd88711d593c4d997171c6ad483a87577 100644 (file)
@@ -52,7 +52,7 @@ static int __init set_mphash_entries(char *str)
 }
 __setup("mphash_entries=", set_mphash_entries);
 
-static int event;
+static u64 event;
 static DEFINE_IDA(mnt_id_ida);
 static DEFINE_IDA(mnt_group_ida);
 static DEFINE_SPINLOCK(mnt_id_lock);
@@ -414,9 +414,7 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);
  */
 int __mnt_want_write_file(struct file *file)
 {
-       struct inode *inode = file_inode(file);
-
-       if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
+       if (!(file->f_mode & FMODE_WRITER))
                return __mnt_want_write(file->f_path.mnt);
        else
                return mnt_clone_write(file->f_path.mnt);
@@ -570,13 +568,17 @@ int sb_prepare_remount_readonly(struct super_block *sb)
 static void free_vfsmnt(struct mount *mnt)
 {
        kfree(mnt->mnt_devname);
-       mnt_free_id(mnt);
 #ifdef CONFIG_SMP
        free_percpu(mnt->mnt_pcp);
 #endif
        kmem_cache_free(mnt_cache, mnt);
 }
 
+static void delayed_free_vfsmnt(struct rcu_head *head)
+{
+       free_vfsmnt(container_of(head, struct mount, mnt_rcu));
+}
+
 /* call under rcu_read_lock */
 bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 {
@@ -848,6 +850,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
 
        root = mount_fs(type, flags, name, data);
        if (IS_ERR(root)) {
+               mnt_free_id(mnt);
                free_vfsmnt(mnt);
                return ERR_CAST(root);
        }
@@ -885,7 +888,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
                        goto out_free;
        }
 
-       mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD;
+       mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
        /* Don't allow unprivileged users to change mount flags */
        if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))
                mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;
@@ -928,20 +931,11 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
        return mnt;
 
  out_free:
+       mnt_free_id(mnt);
        free_vfsmnt(mnt);
        return ERR_PTR(err);
 }
 
-static void delayed_free(struct rcu_head *head)
-{
-       struct mount *mnt = container_of(head, struct mount, mnt_rcu);
-       kfree(mnt->mnt_devname);
-#ifdef CONFIG_SMP
-       free_percpu(mnt->mnt_pcp);
-#endif
-       kmem_cache_free(mnt_cache, mnt);
-}
-
 static void mntput_no_expire(struct mount *mnt)
 {
 put_again:
@@ -991,7 +985,7 @@ put_again:
        dput(mnt->mnt.mnt_root);
        deactivate_super(mnt->mnt.mnt_sb);
        mnt_free_id(mnt);
-       call_rcu(&mnt->mnt_rcu, delayed_free);
+       call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
 }
 
 void mntput(struct vfsmount *mnt)
@@ -1100,14 +1094,29 @@ static void *m_start(struct seq_file *m, loff_t *pos)
        struct proc_mounts *p = proc_mounts(m);
 
        down_read(&namespace_sem);
-       return seq_list_start(&p->ns->list, *pos);
+       if (p->cached_event == p->ns->event) {
+               void *v = p->cached_mount;
+               if (*pos == p->cached_index)
+                       return v;
+               if (*pos == p->cached_index + 1) {
+                       v = seq_list_next(v, &p->ns->list, &p->cached_index);
+                       return p->cached_mount = v;
+               }
+       }
+
+       p->cached_event = p->ns->event;
+       p->cached_mount = seq_list_start(&p->ns->list, *pos);
+       p->cached_index = *pos;
+       return p->cached_mount;
 }
 
 static void *m_next(struct seq_file *m, void *v, loff_t *pos)
 {
        struct proc_mounts *p = proc_mounts(m);
 
-       return seq_list_next(v, &p->ns->list, pos);
+       p->cached_mount = seq_list_next(v, &p->ns->list, pos);
+       p->cached_index = *pos;
+       return p->cached_mount;
 }
 
 static void m_stop(struct seq_file *m, void *v)
@@ -1661,9 +1670,9 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                if (err)
                        goto out;
                err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
+               lock_mount_hash();
                if (err)
                        goto out_cleanup_ids;
-               lock_mount_hash();
                for (p = source_mnt; p; p = next_mnt(p, source_mnt))
                        set_mnt_shared(p);
        } else {
@@ -1690,6 +1699,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
        return 0;
 
  out_cleanup_ids:
+       while (!hlist_empty(&tree_list)) {
+               child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+               umount_tree(child, 0);
+       }
+       unlock_mount_hash();
        cleanup_group_ids(source_mnt, NULL);
  out:
        return err;
@@ -2044,7 +2058,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
        struct mount *parent;
        int err;
 
-       mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | MNT_DOOMED | MNT_SYNC_UMOUNT);
+       mnt_flags &= ~MNT_INTERNAL_FLAGS;
 
        mp = lock_mount(path);
        if (IS_ERR(mp))
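In the fs/namespace.c hunks above, mnt_free_id() moves out of free_vfsmnt() to the callers, and the old open-coded delayed_free() becomes delayed_free_vfsmnt(), which just defers free_vfsmnt() past an RCU grace period. A minimal sketch of that call_rcu()/container_of() pattern with a hypothetical object (not the real struct mount):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int id;
        struct rcu_head rcu;    /* embedded callback head, like mount.mnt_rcu */
};

static void foo_free_rcu(struct rcu_head *head)
{
        /* recover the enclosing object from the embedded rcu_head */
        kfree(container_of(head, struct foo, rcu));
}

static void foo_release(struct foo *f)
{
        /*
         * readers found under rcu_read_lock() may still hold f, so defer
         * the actual kfree() until after a grace period
         */
        call_rcu(&f->rcu, foo_free_rcu);
}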
index 81b4f643ecefda84444cca57481958bfca11ea06..e31e589369a49a930f45be2124114c7252c83723 100644 (file)
@@ -470,9 +470,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 {
        struct ncp_mount_data_kernel data;
        struct ncp_server *server;
-       struct file *ncp_filp;
        struct inode *root_inode;
-       struct inode *sock_inode;
        struct socket *sock;
        int error;
        int default_bufsize;
@@ -541,18 +539,10 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        if (!uid_valid(data.mounted_uid) || !uid_valid(data.uid) ||
            !gid_valid(data.gid))
                goto out;
-       error = -EBADF;
-       ncp_filp = fget(data.ncp_fd);
-       if (!ncp_filp)
-               goto out;
-       error = -ENOTSOCK;
-       sock_inode = file_inode(ncp_filp);
-       if (!S_ISSOCK(sock_inode->i_mode))
-               goto out_fput;
-       sock = SOCKET_I(sock_inode);
+       sock = sockfd_lookup(data.ncp_fd, &error);
        if (!sock)
-               goto out_fput;
-               
+               goto out;
+
        if (sock->type == SOCK_STREAM)
                default_bufsize = 0xF000;
        else
@@ -574,27 +564,16 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
        if (error)
                goto out_fput;
 
-       server->ncp_filp = ncp_filp;
        server->ncp_sock = sock;
        
        if (data.info_fd != -1) {
-               struct socket *info_sock;
-
-               error = -EBADF;
-               server->info_filp = fget(data.info_fd);
-               if (!server->info_filp)
-                       goto out_bdi;
-               error = -ENOTSOCK;
-               sock_inode = file_inode(server->info_filp);
-               if (!S_ISSOCK(sock_inode->i_mode))
-                       goto out_fput2;
-               info_sock = SOCKET_I(sock_inode);
+               struct socket *info_sock = sockfd_lookup(data.info_fd, &error);
                if (!info_sock)
-                       goto out_fput2;
+                       goto out_bdi;
+               server->info_sock = info_sock;
                error = -EBADFD;
                if (info_sock->type != SOCK_STREAM)
                        goto out_fput2;
-               server->info_sock = info_sock;
        }
 
 /*     server->lock = 0;       */
@@ -766,17 +745,12 @@ out_nls:
        mutex_destroy(&server->root_setup_lock);
        mutex_destroy(&server->mutex);
 out_fput2:
-       if (server->info_filp)
-               fput(server->info_filp);
+       if (server->info_sock)
+               sockfd_put(server->info_sock);
 out_bdi:
        bdi_destroy(&server->bdi);
 out_fput:
-       /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
-        * 
-        * The previously used put_filp(ncp_filp); was bogus, since
-        * it doesn't perform proper unlocking.
-        */
-       fput(ncp_filp);
+       sockfd_put(sock);
 out:
        put_pid(data.wdog_pid);
        sb->s_fs_info = NULL;
@@ -809,9 +783,9 @@ static void ncp_put_super(struct super_block *sb)
        mutex_destroy(&server->root_setup_lock);
        mutex_destroy(&server->mutex);
 
-       if (server->info_filp)
-               fput(server->info_filp);
-       fput(server->ncp_filp);
+       if (server->info_sock)
+               sockfd_put(server->info_sock);
+       sockfd_put(server->ncp_sock);
        kill_pid(server->m.wdog_pid, SIGTERM, 1);
        put_pid(server->m.wdog_pid);
 
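The ncpfs hunks above stop caching struct file pointers and resolve the descriptors with sockfd_lookup()/sockfd_put() instead of fget() plus an S_ISSOCK check. A minimal sketch of that pattern (helper name and error handling are illustrative only):

#include <linux/net.h>
#include <linux/socket.h>

static struct socket *grab_stream_socket(int fd, int *err)
{
        struct socket *sock = sockfd_lookup(fd, err);   /* takes a file reference */

        if (!sock)
                return NULL;            /* *err already set by sockfd_lookup() */
        if (sock->type != SOCK_STREAM) {
                sockfd_put(sock);       /* drops the reference taken above */
                *err = -EBADFD;
                return NULL;
        }
        return sock;                    /* caller must sockfd_put() when done */
}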
index b81e97adc5a9b9862e19ce0b25ab83a652909565..7fa17e4593669379d420fafa11d355276a6eba72 100644 (file)
@@ -45,9 +45,7 @@ struct ncp_server {
 
        __u8 name_space[NCP_NUMBER_OF_VOLUMES + 2];
 
-       struct file *ncp_filp;  /* File pointer to ncp socket */
        struct socket *ncp_sock;/* ncp socket */
-       struct file *info_filp;
        struct socket *info_sock;
 
        u8 sequence;
index 9d8153ebacfb1c2784543eb319b4de4cdf5f16e7..f47af5e6e23037ed0c82b17274b098d00a617517 100644 (file)
@@ -1704,8 +1704,6 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi)
        iput(bvi);
 skip_large_index_stuff:
        /* Setup the operations for this index inode. */
-       vi->i_op = NULL;
-       vi->i_fop = NULL;
        vi->i_mapping->a_ops = &ntfs_mst_aops;
        vi->i_blocks = ni->allocated_size >> 9;
        /*
index eb649d23a4de24e4cde6860389528e7b2ef919dc..dfda2ffdb16c15aa8338b1de6f58ccc42ff5b9ba 100644 (file)
@@ -916,57 +916,30 @@ static struct o2net_msg_handler *o2net_handler_get(u32 msg_type, u32 key)
 
 static int o2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
 {
-       int ret;
-       mm_segment_t oldfs;
-       struct kvec vec = {
-               .iov_len = len,
-               .iov_base = data,
-       };
-       struct msghdr msg = {
-               .msg_iovlen = 1,
-               .msg_iov = (struct iovec *)&vec,
-                       .msg_flags = MSG_DONTWAIT,
-       };
-
-       oldfs = get_fs();
-       set_fs(get_ds());
-       ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
-       set_fs(oldfs);
-
-       return ret;
+       struct kvec vec = { .iov_len = len, .iov_base = data, };
+       struct msghdr msg = { .msg_flags = MSG_DONTWAIT, };
+       return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
 }
 
 static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
                              size_t veclen, size_t total)
 {
        int ret;
-       mm_segment_t oldfs;
-       struct msghdr msg = {
-               .msg_iov = (struct iovec *)vec,
-               .msg_iovlen = veclen,
-       };
+       struct msghdr msg;
 
        if (sock == NULL) {
                ret = -EINVAL;
                goto out;
        }
 
-       oldfs = get_fs();
-       set_fs(get_ds());
-       ret = sock_sendmsg(sock, &msg, total);
-       set_fs(oldfs);
-       if (ret != total) {
-               mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
-                    total);
-               if (ret >= 0)
-                       ret = -EPIPE; /* should be smarter, I bet */
-               goto out;
-       }
-
-       ret = 0;
+       ret = kernel_sendmsg(sock, &msg, vec, veclen, total);
+       if (likely(ret == total))
+               return 0;
+       mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret, total);
+       if (ret >= 0)
+               ret = -EPIPE; /* should be smarter, I bet */
 out:
-       if (ret < 0)
-               mlog(0, "returning error: %d\n", ret);
+       mlog(0, "returning error: %d\n", ret);
        return ret;
 }
 
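o2net_recv_tcp_msg()/o2net_send_tcp_msg() above replace the set_fs(get_ds()) + sock_recvmsg()/sock_sendmsg() dance with kernel_recvmsg()/kernel_sendmsg(), which take kernel-space kvecs directly. A minimal sketch of a round trip with those helpers (function names and the short-send policy mirror the hunk but are illustrative only):

#include <linux/net.h>
#include <linux/socket.h>

/* send a kernel buffer over @sock without touching the address limit */
static int send_blob(struct socket *sock, void *data, size_t len)
{
        struct kvec vec = { .iov_base = data, .iov_len = len };
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
        int ret = kernel_sendmsg(sock, &msg, &vec, 1, len);

        if (ret < 0)
                return ret;
        return (size_t)ret == len ? 0 : -EPIPE;   /* short send, as o2net treats it */
}

/* receive up to @len bytes into a kernel buffer */
static int recv_blob(struct socket *sock, void *data, size_t len)
{
        struct kvec vec = { .iov_base = data, .iov_len = len };
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT };

        return kernel_recvmsg(sock, &msg, &vec, 1, len, msg.msg_flags);
}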
index ff33c5ef87f2b826532af886436be9adcc8f7046..8970dcf74de53e71539e8fad7042a55f66a2c713 100644 (file)
@@ -2367,15 +2367,18 @@ relock:
 
        if (direct_io) {
                written = generic_file_direct_write(iocb, iov, &nr_segs, *ppos,
-                                                   ppos, count, ocount);
+                                                   count, ocount);
                if (written < 0) {
                        ret = written;
                        goto out_dio;
                }
        } else {
+               struct iov_iter from;
+               iov_iter_init(&from, iov, nr_segs, count, 0);
                current->backing_dev_info = file->f_mapping->backing_dev_info;
-               written = generic_file_buffered_write(iocb, iov, nr_segs, *ppos,
-                                                     ppos, count, 0);
+               written = generic_perform_write(file, &from, *ppos);
+               if (likely(written >= 0))
+                       iocb->ki_pos = *ppos + written;
                current->backing_dev_info = NULL;
        }
 
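The buffered-write path above (and the xfs one further down) now wraps the caller's iovecs in an iov_iter and calls generic_perform_write() directly instead of generic_file_buffered_write(), advancing ki_pos by hand. A minimal sketch of that calling convention as it appears in these hunks (the wrapper name is hypothetical; the five-argument iov_iter_init() is the form this series uses):

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t buffered_write_sketch(struct kiocb *iocb, const struct iovec *iov,
                                     unsigned long nr_segs, loff_t pos, size_t count)
{
        struct file *file = iocb->ki_filp;
        struct iov_iter from;
        ssize_t written;

        iov_iter_init(&from, iov, nr_segs, count, 0);   /* wrap the user iovecs */
        written = generic_perform_write(file, &from, pos);
        if (written >= 0)
                iocb->ki_pos = pos + written;           /* caller now advances the position */
        return written;
}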
index 631aea815def32946433b8aebed9a312d0fc872c..3d30eb1fc95e383e50e91605d3526161bcfdebde 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -655,35 +655,6 @@ out:
        return error;
 }
 
-/*
- * You have to be very careful that these write
- * counts get cleaned up in error cases and
- * upon __fput().  This should probably never
- * be called outside of __dentry_open().
- */
-static inline int __get_file_write_access(struct inode *inode,
-                                         struct vfsmount *mnt)
-{
-       int error;
-       error = get_write_access(inode);
-       if (error)
-               return error;
-       /*
-        * Do not take mount writer counts on
-        * special files since no writes to
-        * the mount itself will occur.
-        */
-       if (!special_file(inode->i_mode)) {
-               /*
-                * Balanced in __fput()
-                */
-               error = __mnt_want_write(mnt);
-               if (error)
-                       put_write_access(inode);
-       }
-       return error;
-}
-
 int open_check_o_direct(struct file *f)
 {
        /* NB: we're sure to have correct a_ops only after f_op->open */
@@ -708,26 +679,28 @@ static int do_dentry_open(struct file *f,
        f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK |
                                FMODE_PREAD | FMODE_PWRITE;
 
-       if (unlikely(f->f_flags & O_PATH))
-               f->f_mode = FMODE_PATH;
-
        path_get(&f->f_path);
        inode = f->f_inode = f->f_path.dentry->d_inode;
-       if (f->f_mode & FMODE_WRITE) {
-               error = __get_file_write_access(inode, f->f_path.mnt);
-               if (error)
-                       goto cleanup_file;
-               if (!special_file(inode->i_mode))
-                       file_take_write(f);
-       }
-
        f->f_mapping = inode->i_mapping;
 
-       if (unlikely(f->f_mode & FMODE_PATH)) {
+       if (unlikely(f->f_flags & O_PATH)) {
+               f->f_mode = FMODE_PATH;
                f->f_op = &empty_fops;
                return 0;
        }
 
+       if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
+               error = get_write_access(inode);
+               if (unlikely(error))
+                       goto cleanup_file;
+               error = __mnt_want_write(f->f_path.mnt);
+               if (unlikely(error)) {
+                       put_write_access(inode);
+                       goto cleanup_file;
+               }
+               f->f_mode |= FMODE_WRITER;
+       }
+
        /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
        if (S_ISREG(inode->i_mode))
                f->f_mode |= FMODE_ATOMIC_POS;
@@ -764,18 +737,9 @@ static int do_dentry_open(struct file *f,
 
 cleanup_all:
        fops_put(f->f_op);
-       if (f->f_mode & FMODE_WRITE) {
+       if (f->f_mode & FMODE_WRITER) {
                put_write_access(inode);
-               if (!special_file(inode->i_mode)) {
-                       /*
-                        * We don't consider this a real
-                        * mnt_want/drop_write() pair
-                        * because it all happenend right
-                        * here, so just reset the state.
-                        */
-                       file_reset_write(f);
-                       __mnt_drop_write(f->f_path.mnt);
-               }
+               __mnt_drop_write(f->f_path.mnt);
        }
 cleanup_file:
        path_put(&f->f_path);
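With FMODE_WRITER, do_dentry_open() above takes the inode write access and the vfsmount write count together, and __fput() (in the fs/file_table.c hunk near the top of this section) drops exactly the same pair, so the old drop_file_write_access()/file_{take,release}_write() bookkeeping goes away. A condensed sketch of that pairing under the assumption it sits inside fs/ where __mnt_want_write()/__mnt_drop_write() are visible; the helper names are hypothetical, not the kernel's:

#include <linux/fs.h>
#include "internal.h"   /* __mnt_want_write()/__mnt_drop_write(), fs-internal */

/* acquire side, condensed from do_dentry_open() above */
static int take_write_refs(struct file *f, struct inode *inode)
{
        int error;

        if (!(f->f_mode & FMODE_WRITE) || special_file(inode->i_mode))
                return 0;                       /* nothing to pin for devices etc. */
        error = get_write_access(inode);        /* -ETXTBSY while the file is being executed */
        if (error)
                return error;
        error = __mnt_want_write(f->f_path.mnt);
        if (error) {
                put_write_access(inode);
                return error;
        }
        f->f_mode |= FMODE_WRITER;              /* records that both refs are held */
        return 0;
}

/* release side, condensed from __fput() in the fs/file_table.c hunk */
static void drop_write_refs(struct file *f, struct inode *inode)
{
        if (f->f_mode & FMODE_WRITER) {
                put_write_access(inode);
                __mnt_drop_write(f->f_path.mnt);
        }
}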
index 78fd0d0788dbc465c3d7a6ba674bb64a6ebea25d..034bffac3f9724c6121f4635ba9740d61e106d06 100644 (file)
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -142,55 +142,6 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
        return 0;
 }
 
-static int
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
-                     int atomic)
-{
-       unsigned long copy;
-
-       while (len > 0) {
-               while (!iov->iov_len)
-                       iov++;
-               copy = min_t(unsigned long, len, iov->iov_len);
-
-               if (atomic) {
-                       if (__copy_to_user_inatomic(iov->iov_base, from, copy))
-                               return -EFAULT;
-               } else {
-                       if (copy_to_user(iov->iov_base, from, copy))
-                               return -EFAULT;
-               }
-               from += copy;
-               len -= copy;
-               iov->iov_base += copy;
-               iov->iov_len -= copy;
-       }
-       return 0;
-}
-
-/*
- * Attempt to pre-fault in the user memory, so we can use atomic copies.
- * Returns the number of bytes not faulted in.
- */
-static int iov_fault_in_pages_write(struct iovec *iov, unsigned long len)
-{
-       while (!iov->iov_len)
-               iov++;
-
-       while (len > 0) {
-               unsigned long this_len;
-
-               this_len = min_t(unsigned long, len, iov->iov_len);
-               if (fault_in_pages_writeable(iov->iov_base, this_len))
-                       break;
-
-               len -= this_len;
-               iov++;
-       }
-
-       return len;
-}
-
 /*
  * Pre-fault in the user memory, so we can use atomic copies.
  */
@@ -225,52 +176,6 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
                page_cache_release(page);
 }
 
-/**
- * generic_pipe_buf_map - virtually map a pipe buffer
- * @pipe:      the pipe that the buffer belongs to
- * @buf:       the buffer that should be mapped
- * @atomic:    whether to use an atomic map
- *
- * Description:
- *     This function returns a kernel virtual address mapping for the
- *     pipe_buffer passed in @buf. If @atomic is set, an atomic map is provided
- *     and the caller has to be careful not to fault before calling
- *     the unmap function.
- *
- *     Note that this function calls kmap_atomic() if @atomic != 0.
- */
-void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
-                          struct pipe_buffer *buf, int atomic)
-{
-       if (atomic) {
-               buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-               return kmap_atomic(buf->page);
-       }
-
-       return kmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_map);
-
-/**
- * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
- * @pipe:      the pipe that the buffer belongs to
- * @buf:       the buffer that should be unmapped
- * @map_data:  the data that the mapping function returned
- *
- * Description:
- *     This function undoes the mapping that ->map() provided.
- */
-void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
-                           struct pipe_buffer *buf, void *map_data)
-{
-       if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
-               buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-               kunmap_atomic(map_data);
-       } else
-               kunmap(buf->page);
-}
-EXPORT_SYMBOL(generic_pipe_buf_unmap);
-
 /**
  * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
  * @pipe:      the pipe that the buffer belongs to
@@ -351,8 +256,6 @@ EXPORT_SYMBOL(generic_pipe_buf_release);
 
 static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
@@ -361,8 +264,6 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
 
 static const struct pipe_buf_operations packet_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = anon_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
@@ -379,12 +280,15 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
        ssize_t ret;
        struct iovec *iov = (struct iovec *)_iov;
        size_t total_len;
+       struct iov_iter iter;
 
        total_len = iov_length(iov, nr_segs);
        /* Null read succeeds. */
        if (unlikely(total_len == 0))
                return 0;
 
+       iov_iter_init(&iter, iov, nr_segs, total_len, 0);
+
        do_wakeup = 0;
        ret = 0;
        __pipe_lock(pipe);
@@ -394,9 +298,9 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                        int curbuf = pipe->curbuf;
                        struct pipe_buffer *buf = pipe->bufs + curbuf;
                        const struct pipe_buf_operations *ops = buf->ops;
-                       void *addr;
                        size_t chars = buf->len;
-                       int error, atomic;
+                       size_t written;
+                       int error;
 
                        if (chars > total_len)
                                chars = total_len;
@@ -408,21 +312,10 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
                                break;
                        }
 
-                       atomic = !iov_fault_in_pages_write(iov, chars);
-redo:
-                       addr = ops->map(pipe, buf, atomic);
-                       error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
-                       ops->unmap(pipe, buf, addr);
-                       if (unlikely(error)) {
-                               /*
-                                * Just retry with the slow path if we failed.
-                                */
-                               if (atomic) {
-                                       atomic = 0;
-                                       goto redo;
-                               }
+                       written = copy_page_to_iter(buf->page, buf->offset, chars, &iter);
+                       if (unlikely(written < chars)) {
                                if (!ret)
-                                       ret = error;
+                                       ret = -EFAULT;
                                break;
                        }
                        ret += chars;
@@ -538,10 +431,16 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
 
                        iov_fault_in_pages_read(iov, chars);
 redo1:
-                       addr = ops->map(pipe, buf, atomic);
+                       if (atomic)
+                               addr = kmap_atomic(buf->page);
+                       else
+                               addr = kmap(buf->page);
                        error = pipe_iov_copy_from_user(offset + addr, iov,
                                                        chars, atomic);
-                       ops->unmap(pipe, buf, addr);
+                       if (atomic)
+                               kunmap_atomic(addr);
+                       else
+                               kunmap(buf->page);
                        ret = error;
                        do_wakeup = 1;
                        if (error) {
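pipe_read() above drops the map/copy/unmap retry loop in favour of a single copy_page_to_iter() on an iov_iter built from the caller's iovecs, with a short count treated as -EFAULT. A minimal sketch of that consumer pattern (hypothetical helper; unlike pipe_read(), which builds the iterator once per read, this builds one per call):

#include <linux/uio.h>
#include <linux/mm.h>

/* copy @chars bytes of @page (starting at @offset) to the user iovecs */
static ssize_t copy_buf_to_user(struct page *page, unsigned int offset,
                                size_t chars, const struct iovec *iov,
                                unsigned long nr_segs)
{
        struct iov_iter iter;
        size_t copied;

        iov_iter_init(&iter, iov, nr_segs, chars, 0);
        copied = copy_page_to_iter(page, offset, chars, &iter);
        return copied == chars ? copied : -EFAULT;      /* partial copy => fault */
}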
index 88396df725b4bbe84dc7d57eaf7a259877e5d87c..302bf22c4a30762013dbbfd64d0353250101eb62 100644 (file)
@@ -164,46 +164,94 @@ static struct mount *propagation_next(struct mount *m,
        }
 }
 
-/*
- * return the source mount to be used for cloning
- *
- * @dest       the current destination mount
- * @last_dest          the last seen destination mount
- * @last_src   the last seen source mount
- * @type       return CL_SLAVE if the new mount has to be
- *             cloned as a slave.
- */
-static struct mount *get_source(struct mount *dest,
-                               struct mount *last_dest,
-                               struct mount *last_src,
-                               int *type)
+static struct mount *next_group(struct mount *m, struct mount *origin)
 {
-       struct mount *p_last_src = NULL;
-       struct mount *p_last_dest = NULL;
-
-       while (last_dest != dest->mnt_master) {
-               p_last_dest = last_dest;
-               p_last_src = last_src;
-               last_dest = last_dest->mnt_master;
-               last_src = last_src->mnt_master;
+       while (1) {
+               while (1) {
+                       struct mount *next;
+                       if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+                               return first_slave(m);
+                       next = next_peer(m);
+                       if (m->mnt_group_id == origin->mnt_group_id) {
+                               if (next == origin)
+                                       return NULL;
+                       } else if (m->mnt_slave.next != &next->mnt_slave)
+                               break;
+                       m = next;
+               }
+               /* m is the last peer */
+               while (1) {
+                       struct mount *master = m->mnt_master;
+                       if (m->mnt_slave.next != &master->mnt_slave_list)
+                               return next_slave(m);
+                       m = next_peer(master);
+                       if (master->mnt_group_id == origin->mnt_group_id)
+                               break;
+                       if (master->mnt_slave.next == &m->mnt_slave)
+                               break;
+                       m = master;
+               }
+               if (m == origin)
+                       return NULL;
        }
+}
 
-       if (p_last_dest) {
-               do {
-                       p_last_dest = next_peer(p_last_dest);
-               } while (IS_MNT_NEW(p_last_dest));
-               /* is that a peer of the earlier? */
-               if (dest == p_last_dest) {
-                       *type = CL_MAKE_SHARED;
-                       return p_last_src;
+/* all accesses are serialized by namespace_sem */
+static struct user_namespace *user_ns;
+static struct mount *last_dest, *last_source, *dest_master;
+static struct mountpoint *mp;
+static struct hlist_head *list;
+
+static int propagate_one(struct mount *m)
+{
+       struct mount *child;
+       int type;
+       /* skip ones added by this propagate_mnt() */
+       if (IS_MNT_NEW(m))
+               return 0;
+       /* skip if mountpoint isn't covered by it */
+       if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
+               return 0;
+       if (m->mnt_group_id == last_dest->mnt_group_id) {
+               type = CL_MAKE_SHARED;
+       } else {
+               struct mount *n, *p;
+               for (n = m; ; n = p) {
+                       p = n->mnt_master;
+                       if (p == dest_master || IS_MNT_MARKED(p)) {
+                               while (last_dest->mnt_master != p) {
+                                       last_source = last_source->mnt_master;
+                                       last_dest = last_source->mnt_parent;
+                               }
+                               if (n->mnt_group_id != last_dest->mnt_group_id) {
+                                       last_source = last_source->mnt_master;
+                                       last_dest = last_source->mnt_parent;
+                               }
+                               break;
+                       }
                }
+               type = CL_SLAVE;
+               /* beginning of peer group among the slaves? */
+               if (IS_MNT_SHARED(m))
+                       type |= CL_MAKE_SHARED;
        }
-       /* slave of the earlier, then */
-       *type = CL_SLAVE;
-       /* beginning of peer group among the slaves? */
-       if (IS_MNT_SHARED(dest))
-               *type |= CL_MAKE_SHARED;
-       return last_src;
+               
+       /* Notice when we are propagating across user namespaces */
+       if (m->mnt_ns->user_ns != user_ns)
+               type |= CL_UNPRIVILEGED;
+       child = copy_tree(last_source, last_source->mnt.mnt_root, type);
+       if (IS_ERR(child))
+               return PTR_ERR(child);
+       mnt_set_mountpoint(m, mp, child);
+       last_dest = m;
+       last_source = child;
+       if (m->mnt_master != dest_master) {
+               read_seqlock_excl(&mount_lock);
+               SET_MNT_MARK(m->mnt_master);
+               read_sequnlock_excl(&mount_lock);
+       }
+       hlist_add_head(&child->mnt_hash, list);
+       return 0;
 }
 
 /*
@@ -222,56 +270,48 @@ static struct mount *get_source(struct mount *dest,
 int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
                    struct mount *source_mnt, struct hlist_head *tree_list)
 {
-       struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
-       struct mount *m, *child;
+       struct mount *m, *n;
        int ret = 0;
-       struct mount *prev_dest_mnt = dest_mnt;
-       struct mount *prev_src_mnt  = source_mnt;
-       HLIST_HEAD(tmp_list);
-
-       for (m = propagation_next(dest_mnt, dest_mnt); m;
-                       m = propagation_next(m, dest_mnt)) {
-               int type;
-               struct mount *source;
-
-               if (IS_MNT_NEW(m))
-                       continue;
-
-               source =  get_source(m, prev_dest_mnt, prev_src_mnt, &type);
-
-               /* Notice when we are propagating across user namespaces */
-               if (m->mnt_ns->user_ns != user_ns)
-                       type |= CL_UNPRIVILEGED;
-
-               child = copy_tree(source, source->mnt.mnt_root, type);
-               if (IS_ERR(child)) {
-                       ret = PTR_ERR(child);
-                       tmp_list = *tree_list;
-                       tmp_list.first->pprev = &tmp_list.first;
-                       INIT_HLIST_HEAD(tree_list);
+
+       /*
+        * we don't want to bother passing tons of arguments to
+        * propagate_one(); everything is serialized by namespace_sem,
+        * so globals will do just fine.
+        */
+       user_ns = current->nsproxy->mnt_ns->user_ns;
+       last_dest = dest_mnt;
+       last_source = source_mnt;
+       mp = dest_mp;
+       list = tree_list;
+       dest_master = dest_mnt->mnt_master;
+
+       /* all peers of dest_mnt, except dest_mnt itself */
+       for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
+               ret = propagate_one(n);
+               if (ret)
                        goto out;
-               }
+       }
 
-               if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
-                       mnt_set_mountpoint(m, dest_mp, child);
-                       hlist_add_head(&child->mnt_hash, tree_list);
-               } else {
-                       /*
-                        * This can happen if the parent mount was bind mounted
-                        * on some subdirectory of a shared/slave mount.
-                        */
-                       hlist_add_head(&child->mnt_hash, &tmp_list);
-               }
-               prev_dest_mnt = m;
-               prev_src_mnt  = child;
+       /* all slave groups */
+       for (m = next_group(dest_mnt, dest_mnt); m;
+                       m = next_group(m, dest_mnt)) {
+               /* everything in that slave group */
+               n = m;
+               do {
+                       ret = propagate_one(n);
+                       if (ret)
+                               goto out;
+                       n = next_peer(n);
+               } while (n != m);
        }
 out:
-       lock_mount_hash();
-       while (!hlist_empty(&tmp_list)) {
-               child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
-               umount_tree(child, 0);
+       read_seqlock_excl(&mount_lock);
+       hlist_for_each_entry(n, tree_list, mnt_hash) {
+               m = n->mnt_parent;
+               if (m->mnt_master != dest_mnt->mnt_master)
+                       CLEAR_MNT_MARK(m->mnt_master);
        }
-       unlock_mount_hash();
+       read_sequnlock_excl(&mount_lock);
        return ret;
 }
 
index fc28a27fa89233d24b90bdd748477994b8713ecd..4a246358b03183994461d9eae5d8efe09db2e6dc 100644 (file)
@@ -16,6 +16,9 @@
 #define IS_MNT_NEW(m)  (!(m)->mnt_ns)
 #define CLEAR_MNT_SHARED(m) ((m)->mnt.mnt_flags &= ~MNT_SHARED)
 #define IS_MNT_UNBINDABLE(m) ((m)->mnt.mnt_flags & MNT_UNBINDABLE)
+#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
+#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
+#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
 
 #define CL_EXPIRE              0x01
 #define CL_SLAVE               0x02
index 6b7087e2e8fb059c36ac19e0013fc86f595e5149..2d696b0c93bfbec6f1dd73ff4a8f71c9da569e4d 100644 (file)
@@ -200,41 +200,9 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
        return result;
 }
 
-static int proc_pid_cmdline(struct task_struct *task, char * buffer)
+static int proc_pid_cmdline(struct task_struct *task, char *buffer)
 {
-       int res = 0;
-       unsigned int len;
-       struct mm_struct *mm = get_task_mm(task);
-       if (!mm)
-               goto out;
-       if (!mm->arg_end)
-               goto out_mm;    /* Shh! No looking before we're done */
-
-       len = mm->arg_end - mm->arg_start;
-       if (len > PAGE_SIZE)
-               len = PAGE_SIZE;
-       res = access_process_vm(task, mm->arg_start, buffer, len, 0);
-
-       // If the nul at the end of args has been overwritten, then
-       // assume application is using setproctitle(3).
-       if (res > 0 && buffer[res-1] != '\0' && len < PAGE_SIZE) {
-               len = strnlen(buffer, res);
-               if (len < res) {
-                   res = len;
-               } else {
-                       len = mm->env_end - mm->env_start;
-                       if (len > PAGE_SIZE - res)
-                               len = PAGE_SIZE - res;
-                       res += access_process_vm(task, mm->env_start, buffer+res, len, 0);
-                       res = strnlen(buffer, res);
-               }
-       }
-out_mm:
-       mmput(mm);
-out:
-       return res;
+       return get_cmdline(task, buffer, PAGE_SIZE);
 }
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
index 9ae46b87470dd9fe9fe6962c689abb4a7500e697..89026095f2b51a081ab37098c4b8f3f1da762519 100644 (file)
@@ -146,7 +146,7 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
        struct task_struct *task;
        void *ns;
        char name[50];
-       int len = -EACCES;
+       int res = -EACCES;
 
        task = get_proc_task(inode);
        if (!task)
@@ -155,24 +155,18 @@ static int proc_ns_readlink(struct dentry *dentry, char __user *buffer, int bufl
        if (!ptrace_may_access(task, PTRACE_MODE_READ))
                goto out_put_task;
 
-       len = -ENOENT;
+       res = -ENOENT;
        ns = ns_ops->get(task);
        if (!ns)
                goto out_put_task;
 
        snprintf(name, sizeof(name), "%s:[%u]", ns_ops->name, ns_ops->inum(ns));
-       len = strlen(name);
-
-       if (len > buflen)
-               len = buflen;
-       if (copy_to_user(buffer, name, len))
-               len = -EFAULT;
-
+       res = readlink_copy(buffer, buflen, name);
        ns_ops->put(ns);
 out_put_task:
        put_task_struct(task);
 out:
-       return len;
+       return res;
 }
 
 static const struct inode_operations proc_ns_link_inode_operations = {
index ffeb202ec942d3f3f83594d517e329bafa89ce98..4348bb8907c20a6abc211b9fa944eca005d6f47c 100644 (file)
@@ -16,7 +16,7 @@ static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
        if (!tgid)
                return -ENOENT;
        sprintf(tmp, "%d", tgid);
-       return vfs_readlink(dentry,buffer,buflen,tmp);
+       return readlink_copy(buffer, buflen, tmp);
 }
 
 static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
index 7be26f03a3f5813ed501bea520e79041af4466f7..1a81373947f33ec4e56b5e34e24814b4c267800a 100644 (file)
@@ -267,6 +267,7 @@ static int mounts_open_common(struct inode *inode, struct file *file,
        p->root = root;
        p->m.poll_event = ns->event;
        p->show = show;
+       p->cached_event = ~0ULL;
 
        return 0;
 
index 12028fa41def9c007b0217b6c4bc481868d93792..9bc07d2b53cf3a0e66605912386f9d2367601585 100644 (file)
@@ -136,8 +136,6 @@ error:
 
 const struct pipe_buf_operations page_cache_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = page_cache_pipe_buf_confirm,
        .release = page_cache_pipe_buf_release,
        .steal = page_cache_pipe_buf_steal,
@@ -156,8 +154,6 @@ static int user_page_pipe_buf_steal(struct pipe_inode_info *pipe,
 
 static const struct pipe_buf_operations user_page_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = page_cache_pipe_buf_release,
        .steal = user_page_pipe_buf_steal,
@@ -547,8 +543,6 @@ EXPORT_SYMBOL(generic_file_splice_read);
 
 static const struct pipe_buf_operations default_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
@@ -564,8 +558,6 @@ static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a socket and similar. */
 const struct pipe_buf_operations nosteal_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = generic_pipe_buf_release,
        .steal = generic_pipe_buf_nosteal,
@@ -767,13 +759,13 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                goto out;
 
        if (buf->page != page) {
-               char *src = buf->ops->map(pipe, buf, 1);
+               char *src = kmap_atomic(buf->page);
                char *dst = kmap_atomic(page);
 
                memcpy(dst + offset, src + buf->offset, this_len);
                flush_dcache_page(page);
                kunmap_atomic(dst);
-               buf->ops->unmap(pipe, buf, src);
+               kunmap_atomic(src);
        }
        ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
                                page, fsdata);
@@ -1067,9 +1059,9 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
        void *data;
        loff_t tmp = sd->pos;
 
-       data = buf->ops->map(pipe, buf, 0);
+       data = kmap(buf->page);
        ret = __kernel_write(sd->u.file, data + buf->offset, sd->len, &tmp);
-       buf->ops->unmap(pipe, buf, data);
+       kunmap(buf->page);
 
        return ret;
 }
@@ -1528,116 +1520,48 @@ static int get_iovec_page_array(const struct iovec __user *iov,
 static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
                        struct splice_desc *sd)
 {
-       char *src;
-       int ret;
-
-       /*
-        * See if we can use the atomic maps, by prefaulting in the
-        * pages and doing an atomic copy
-        */
-       if (!fault_in_pages_writeable(sd->u.userptr, sd->len)) {
-               src = buf->ops->map(pipe, buf, 1);
-               ret = __copy_to_user_inatomic(sd->u.userptr, src + buf->offset,
-                                                       sd->len);
-               buf->ops->unmap(pipe, buf, src);
-               if (!ret) {
-                       ret = sd->len;
-                       goto out;
-               }
-       }
-
-       /*
-        * No dice, use slow non-atomic map and copy
-        */
-       src = buf->ops->map(pipe, buf, 0);
-
-       ret = sd->len;
-       if (copy_to_user(sd->u.userptr, src + buf->offset, sd->len))
-               ret = -EFAULT;
-
-       buf->ops->unmap(pipe, buf, src);
-out:
-       if (ret > 0)
-               sd->u.userptr += ret;
-       return ret;
+       int n = copy_page_to_iter(buf->page, buf->offset, sd->len, sd->u.data);
+       return n == sd->len ? n : -EFAULT;
 }
 
 /*
  * For lack of a better implementation, implement vmsplice() to userspace
  * as a simple copy of the pipes pages to the user iov.
  */
-static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
+static long vmsplice_to_user(struct file *file, const struct iovec __user *uiov,
                             unsigned long nr_segs, unsigned int flags)
 {
        struct pipe_inode_info *pipe;
        struct splice_desc sd;
-       ssize_t size;
-       int error;
        long ret;
+       struct iovec iovstack[UIO_FASTIOV];
+       struct iovec *iov = iovstack;
+       struct iov_iter iter;
+       ssize_t count = 0;
 
        pipe = get_pipe_info(file);
        if (!pipe)
                return -EBADF;
 
-       pipe_lock(pipe);
-
-       error = ret = 0;
-       while (nr_segs) {
-               void __user *base;
-               size_t len;
-
-               /*
-                * Get user address base and length for this iovec.
-                */
-               error = get_user(base, &iov->iov_base);
-               if (unlikely(error))
-                       break;
-               error = get_user(len, &iov->iov_len);
-               if (unlikely(error))
-                       break;
-
-               /*
-                * Sanity check this iovec. 0 read succeeds.
-                */
-               if (unlikely(!len))
-                       break;
-               if (unlikely(!base)) {
-                       error = -EFAULT;
-                       break;
-               }
-
-               if (unlikely(!access_ok(VERIFY_WRITE, base, len))) {
-                       error = -EFAULT;
-                       break;
-               }
-
-               sd.len = 0;
-               sd.total_len = len;
-               sd.flags = flags;
-               sd.u.userptr = base;
-               sd.pos = 0;
-
-               size = __splice_from_pipe(pipe, &sd, pipe_to_user);
-               if (size < 0) {
-                       if (!ret)
-                               ret = size;
-
-                       break;
-               }
-
-               ret += size;
+       ret = rw_copy_check_uvector(READ, uiov, nr_segs,
+                                   ARRAY_SIZE(iovstack), iovstack, &iov);
+       if (ret <= 0)
+               return ret;
 
-               if (size < len)
-                       break;
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
 
-               nr_segs--;
-               iov++;
-       }
+       sd.len = 0;
+       sd.total_len = count;
+       sd.flags = flags;
+       sd.u.data = &iter;
+       sd.pos = 0;
 
+       pipe_lock(pipe);
+       ret = __splice_from_pipe(pipe, &sd, pipe_to_user);
        pipe_unlock(pipe);
 
-       if (!ret)
-               ret = error;
+       if (iov != iovstack)
+               kfree(iov);
 
        return ret;
 }
index 1037637957c7670e1a66e6bf1a8e51c80fbcc49d..d2c170f8b035a4b21ef6eac2274e74b137346d56 100644 (file)
@@ -171,7 +171,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        } else
                up_write(&iinfo->i_data_sem);
 
-       retval = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       retval = __generic_file_aio_write(iocb, iov, nr_segs);
        mutex_unlock(&inode->i_mutex);
 
        if (retval > 0) {
index 003c0051b62fa34ce086154413095b4fbdb4317d..79e96ce987331cad3aab2d0a7513ce19cad2fcb4 100644 (file)
@@ -699,7 +699,7 @@ xfs_file_dio_aio_write(
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_direct_write(iocb, iovp,
-                       &nr_segs, pos, &iocb->ki_pos, count, ocount);
+                       &nr_segs, pos, count, ocount);
 
 out:
        xfs_rw_iunlock(ip, iolock);
@@ -715,7 +715,7 @@ xfs_file_buffered_aio_write(
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
-       size_t                  ocount)
+       size_t                  count)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -724,7 +724,7 @@ xfs_file_buffered_aio_write(
        ssize_t                 ret;
        int                     enospc = 0;
        int                     iolock = XFS_IOLOCK_EXCL;
-       size_t                  count = ocount;
+       struct iov_iter         from;
 
        xfs_rw_ilock(ip, iolock);
 
@@ -732,14 +732,15 @@ xfs_file_buffered_aio_write(
        if (ret)
                goto out;
 
+       iov_iter_init(&from, iovp, nr_segs, count, 0);
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
 
 write_retry:
        trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
-       ret = generic_file_buffered_write(iocb, iovp, nr_segs,
-                       pos, &iocb->ki_pos, count, 0);
-
+       ret = generic_perform_write(file, &from, pos);
+       if (likely(ret >= 0))
+               iocb->ki_pos = pos + ret;
        /*
         * If we just got an ENOSPC, try to write back all dirty inodes to
         * convert delalloc space to free up some of the excess reserved
index bcfe61202115510b22509ad49aadc16bcbbb4368..0b18776b075e44fb7f7d818183fa03dcfc7b3f4d 100644 (file)
@@ -271,32 +271,6 @@ xfs_open_by_handle(
        return error;
 }
 
-/*
- * This is a copy from fs/namei.c:vfs_readlink(), except for removing it's
- * unused first argument.
- */
-STATIC int
-do_readlink(
-       char __user             *buffer,
-       int                     buflen,
-       const char              *link)
-{
-        int len;
-
-       len = PTR_ERR(link);
-       if (IS_ERR(link))
-               goto out;
-
-       len = strlen(link);
-       if (len > (unsigned) buflen)
-               len = buflen;
-       if (copy_to_user(buffer, link, len))
-               len = -EFAULT;
- out:
-       return len;
-}
-
-
 int
 xfs_readlink_by_handle(
        struct file             *parfilp,
@@ -334,7 +308,7 @@ xfs_readlink_by_handle(
        error = -xfs_readlink(XFS_I(dentry->d_inode), link);
        if (error)
                goto out_kfree;
-       error = do_readlink(hreq->ohandle, olen, link);
+       error = readlink_copy(hreq->ohandle, olen, link);
        if (error)
                goto out_kfree;
 
index 5b09392db6734f6ddab3f01e58ee526a65ae5e6b..d401e5463fb02daae31c1ad34013c1690937cbda 100644 (file)
@@ -144,8 +144,6 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
 
 /**
  * syscall_get_arch - return the AUDIT_ARCH for the current system call
- * @task:      task of interest, must be in system call entry tracing
- * @regs:      task_pt_regs() of @task
  *
  * Returns the AUDIT_ARCH_* based on the system call convention in use.
  *
@@ -155,5 +153,5 @@ void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
  * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
  * provide an implementation of this.
  */
-int syscall_get_arch(struct task_struct *task, struct pt_regs *regs);
+int syscall_get_arch(void);
 #endif /* _ASM_SYSCALL_H */
index 97d5497debc1c7dc0b7b6c75199810bf13b6c03f..595f85c392ac443cd6cf23e3536c32ee58ecda15 100644 (file)
@@ -56,6 +56,12 @@ extern bool i915_gpu_turbo_disable(void);
 
 #define I830_GMCH_CTRL                 0x52
 
+#define I830_GMCH_GMS_MASK             0x70
+#define I830_GMCH_GMS_LOCAL            0x10
+#define I830_GMCH_GMS_STOLEN_512       0x20
+#define I830_GMCH_GMS_STOLEN_1024      0x30
+#define I830_GMCH_GMS_STOLEN_8192      0x40
+
 #define I855_GMCH_GMS_MASK             0xF0
 #define I855_GMCH_GMS_STOLEN_0M                0x0
 #define I855_GMCH_GMS_STOLEN_1M                (0x1 << 4)
@@ -72,4 +78,18 @@ extern bool i915_gpu_turbo_disable(void);
 #define INTEL_GMCH_GMS_STOLEN_224M     (0xc << 4)
 #define INTEL_GMCH_GMS_STOLEN_352M     (0xd << 4)
 
+#define I830_DRB3              0x63
+#define I85X_DRB3              0x43
+#define I865_TOUD              0xc4
+
+#define I830_ESMRAMC           0x91
+#define I845_ESMRAMC           0x9e
+#define I85X_ESMRAMC           0x61
+#define    TSEG_ENABLE         (1 << 0)
+#define    I830_TSEG_SIZE_512K (0 << 1)
+#define    I830_TSEG_SIZE_1M   (1 << 1)
+#define    I845_TSEG_SIZE_MASK (3 << 1)
+#define    I845_TSEG_SIZE_512K (2 << 1)
+#define    I845_TSEG_SIZE_1M   (3 << 1)
+
 #endif                         /* _I915_DRM_H_ */
index fb0298082916fa7f7622de3fc2233ca4732b649e..329436d38e665c36c363d5348ea34c6030fc35b2 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/list.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/dmaengine.h>
 
 /**
@@ -103,12 +104,12 @@ static inline void devm_acpi_dma_controller_free(struct device *dev)
 static inline struct dma_chan *acpi_dma_request_slave_chan_by_index(
                struct device *dev, size_t index)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 static inline struct dma_chan *acpi_dma_request_slave_chan_by_name(
                struct device *dev, const char *name)
 {
-       return NULL;
+       return ERR_PTR(-ENODEV);
 }
 
 #define acpi_dma_simple_xlate  NULL
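Editorial sketch, not part of the patch: with the !CONFIG stubs above now returning ERR_PTR(-ENODEV) instead of NULL, callers are expected to test the result with IS_ERR(); the helper name, the 'dev' pointer and the "tx" channel name below are made up.

	/* hypothetical caller; only the IS_ERR() check reflects the change above */
	static struct dma_chan *example_get_tx_chan(struct device *dev)
	{
		struct dma_chan *chan;

		chan = acpi_dma_request_slave_chan_by_name(dev, "tx");
		if (IS_ERR(chan))	/* a plain NULL check no longer catches the stub path */
			return NULL;

		return chan;
	}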
index ec1464df4c60930a7fde38f1bc6f35a35b7f79d3..22cfddb7556631f7337b3e31b86ec32f9109c615 100644 (file)
@@ -79,6 +79,14 @@ extern int is_audit_feature_set(int which);
 extern int __init audit_register_class(int class, unsigned *list);
 extern int audit_classify_syscall(int abi, unsigned syscall);
 extern int audit_classify_arch(int arch);
+/* only for compat system calls */
+extern unsigned compat_write_class[];
+extern unsigned compat_read_class[];
+extern unsigned compat_dir_class[];
+extern unsigned compat_chattr_class[];
+extern unsigned compat_signal_class[];
+
+extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall);
 
 /* audit_names->type values */
 #define        AUDIT_TYPE_UNKNOWN      0       /* we don't know yet */
@@ -94,6 +102,12 @@ struct filename;
 
 extern void audit_log_session_info(struct audit_buffer *ab);
 
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+#define audit_is_compat(arch)  (!((arch) & __AUDIT_ARCH_64BIT))
+#else
+#define audit_is_compat(arch)  false
+#endif
+
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
                                /* Public API */
index 5a4d39b4686be4fb1f78c06442f29d4d955297d7..bba5508269219a0726ede9f97cc3d22afa97f213 100644 (file)
@@ -216,9 +216,9 @@ static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
 }
 
 #define for_each_bvec(bvl, bio_vec, iter, start)                       \
-       for ((iter) = start;                                            \
-            (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
-               (iter).bi_size;                                         \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
             bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
 
 
@@ -388,7 +388,7 @@ struct sg_iovec;
 struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,
                                    struct block_device *,
-                                   struct sg_iovec *, int, int, gfp_t);
+                                   const struct sg_iovec *, int, int, gfp_t);
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
@@ -414,7 +414,8 @@ extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);
 extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
                                 unsigned long, unsigned int, int, gfp_t);
 extern struct bio *bio_copy_user_iov(struct request_queue *,
-                                    struct rq_map_data *, struct sg_iovec *,
+                                    struct rq_map_data *,
+                                    const struct sg_iovec *,
                                     int, int, gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
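Editorial sketch, not part of the patch: the reordered for_each_bvec() now tests (iter).bi_size before evaluating bvec_iter_bvec(), so an empty iterator never touches the vector. A hypothetical helper showing the iteration pattern:

	static unsigned int example_bvec_bytes(struct bio_vec *bvec,
					       struct bvec_iter start)
	{
		struct bio_vec bv;
		struct bvec_iter iter;
		unsigned int bytes = 0;

		/* safe even when start.bi_size == 0: the size check runs first */
		for_each_bvec(bv, bvec, iter, start)
			bytes += bv.bv_len;

		return bytes;
	}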
index bbc3a6c88fce3410b954b6c91c407297e2f03e7f..aa0eaa2d0bd85854e231f9fecd56e4f037446d55 100644 (file)
@@ -189,6 +189,7 @@ enum rq_flag_bits {
        __REQ_KERNEL,           /* direct IO to kernel pages */
        __REQ_PM,               /* runtime pm request */
        __REQ_END,              /* last of chain of requests */
+       __REQ_HASHED,           /* on IO scheduler merge hash */
        __REQ_NR_BITS,          /* stops here */
 };
 
@@ -241,5 +242,6 @@ enum rq_flag_bits {
 #define REQ_KERNEL             (1ULL << __REQ_KERNEL)
 #define REQ_PM                 (1ULL << __REQ_PM)
 #define REQ_END                        (1ULL << __REQ_END)
+#define REQ_HASHED             (1ULL << __REQ_HASHED)
 
 #endif /* __LINUX_BLK_TYPES_H */
index 1e1fa3f93d5fc804627108a32737527e2b44b4d5..0d84981ee03fc1c9d7bd5b656611b8b87af696e0 100644 (file)
@@ -118,7 +118,18 @@ struct request {
        struct bio *bio;
        struct bio *biotail;
 
-       struct hlist_node hash; /* merge hash */
+       /*
+        * The hash is used inside the scheduler, and killed once the
+        * request reaches the dispatch list. The ipi_list is only used
+        * to queue the request for softirq completion, which is long
+        * after the request has been unhashed (and even removed from
+        * the dispatch list).
+        */
+       union {
+               struct hlist_node hash; /* merge hash */
+               struct list_head ipi_list;
+       };
+
        /*
         * The rb_node is only used inside the io scheduler, requests
         * are pruned when moved to the dispatch queue. So let the
@@ -824,8 +835,8 @@ extern int blk_rq_map_user(struct request_queue *, struct request *,
 extern int blk_rq_unmap_user(struct bio *);
 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
-                              struct rq_map_data *, struct sg_iovec *, int,
-                              unsigned int, gfp_t);
+                              struct rq_map_data *, const struct sg_iovec *,
+                              int, unsigned int, gfp_t);
 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                          struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
index d77797a52b7bf01ab367c312e457b1e0771107a8..c40302f909ce00910260e2818dd712d658257a77 100644 (file)
@@ -210,8 +210,8 @@ int block_write_full_page(struct page *page, get_block_t *get_block,
 int block_write_full_page_endio(struct page *page, get_block_t *get_block,
                        struct writeback_control *wbc, bh_end_io_t *handler);
 int block_read_full_page(struct page*, get_block_t*);
-int block_is_partially_uptodate(struct page *page, read_descriptor_t *desc,
-                               unsigned long from);
+int block_is_partially_uptodate(struct page *page, unsigned long from,
+                               unsigned long count);
 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
                unsigned flags, struct page **pagep, get_block_t *get_block);
 int __block_write_begin(struct page *page, loff_t pos, unsigned len,
index c48e595f623e682cea2b8a8e5d83cd30f5275ad0..5ae5100c1f2457ec9573870f2b90875031255a02 100644 (file)
@@ -455,11 +455,14 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
 
-#define CPUFREQ_ENTRY_INVALID ~0
-#define CPUFREQ_TABLE_END     ~1
-#define CPUFREQ_BOOST_FREQ    ~2
+/* Special Values of .frequency field */
+#define CPUFREQ_ENTRY_INVALID  ~0
+#define CPUFREQ_TABLE_END      ~1
+/* Special Values of .flags field */
+#define CPUFREQ_BOOST_FREQ     (1 << 0)
 
 struct cpufreq_frequency_table {
+       unsigned int    flags;
        unsigned int    driver_data; /* driver specific data, not used by core */
        unsigned int    frequency; /* kHz - doesn't need to be in ascending
                                    * order */
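Editorial sketch, not part of the patch: with CPUFREQ_BOOST_FREQ now a bit in the new .flags field rather than a magic .frequency value, a driver table would look roughly like this (frequencies made up):

	static struct cpufreq_frequency_table example_freq_table[] = {
		{ .frequency = 800000 },	/*  800 MHz */
		{ .frequency = 1200000 },	/* 1200 MHz */
		{ .flags = CPUFREQ_BOOST_FREQ, .frequency = 1500000 },
		{ .frequency = CPUFREQ_TABLE_END },
	};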
index c5c92d59e5316820d0ae078fe7ce78e8407f2ede..8300fb87b84ac1329d8facb1d560767c29c549d8 100644 (file)
@@ -341,15 +341,11 @@ enum dma_slave_buswidth {
  * and this struct will then be passed in as an argument to the
  * DMA engine device_control() function.
  *
- * The rationale for adding configuration information to this struct
- * is as follows: if it is likely that most DMA slave controllers in
- * the world will support the configuration option, then make it
- * generic. If not: if it is fixed so that it be sent in static from
- * the platform data, then prefer to do that. Else, if it is neither
- * fixed at runtime, nor generic enough (such as bus mastership on
- * some CPU family and whatnot) then create a custom slave config
- * struct and pass that, then make this config a member of that
- * struct, if applicable.
+ * The rationale for adding configuration information to this struct is as
+ * follows: if it is likely that more than one DMA slave controller in
+ * the world will support the configuration option, then make it generic.
+ * If not: if it is fixed so that it can be sent statically from the platform
+ * data, then prefer to do that.
  */
 struct dma_slave_config {
        enum dma_transfer_direction direction;
index 481ab2345d6be3425be028efaf2c423bd085becc..68b4024184de434895229779c7b8918d9c89bafb 100644 (file)
@@ -1,6 +1,5 @@
 /*
- * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
- * AVR32 systems.)
+ * Driver for the Synopsys DesignWare DMA Controller
  *
  * Copyright (C) 2007 Atmel Corporation
  * Copyright (C) 2010-2011 ST Microelectronics
@@ -44,8 +43,6 @@ struct dw_dma_slave {
  * @nr_masters: Number of AHB masters supported by the controller
  * @data_width: Maximum data width supported by hardware per AHB master
  *             (0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
- * @sd: slave specific data. Used for configuring channels
- * @sd_count: count of slave data structures passed.
  */
 struct dw_dma_platform_data {
        unsigned int    nr_channels;
index 70e8e21c0a303a3db8b7a59ff3b6f511cf225c46..230f87bdf5ad02008ff622e65bc761e41d4b22e0 100644 (file)
@@ -63,8 +63,6 @@ struct file_operations;
 struct vfsmount;
 struct dentry;
 
-extern void __init files_defer_init(void);
-
 #define rcu_dereference_check_fdtable(files, fdtfd) \
        rcu_dereference_check((fdtfd), lockdep_is_held(&(files)->file_lock))
 
index 81048f9bc7837e3ce32fb12dddf158a09fbaf302..7a9c5bca2b7694f5496dbcf793eea2920fd37af9 100644 (file)
@@ -48,6 +48,7 @@ struct cred;
 struct swap_info_struct;
 struct seq_file;
 struct workqueue_struct;
+struct iov_iter;
 
 extern void __init inode_init(void);
 extern void __init inode_init_early(void);
@@ -125,6 +126,8 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
 
 /* File needs atomic accesses to f_pos */
 #define FMODE_ATOMIC_POS       ((__force fmode_t)0x8000)
+/* Write access to underlying fs */
+#define FMODE_WRITER           ((__force fmode_t)0x10000)
 
 /* File was opened by fanotify and shouldn't generate fanotify events */
 #define FMODE_NONOTIFY         ((__force fmode_t)0x1000000)
@@ -293,38 +296,6 @@ struct page;
 struct address_space;
 struct writeback_control;
 
-struct iov_iter {
-       const struct iovec *iov;
-       unsigned long nr_segs;
-       size_t iov_offset;
-       size_t count;
-};
-
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes);
-void iov_iter_advance(struct iov_iter *i, size_t bytes);
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
-size_t iov_iter_single_seg_count(const struct iov_iter *i);
-
-static inline void iov_iter_init(struct iov_iter *i,
-                       const struct iovec *iov, unsigned long nr_segs,
-                       size_t count, size_t written)
-{
-       i->iov = iov;
-       i->nr_segs = nr_segs;
-       i->iov_offset = 0;
-       i->count = count + written;
-
-       iov_iter_advance(i, written);
-}
-
-static inline size_t iov_iter_count(struct iov_iter *i)
-{
-       return i->count;
-}
-
 /*
  * "descriptor" for what we're up to with a read.
  * This allows us to use the same read code yet
@@ -383,7 +354,7 @@ struct address_space_operations {
        int (*migratepage) (struct address_space *,
                        struct page *, struct page *, enum migrate_mode);
        int (*launder_page) (struct page *);
-       int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
+       int (*is_partially_uptodate) (struct page *, unsigned long,
                                        unsigned long);
        void (*is_dirty_writeback) (struct page *, bool *, bool *);
        int (*error_remove_page)(struct address_space *, struct page *);
@@ -770,9 +741,6 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index)
                index <  ra->start + ra->size);
 }
 
-#define FILE_MNT_WRITE_TAKEN   1
-#define FILE_MNT_WRITE_RELEASED        2
-
 struct file {
        union {
                struct llist_node       fu_llist;
@@ -810,9 +778,6 @@ struct file {
        struct list_head        f_tfile_llink;
 #endif /* #ifdef CONFIG_EPOLL */
        struct address_space    *f_mapping;
-#ifdef CONFIG_DEBUG_WRITECOUNT
-       unsigned long f_mnt_write_state;
-#endif
 } __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
 
 struct file_handle {
@@ -830,49 +795,6 @@ static inline struct file *get_file(struct file *f)
 #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1)
 #define file_count(x)  atomic_long_read(&(x)->f_count)
 
-#ifdef CONFIG_DEBUG_WRITECOUNT
-static inline void file_take_write(struct file *f)
-{
-       WARN_ON(f->f_mnt_write_state != 0);
-       f->f_mnt_write_state = FILE_MNT_WRITE_TAKEN;
-}
-static inline void file_release_write(struct file *f)
-{
-       f->f_mnt_write_state |= FILE_MNT_WRITE_RELEASED;
-}
-static inline void file_reset_write(struct file *f)
-{
-       f->f_mnt_write_state = 0;
-}
-static inline void file_check_state(struct file *f)
-{
-       /*
-        * At this point, either both or neither of these bits
-        * should be set.
-        */
-       WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN);
-       WARN_ON(f->f_mnt_write_state == FILE_MNT_WRITE_RELEASED);
-}
-static inline int file_check_writeable(struct file *f)
-{
-       if (f->f_mnt_write_state == FILE_MNT_WRITE_TAKEN)
-               return 0;
-       printk(KERN_WARNING "writeable file with no "
-                           "mnt_want_write()\n");
-       WARN_ON(1);
-       return -EINVAL;
-}
-#else /* !CONFIG_DEBUG_WRITECOUNT */
-static inline void file_take_write(struct file *filp) {}
-static inline void file_release_write(struct file *filp) {}
-static inline void file_reset_write(struct file *filp) {}
-static inline void file_check_state(struct file *filp) {}
-static inline int file_check_writeable(struct file *filp)
-{
-       return 0;
-}
-#endif /* CONFIG_DEBUG_WRITECOUNT */
-
 #define        MAX_NON_LFS     ((1UL<<31) - 1)
 
 /* Page cache limit. The filesystems should put that into their s_maxbytes 
@@ -2481,16 +2403,13 @@ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
 extern int generic_file_remap_pages(struct vm_area_struct *, unsigned long addr,
                unsigned long size, pgoff_t pgoff);
-extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
 int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk);
 extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t);
-extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long,
-               loff_t *);
+extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long);
 extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t);
 extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *,
-               unsigned long *, loff_t, loff_t *, size_t, size_t);
-extern ssize_t generic_file_buffered_write(struct kiocb *, const struct iovec *,
-               unsigned long, loff_t, loff_t *, size_t, ssize_t);
+               unsigned long *, loff_t, size_t, size_t);
+extern ssize_t generic_perform_write(struct file *, struct iov_iter *, loff_t);
 extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
 extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
 extern int generic_segment_checks(const struct iovec *iov,
@@ -2582,7 +2501,7 @@ extern const struct file_operations generic_ro_fops;
 
 #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
 
-extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
+extern int readlink_copy(char __user *, int, const char *);
 extern int page_readlink(struct dentry *, char __user *, int);
 extern void *page_follow_link_light(struct dentry *, struct nameidata *);
 extern void page_put_link(struct dentry *, struct nameidata *, void *);
index cdc30111d2f8d0b1b16c832d4aeb481ea4e68e80..d16da3e53bc732710e7ea05e3e500d0f1784a94c 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/perf_event.h>
+#include <linux/tracepoint.h>
 
 struct trace_array;
 struct trace_buffer;
@@ -232,6 +233,7 @@ enum {
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_WAS_ENABLED_BIT,
        TRACE_EVENT_FL_USE_CALL_FILTER_BIT,
+       TRACE_EVENT_FL_TRACEPOINT_BIT,
 };
 
 /*
@@ -244,6 +246,7 @@ enum {
  *                    (used for module unloading, if a module event is enabled,
  *                     it is best to clear the buffers that used it).
  *  USE_CALL_FILTER - For ftrace internal events, don't use file filter
+ *  TRACEPOINT    - Event is a tracepoint
  */
 enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
@@ -252,12 +255,17 @@ enum {
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_WAS_ENABLED      = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
        TRACE_EVENT_FL_USE_CALL_FILTER  = (1 << TRACE_EVENT_FL_USE_CALL_FILTER_BIT),
+       TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
 };
 
 struct ftrace_event_call {
        struct list_head        list;
        struct ftrace_event_class *class;
-       char                    *name;
+       union {
+               char                    *name;
+               /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
+               struct tracepoint       *tp;
+       };
        struct trace_event      event;
        const char              *print_fmt;
        struct event_filter     *filter;
@@ -271,6 +279,7 @@ struct ftrace_event_call {
         *   bit 3:             ftrace internal event (do not enable)
         *   bit 4:             Event was enabled by module
         *   bit 5:             use call filter rather than file filter
+        *   bit 6:             Event is a tracepoint
         */
        int                     flags; /* static flags of different events */
 
@@ -283,6 +292,15 @@ struct ftrace_event_call {
 #endif
 };
 
+static inline const char *
+ftrace_event_name(struct ftrace_event_call *call)
+{
+       if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
+               return call->tp ? call->tp->name : NULL;
+       else
+               return call->name;
+}
+
 struct trace_array;
 struct ftrace_subsystem_dir;
 
@@ -353,7 +371,7 @@ struct ftrace_event_file {
 #define __TRACE_EVENT_FLAGS(name, value)                               \
        static int __init trace_init_flags_##name(void)                 \
        {                                                               \
-               event_##name.flags = value;                             \
+               event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);
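Editorial note, not from the patch: since .name now shares a union with .tp, code that printed call->name directly is expected to go through the accessor added above, e.g.:

	pr_info("event: %s\n", ftrace_event_name(call));	/* 'call' is a struct ftrace_event_call * */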
index ac39d910e70bda7c209d783dc6f01464a836e08a..a326c850f0468ad415ad6ab014a470db452b5b03 100644 (file)
@@ -104,6 +104,9 @@ enum {
        MC13892_LED_R,
        MC13892_LED_G,
        MC13892_LED_B,
+       /* MC34708 LED IDs */
+       MC34708_LED_R,
+       MC34708_LED_G,
 };
 
 struct mc13xxx_led_platform_data {
@@ -163,6 +166,9 @@ struct mc13xxx_leds_platform_data {
 #define MC13892_LED_C2_CURRENT_G(x)    (((x) & 0x7) << 21)
 /* MC13892 LED Control 3 */
 #define MC13892_LED_C3_CURRENT_B(x)    (((x) & 0x7) << 9)
+/* MC34708 LED Control 0 */
+#define MC34708_LED_C0_CURRENT_R(x)    (((x) & 0x3) << 9)
+#define MC34708_LED_C0_CURRENT_G(x)    (((x) & 0x3) << 21)
        u32 led_control[MAX_LED_CONTROL_REGS];
 };
 
index abc848412e3c1c9cf34023690c9e80ecd00ab21d..bf9811e1321a5ae80eceeba7664f7d677da9648e 100644 (file)
@@ -1204,6 +1204,7 @@ void account_page_writeback(struct page *page);
 int set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
+int get_cmdline(struct task_struct *task, char *buffer, int buflen);
 
 /* Is the vma a continuation of the stack vma above it? */
 static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
index 371d346fa270dbfe7d8d3ac4a7849ab9cf6b5f77..839bac2709048da4d3f2d5099da1db618adcc729 100644 (file)
@@ -44,6 +44,8 @@ struct mnt_namespace;
 #define MNT_SHARED_MASK        (MNT_UNBINDABLE)
 #define MNT_PROPAGATION_MASK   (MNT_SHARED | MNT_UNBINDABLE)
 
+#define MNT_INTERNAL_FLAGS (MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL | \
+                           MNT_DOOMED | MNT_SYNC_UMOUNT | MNT_MARKED)
 
 #define MNT_INTERNAL   0x4000
 
@@ -51,6 +53,7 @@ struct mnt_namespace;
 #define MNT_LOCKED             0x800000
 #define MNT_DOOMED             0x1000000
 #define MNT_SYNC_UMOUNT                0x2000000
+#define MNT_MARKED             0x4000000
 
 struct vfsmount {
        struct dentry *mnt_root;        /* root of the mounted tree */
index ae4981ebd18eaacacafb0cf34b040f67c2c3b729..f62f78aef4ac32e315fb3fe705dd4364e957158a 100644 (file)
@@ -24,8 +24,7 @@ struct request;
 struct nbd_device {
        int flags;
        int harderror;          /* Code of hard error                   */
-       struct socket * sock;
-       struct file * file;     /* If == NULL, device is not ready, yet */
+       struct socket * sock;   /* If == NULL, device is not ready, yet */
        int magic;
 
        spinlock_t queue_lock;
index f6a15205853b228e89b5ca6233638f7f4a421cc0..9ac1a62fc6f5a1085b69c0acd717d6231a628ac3 100644 (file)
@@ -50,8 +50,13 @@ struct ntb_transport_qp;
 
 struct ntb_client {
        struct device_driver driver;
-       int (*probe) (struct pci_dev *pdev);
-       void (*remove) (struct pci_dev *pdev);
+       int (*probe)(struct pci_dev *pdev);
+       void (*remove)(struct pci_dev *pdev);
+};
+
+enum {
+       NTB_LINK_DOWN = 0,
+       NTB_LINK_UP,
 };
 
 int ntb_register_client(struct ntb_client *drvr);
@@ -60,11 +65,11 @@ int ntb_register_client_dev(char *device_name);
 void ntb_unregister_client_dev(char *device_name);
 
 struct ntb_queue_handlers {
-       void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                           void *data, int len);
-       void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
-                           void *data, int len);
-       void (*event_handler) (void *data, int status);
+       void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                          void *data, int len);
+       void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
+                          void *data, int len);
+       void (*event_handler)(void *data, int status);
 };
 
 unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
index 6b9aafed225fcd9a48228ac9f6044c7a956cc325..a50173ca1d729aba84bc00c2572b40a91ec93140 100644 (file)
@@ -66,20 +66,25 @@ enum {
 
 #define NVME_VS(major, minor)  (major << 16 | minor)
 
-#define NVME_IO_TIMEOUT        (5 * HZ)
+extern unsigned char io_timeout;
+#define NVME_IO_TIMEOUT        (io_timeout * HZ)
 
 /*
  * Represents an NVM Express device.  Each nvme_dev is a PCI function.
  */
 struct nvme_dev {
        struct list_head node;
-       struct nvme_queue **queues;
+       struct nvme_queue __rcu **queues;
+       unsigned short __percpu *io_queue;
        u32 __iomem *dbs;
        struct pci_dev *pci_dev;
        struct dma_pool *prp_page_pool;
        struct dma_pool *prp_small_pool;
        int instance;
-       int queue_count;
+       unsigned queue_count;
+       unsigned online_queues;
+       unsigned max_qid;
+       int q_depth;
        u32 db_stride;
        u32 ctrl_config;
        struct msix_entry *entry;
@@ -89,6 +94,7 @@ struct nvme_dev {
        struct miscdevice miscdev;
        work_func_t reset_workfn;
        struct work_struct reset_work;
+       struct notifier_block nb;
        char name[12];
        char serial[20];
        char model[40];
@@ -131,6 +137,7 @@ struct nvme_iod {
        int length;             /* Of data, in bytes */
        unsigned long start_time;
        dma_addr_t first_dma;
+       struct list_head node;
        struct scatterlist sg[0];
 };
 
@@ -146,16 +153,12 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
  */
 void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);
 
-int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
-                       struct nvme_iod *iod, int total_len, gfp_t gfp);
+int nvme_setup_prps(struct nvme_dev *, struct nvme_iod *, int , gfp_t);
 struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
                                unsigned long addr, unsigned length);
 void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
                        struct nvme_iod *iod);
-struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
-void put_nvmeq(struct nvme_queue *nvmeq);
-int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
-                                               u32 *result, unsigned timeout);
+int nvme_submit_io_cmd(struct nvme_dev *, struct nvme_command *, u32 *);
 int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
 int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
                                                        u32 *result);
index 4d9389c79e61b4abe20666aa9f9c89d676a8c6b1..eb8b8ac6df3c844e2bd84903e0a50ff07f1575fe 100644 (file)
@@ -82,23 +82,6 @@ struct pipe_buf_operations {
         */
        int can_merge;
 
-       /*
-        * ->map() returns a virtual address mapping of the pipe buffer.
-        * The last integer flag reflects whether this should be an atomic
-        * mapping or not. The atomic map is faster, however you can't take
-        * page faults before calling ->unmap() again. So if you need to eg
-        * access user data through copy_to/from_user(), then you must get
-        * a non-atomic map. ->map() uses the kmap_atomic slot for
-        * atomic maps, you have to be careful if mapping another page as
-        * source or destination for a copy.
-        */
-       void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);
-
-       /*
-        * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
-        */
-       void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);
-
        /*
         * ->confirm() verifies that the data in the pipe buffer is there
         * and that the contents are good. If the pages in the pipe belong
@@ -150,8 +133,6 @@ struct pipe_inode_info *alloc_pipe_info(void);
 void free_pipe_info(struct pipe_inode_info *);
 
 /* Generic pipe buffer ops functions */
-void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
-void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
diff --git a/include/linux/platform_data/dma-rcar-audmapp.h b/include/linux/platform_data/dma-rcar-audmapp.h
new file mode 100644 (file)
index 0000000..471fffe
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * This is for Renesas R-Car Audio-DMAC-peri-peri.
+ *
+ * Copyright (C) 2014 Renesas Electronics Corporation
+ * Copyright (C) 2014 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is based on the include/linux/sh_dma.h
+ *
+ * Header for the new SH dmaengine driver
+ *
+ * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef SH_AUDMAPP_H
+#define SH_AUDMAPP_H
+
+#include <linux/dmaengine.h>
+
+struct audmapp_slave_config {
+       int             slave_id;
+       dma_addr_t      src;
+       dma_addr_t      dst;
+       u32             chcr;
+};
+
+struct audmapp_pdata {
+       struct audmapp_slave_config *slave;
+       int slave_num;
+};
+
+#endif /* SH_AUDMAPP_H */
index d8a7672519b615d1e9f856d6ad364feffd87cc07..441a6f290649703f30d7cab76f47a4fac7bc4515 100644 (file)
@@ -1,5 +1,4 @@
-/* arch/arm/mach-s3c2410/include/mach/leds-gpio.h
- *
+/*
  * Copyright (c) 2006 Simtec Electronics
  *     http://armlinux.simtec.co.uk/
  *     Ben Dooks <ben@simtec.co.uk>
@@ -11,8 +10,8 @@
  * published by the Free Software Foundation.
 */
 
-#ifndef __ASM_ARCH_LEDSGPIO_H
-#define __ASM_ARCH_LEDSGPIO_H "leds-gpio.h"
+#ifndef __LEDS_S3C24XX_H
+#define __LEDS_S3C24XX_H
 
 #define S3C24XX_LEDF_ACTLOW    (1<<0)          /* LED is on when GPIO low */
 #define S3C24XX_LEDF_TRISTATE  (1<<1)          /* tristate to turn off */
@@ -25,4 +24,4 @@ struct s3c24xx_led_platdata {
        char                    *def_trigger;
 };
 
-#endif /* __ASM_ARCH_LEDSGPIO_H */
+#endif /* __LEDS_S3C24XX_H */
index 075b3056c0c00692e6f23cf17d45349323a9d5ec..25f54c79f75772a9f133c585e17a2d8e4a59e8ac 100644 (file)
@@ -1719,6 +1719,24 @@ static inline pid_t task_tgid_vnr(struct task_struct *tsk)
 }
 
 
+static inline int pid_alive(const struct task_struct *p);
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+       pid_t pid = 0;
+
+       rcu_read_lock();
+       if (pid_alive(tsk))
+               pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+       rcu_read_unlock();
+
+       return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+       return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
                                        struct pid_namespace *ns)
 {
@@ -1758,7 +1776,7 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
  *
  * Return: 1 if the process is alive. 0 otherwise.
  */
-static inline int pid_alive(struct task_struct *p)
+static inline int pid_alive(const struct task_struct *p)
 {
        return p->pids[PIDTYPE_PID].pid != NULL;
 }
index 697ceb70a9a9107689c1fc98f8599552314a9fd8..a4a0588c5397f5e0057a3565a45a60ed3d92c4d9 100644 (file)
@@ -119,8 +119,10 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        static struct syscall_metadata __syscall_meta_##sname;          \
        static struct ftrace_event_call __used                          \
          event_enter_##sname = {                                       \
-               .name                   = "sys_enter"#sname,            \
                .class                  = &event_class_syscall_enter,   \
+               {                                                       \
+                       .name                   = "sys_enter"#sname,    \
+               },                                                      \
                .event.funcs            = &enter_syscall_print_funcs,   \
                .data                   = (void *)&__syscall_meta_##sname,\
                .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
@@ -133,8 +135,10 @@ extern struct trace_event_functions exit_syscall_print_funcs;
        static struct syscall_metadata __syscall_meta_##sname;          \
        static struct ftrace_event_call __used                          \
          event_exit_##sname = {                                        \
-               .name                   = "sys_exit"#sname,             \
                .class                  = &event_class_syscall_exit,    \
+               {                                                       \
+                       .name                   = "sys_exit"#sname,     \
+               },                                                      \
                .event.funcs            = &exit_syscall_print_funcs,    \
                .data                   = (void *)&__syscall_meta_##sname,\
                .flags                  = TRACE_EVENT_FL_CAP_ANY,       \
index 812b2553dfd84c78d4c4eab4e580b554e0edf2ab..9d30ee469c2aed19e9444de08ba37b0978bd1a27 100644 (file)
@@ -6,7 +6,7 @@
  *
  * See Documentation/trace/tracepoints.txt.
  *
- * (C) Copyright 2008 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
+ * Copyright (C) 2008-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
  * Heavily inspired from the Linux Kernel Markers.
  *
@@ -21,6 +21,7 @@
 
 struct module;
 struct tracepoint;
+struct notifier_block;
 
 struct tracepoint_func {
        void *func;
@@ -35,31 +36,38 @@ struct tracepoint {
        struct tracepoint_func __rcu *funcs;
 };
 
-/*
- * Connect a probe to a tracepoint.
- * Internal API, should not be used directly.
- */
-extern int tracepoint_probe_register(const char *name, void *probe, void *data);
-
-/*
- * Disconnect a probe from a tracepoint.
- * Internal API, should not be used directly.
- */
 extern int
-tracepoint_probe_unregister(const char *name, void *probe, void *data);
+tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
+extern int
+tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data);
+extern void
+for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+               void *priv);
 
 #ifdef CONFIG_MODULES
 struct tp_module {
        struct list_head list;
-       unsigned int num_tracepoints;
-       struct tracepoint * const *tracepoints_ptrs;
+       struct module *mod;
 };
+
 bool trace_module_has_bad_taint(struct module *mod);
+extern int register_tracepoint_module_notifier(struct notifier_block *nb);
+extern int unregister_tracepoint_module_notifier(struct notifier_block *nb);
 #else
 static inline bool trace_module_has_bad_taint(struct module *mod)
 {
        return false;
 }
+static inline
+int register_tracepoint_module_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
+static inline
+int unregister_tracepoint_module_notifier(struct notifier_block *nb)
+{
+       return 0;
+}
 #endif /* CONFIG_MODULES */
 
 /*
@@ -72,6 +80,11 @@ static inline void tracepoint_synchronize_unregister(void)
        synchronize_sched();
 }
 
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+extern void syscall_regfunc(void);
+extern void syscall_unregfunc(void);
+#endif /* CONFIG_HAVE_SYSCALL_TRACEPOINTS */
+
 #define PARAMS(args...) args
 
 #endif /* _LINUX_TRACEPOINT_H */
@@ -160,14 +173,14 @@ static inline void tracepoint_synchronize_unregister(void)
        static inline int                                               \
        register_trace_##name(void (*probe)(data_proto), void *data)    \
        {                                                               \
-               return tracepoint_probe_register(#name, (void *)probe,  \
-                                                data);                 \
+               return tracepoint_probe_register(&__tracepoint_##name,  \
+                                               (void *)probe, data);   \
        }                                                               \
        static inline int                                               \
        unregister_trace_##name(void (*probe)(data_proto), void *data)  \
        {                                                               \
-               return tracepoint_probe_unregister(#name, (void *)probe, \
-                                                  data);               \
+               return tracepoint_probe_unregister(&__tracepoint_##name,\
+                                               (void *)probe, data);   \
        }                                                               \
        static inline void                                              \
        check_trace_callback_type_##name(void (*cb)(data_proto))        \
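Editorial sketch, not part of the patch: probe registration still goes through the generated per-tracepoint wrappers, which now hand tracepoint_probe_register() the tracepoint struct rather than a name string. The tracepoint "foo" and its int argument are assumptions:

	/* assumes a tracepoint declared elsewhere, e.g. TRACE_EVENT(foo, TP_PROTO(int v), ...) */
	static void foo_probe(void *data, int v)
	{
		pr_debug("foo fired: %d\n", v);
	}

	static int __init foo_probe_init(void)
	{
		/* expands to tracepoint_probe_register(&__tracepoint_foo, foo_probe, NULL) */
		return register_trace_foo(foo_probe, NULL);
	}

	static void __exit foo_probe_exit(void)
	{
		unregister_trace_foo(foo_probe, NULL);
		tracepoint_synchronize_unregister();
	}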
index c55ce243cc0985c450786e4cb63f8ed3c8e5c53b..199bcc34241ba0155a367f11d05edf5d9c138a02 100644 (file)
@@ -9,14 +9,23 @@
 #ifndef __LINUX_UIO_H
 #define __LINUX_UIO_H
 
+#include <linux/kernel.h>
 #include <uapi/linux/uio.h>
 
+struct page;
 
 struct kvec {
        void *iov_base; /* and that should *never* hold a userland pointer */
        size_t iov_len;
 };
 
+struct iov_iter {
+       const struct iovec *iov;
+       unsigned long nr_segs;
+       size_t iov_offset;
+       size_t count;
+};
+
 /*
  * Total number of bytes covered by an iovec.
  *
@@ -34,8 +43,51 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
        return ret;
 }
 
+static inline struct iovec iov_iter_iovec(const struct iov_iter *iter)
+{
+       return (struct iovec) {
+               .iov_base = iter->iov->iov_base + iter->iov_offset,
+               .iov_len = min(iter->count,
+                              iter->iov->iov_len - iter->iov_offset),
+       };
+}
+
+#define iov_for_each(iov, iter, start)                         \
+       for (iter = (start);                                    \
+            (iter).count &&                                    \
+            ((iov = iov_iter_iovec(&(iter))), 1);              \
+            iov_iter_advance(&(iter), (iov).iov_len))
+
 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
 
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes);
+size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes);
+void iov_iter_advance(struct iov_iter *i, size_t bytes);
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes);
+size_t iov_iter_single_seg_count(const struct iov_iter *i);
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+                        struct iov_iter *i);
+
+static inline void iov_iter_init(struct iov_iter *i,
+                       const struct iovec *iov, unsigned long nr_segs,
+                       size_t count, size_t written)
+{
+       i->iov = iov;
+       i->nr_segs = nr_segs;
+       i->iov_offset = 0;
+       i->count = count + written;
+
+       iov_iter_advance(i, written);
+}
+
+static inline size_t iov_iter_count(struct iov_iter *i)
+{
+       return i->count;
+}
+
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
+
 #endif
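Editorial sketch, not part of the patch: walking an iov_iter with the iov_for_each()/iov_iter_iovec() helpers added above; the function is hypothetical, and each segment returned is clamped to the bytes left in the iterator:

	static size_t example_iter_bytes(struct iov_iter start)
	{
		struct iov_iter i;
		struct iovec v;
		size_t bytes = 0;

		/* 'i' is the copy that gets advanced; 'start' is left untouched */
		iov_for_each(v, i, start)
			bytes += v.iov_len;

		return bytes;	/* equals iov_iter_count(&start) */
	}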
index 0b9f890ce43162e572b3ac55d8cd72e8166f893c..fde142e5f25a430de2cc9fc5bdff691faefae448 100644 (file)
@@ -60,6 +60,7 @@ enum rc_filter_type {
 /**
  * struct rc_dev - represents a remote control device
  * @dev: driver model's view of this device
+ * @sysfs_groups: sysfs attribute groups
  * @input_name: name of the input child device
  * @input_phys: physical path to the input child device
  * @input_id: id of the input child device (struct input_id)
@@ -112,10 +113,12 @@ enum rc_filter_type {
  *     device doesn't interrupt host until it sees IR pulses
  * @s_learning_mode: enable wide band receiver used for learning
  * @s_carrier_report: enable carrier reports
- * @s_filter: set the scancode filter of a given type
+ * @s_filter: set the scancode filter 
+ * @s_wakeup_filter: set the wakeup scancode filter
  */
 struct rc_dev {
        struct device                   dev;
+       const struct attribute_group    *sysfs_groups[5];
        const char                      *input_name;
        const char                      *input_phys;
        struct input_id                 input_id;
@@ -159,8 +162,9 @@ struct rc_dev {
        int                             (*s_learning_mode)(struct rc_dev *dev, int enable);
        int                             (*s_carrier_report) (struct rc_dev *dev, int enable);
        int                             (*s_filter)(struct rc_dev *dev,
-                                                   enum rc_filter_type type,
                                                    struct rc_scancode_filter *filter);
+       int                             (*s_wakeup_filter)(struct rc_dev *dev,
+                                                          struct rc_scancode_filter *filter);
 };
 
 #define to_rc_dev(d) container_of(d, struct rc_dev, dev)
index c38a005bd0cf9d84a09216cd6f47102e71bb2972..6fab66c5c5af6356318e52c92703bdb3ea37a3d4 100644 (file)
@@ -67,7 +67,6 @@ enum p9_trans_status {
  * @REQ_STATUS_ALLOC: request has been allocated but not sent
  * @REQ_STATUS_UNSENT: request waiting to be sent
  * @REQ_STATUS_SENT: request sent to server
- * @REQ_STATUS_FLSH: a flush has been sent for this request
  * @REQ_STATUS_RCVD: response received from server
  * @REQ_STATUS_FLSHD: request has been flushed
  * @REQ_STATUS_ERROR: request encountered an error on the client side
@@ -83,7 +82,6 @@ enum p9_req_status_t {
        REQ_STATUS_ALLOC,
        REQ_STATUS_UNSENT,
        REQ_STATUS_SENT,
-       REQ_STATUS_FLSH,
        REQ_STATUS_RCVD,
        REQ_STATUS_FLSHD,
        REQ_STATUS_ERROR,
@@ -130,7 +128,6 @@ struct p9_req_t {
  * @proto_version: 9P protocol version to use
  * @trans_mod: module API instantiated with this client
  * @trans: tranport instance state and API
- * @conn: connection state information used by trans_fd
  * @fidpool: fid handle accounting for session
  * @fidlist: List of active fid handles
  * @tagpool - transaction id accounting for session
@@ -159,7 +156,6 @@ struct p9_client {
        struct p9_trans_module *trans_mod;
        enum p9_trans_status status;
        void *trans;
-       struct p9_conn *conn;
 
        struct p9_idpool *fidpool;
        struct list_head fidlist;
@@ -261,7 +257,7 @@ int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode,
 int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status);
 int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *fl);
 struct p9_req_t *p9_tag_lookup(struct p9_client *, u16);
-void p9_client_cb(struct p9_client *c, struct p9_req_t *req);
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status);
 
 int p9_parse_header(struct p9_fcall *, int32_t *, int8_t *, int16_t *, int);
 int p9stat_read(struct p9_client *, char *, int, struct p9_wstat *);
index 9a36d929711482da1f4090fb9e51c4ab9a261ffd..d9fa68f26c41c34c33db5f743a4142faf7886792 100644 (file)
@@ -40,6 +40,8 @@
  * @close: member function to discard a connection on this transport
  * @request: member function to issue a request to the transport
  * @cancel: member function to cancel a request (if it hasn't been sent)
+ * @cancelled: member function to notify that a cancelled request will not
+ *             receive a reply
  *
  * This is the basic API for a transport module which is registered by the
  * transport module with the 9P core network module and used by the client
@@ -58,6 +60,7 @@ struct p9_trans_module {
        void (*close) (struct p9_client *);
        int (*request) (struct p9_client *, struct p9_req_t *req);
        int (*cancel) (struct p9_client *, struct p9_req_t *req);
+       int (*cancelled)(struct p9_client *, struct p9_req_t *req);
        int (*zc_request)(struct p9_client *, struct p9_req_t *,
                          char *, char *, int , int, int, int);
 };
index 4e845b80efd33464c719da6f56f7c36a3957a6a1..5853c913d2b0bbd481b0c18dbc23bf015a45962c 100644 (file)
@@ -423,11 +423,11 @@ extern int scsi_is_target_device(const struct device *);
 extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                        int data_direction, void *buffer, unsigned bufflen,
                        unsigned char *sense, int timeout, int retries,
-                       int flag, int *resid);
+                       u64 flags, int *resid);
 extern int scsi_execute_req_flags(struct scsi_device *sdev,
        const unsigned char *cmd, int data_direction, void *buffer,
        unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
-       int retries, int *resid, int flags);
+       int retries, int *resid, u64 flags);
 static inline int scsi_execute_req(struct scsi_device *sdev,
        const unsigned char *cmd, int data_direction, void *buffer,
        unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
index f862cfff5f6a618daa1057836d30c9a73e0b5653..0b6a1876639ba3a9cfe5d656659a2b92279c80bb 100644 (file)
 
 struct snd_pcm_substream;
 
+int snd_cs8427_init(struct snd_i2c_bus *bus, struct snd_i2c_device *device);
 int snd_cs8427_create(struct snd_i2c_bus *bus, unsigned char addr,
                      unsigned int reset_timeout, struct snd_i2c_device **r_cs8427);
 int snd_cs8427_reg_write(struct snd_i2c_device *device, unsigned char reg,
index 4483fadfa68d8fdc744743d6bc5bcd52de301608..33b487b5da92dc76f549df5d09a3852518c14159 100644 (file)
@@ -21,6 +21,8 @@ struct iscsit_transport {
        int (*iscsit_get_dataout)(struct iscsi_conn *, struct iscsi_cmd *, bool);
        int (*iscsit_queue_data_in)(struct iscsi_conn *, struct iscsi_cmd *);
        int (*iscsit_queue_status)(struct iscsi_conn *, struct iscsi_cmd *);
+       void (*iscsit_aborted_task)(struct iscsi_conn *, struct iscsi_cmd *);
+       enum target_prot_op (*iscsit_get_sup_prot_ops)(struct iscsi_conn *);
 };
 
 static inline void *iscsit_priv_cmd(struct iscsi_cmd *cmd)
index 7020e33e742e595c50d51aa57e83f546c5e57a50..3a1c1eea1fffcaee767d4bef1a8ea2700f1884e2 100644 (file)
@@ -73,10 +73,12 @@ sense_reason_t sbc_execute_unmap(struct se_cmd *cmd,
        sense_reason_t (*do_unmap_fn)(struct se_cmd *cmd, void *priv,
                                      sector_t lba, sector_t nolb),
        void *priv);
+void   sbc_dif_generate(struct se_cmd *);
 sense_reason_t sbc_dif_verify_write(struct se_cmd *, sector_t, unsigned int,
                                     unsigned int, struct scatterlist *, int);
 sense_reason_t sbc_dif_verify_read(struct se_cmd *, sector_t, unsigned int,
                                    unsigned int, struct scatterlist *, int);
+sense_reason_t sbc_dif_read_strip(struct se_cmd *);
 
 void   transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
 int    transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
index 1772fadcff62aaa8ab1e3ab2b4a7aef15de87fa7..9ec9864ecf38629469ef0823baf153c33fb09472 100644 (file)
@@ -162,7 +162,7 @@ enum se_cmd_flags_table {
        SCF_SENT_CHECK_CONDITION        = 0x00000800,
        SCF_OVERFLOW_BIT                = 0x00001000,
        SCF_UNDERFLOW_BIT               = 0x00002000,
-       SCF_SENT_DELAYED_TAS            = 0x00004000,
+       SCF_SEND_DELAYED_TAS            = 0x00004000,
        SCF_ALUA_NON_OPTIMIZED          = 0x00008000,
        SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
        SCF_ACK_KREF                    = 0x00040000,
@@ -442,19 +442,18 @@ struct se_tmr_req {
 };
 
 enum target_prot_op {
-       TARGET_PROT_NORMAL = 0,
-       TARGET_PROT_DIN_INSERT,
-       TARGET_PROT_DOUT_INSERT,
-       TARGET_PROT_DIN_STRIP,
-       TARGET_PROT_DOUT_STRIP,
-       TARGET_PROT_DIN_PASS,
-       TARGET_PROT_DOUT_PASS,
+       TARGET_PROT_NORMAL      = 0,
+       TARGET_PROT_DIN_INSERT  = (1 << 0),
+       TARGET_PROT_DOUT_INSERT = (1 << 1),
+       TARGET_PROT_DIN_STRIP   = (1 << 2),
+       TARGET_PROT_DOUT_STRIP  = (1 << 3),
+       TARGET_PROT_DIN_PASS    = (1 << 4),
+       TARGET_PROT_DOUT_PASS   = (1 << 5),
 };
 
-enum target_prot_ho {
-       PROT_SEPERATED,
-       PROT_INTERLEAVED,
-};
+#define TARGET_PROT_ALL        TARGET_PROT_DIN_INSERT | TARGET_PROT_DOUT_INSERT | \
+                       TARGET_PROT_DIN_STRIP | TARGET_PROT_DOUT_STRIP | \
+                       TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS
 
 enum target_prot_type {
        TARGET_DIF_TYPE0_PROT,
@@ -463,6 +462,12 @@ enum target_prot_type {
        TARGET_DIF_TYPE3_PROT,
 };
 
+enum target_core_dif_check {
+       TARGET_DIF_CHECK_GUARD  = 0x1 << 0,
+       TARGET_DIF_CHECK_APPTAG = 0x1 << 1,
+       TARGET_DIF_CHECK_REFTAG = 0x1 << 2,
+};
+
 struct se_dif_v1_tuple {
        __be16                  guard_tag;
        __be16                  app_tag;
@@ -556,13 +561,14 @@ struct se_cmd {
        /* DIF related members */
        enum target_prot_op     prot_op;
        enum target_prot_type   prot_type;
+       u8                      prot_checks;
        u32                     prot_length;
        u32                     reftag_seed;
        struct scatterlist      *t_prot_sg;
        unsigned int            t_prot_nents;
-       enum target_prot_ho     prot_handover;
        sense_reason_t          pi_err;
        sector_t                bad_sector;
+       bool                    prot_pto;
 };
 
 struct se_ua {
@@ -603,6 +609,7 @@ struct se_node_acl {
 struct se_session {
        unsigned                sess_tearing_down:1;
        u64                     sess_bin_isid;
+       enum target_prot_op     sup_prot_ops;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
        void                    *fabric_sess_ptr;
index 0218d689b3d787d9d98ecf507de7b4caba6a3146..22a4e98eec807ecaa93a735ad63fd05ef26809a9 100644 (file)
@@ -62,6 +62,7 @@ struct target_core_fabric_ops {
        int (*queue_data_in)(struct se_cmd *);
        int (*queue_status)(struct se_cmd *);
        void (*queue_tm_rsp)(struct se_cmd *);
+       void (*aborted_task)(struct se_cmd *);
        /*
         * fabric module calls for target_core_fabric_configfs.c
         */
@@ -83,10 +84,11 @@ struct target_core_fabric_ops {
        void (*fabric_drop_nodeacl)(struct se_node_acl *);
 };
 
-struct se_session *transport_init_session(void);
+struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
                unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int);
+struct se_session *transport_init_session_tags(unsigned int, unsigned int,
+               enum target_prot_op);
 void   __transport_register_session(struct se_portal_group *,
                struct se_node_acl *, struct se_session *, void *);
 void   transport_register_session(struct se_portal_group *,
index 5a4c04a75b3d369fc9665eca1deee12a2d442d61..14e49c7981359ccdac1e2d9d87a3c284e7c682f3 100644 (file)
@@ -13,9 +13,6 @@
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 
-extern void syscall_regfunc(void);
-extern void syscall_unregfunc(void);
-
 TRACE_EVENT_FN(sys_enter,
 
        TP_PROTO(struct pt_regs *regs, long id),
index 8765126b328ce4220d3c61af8c655f0226f29564..0a1a4f7caf095154c43aa901de0eca61be2b0208 100644 (file)
@@ -470,10 +470,13 @@ static inline notrace int ftrace_get_offsets_##call(                      \
  * };
  *
  * static struct ftrace_event_call event_<call> = {
- *     .name                   = "<call>",
  *     .class                  = event_class_<template>,
+ *     {
+ *             .tp                     = &__tracepoint_<call>,
+ *     },
  *     .event                  = &ftrace_event_type_<call>,
  *     .print_fmt              = print_fmt_<call>,
+ *     .flags                  = TRACE_EVENT_FL_TRACEPOINT,
  * };
  * // its only safe to use pointers when doing linker tricks to
  * // create an array.
@@ -605,10 +608,13 @@ static struct ftrace_event_class __used __refdata event_class_##call = { \
 #define DEFINE_EVENT(template, call, proto, args)                      \
                                                                        \
 static struct ftrace_event_call __used event_##call = {                        \
-       .name                   = #call,                                \
        .class                  = &event_class_##template,              \
+       {                                                               \
+               .tp                     = &__tracepoint_##call,         \
+       },                                                              \
        .event.funcs            = &ftrace_event_type_funcs_##template,  \
        .print_fmt              = print_fmt_##template,                 \
+       .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
 };                                                                     \
 static struct ftrace_event_call __used                                 \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
@@ -619,10 +625,13 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
 static const char print_fmt_##call[] = print;                          \
                                                                        \
 static struct ftrace_event_call __used event_##call = {                        \
-       .name                   = #call,                                \
        .class                  = &event_class_##template,              \
+       {                                                               \
+               .tp                     = &__tracepoint_##call,         \
+       },                                                              \
        .event.funcs            = &ftrace_event_type_funcs_##call,      \
        .print_fmt              = print_fmt_##call,                     \
+       .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
 };                                                                     \
 static struct ftrace_event_call __used                                 \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
index 2d48fe1274ca52b4c94969b2bc9d46e6a6ffe48f..11917f747cb401be5b7dc8ab788fa5d0d4e8e47c 100644 (file)
@@ -70,7 +70,6 @@
 #define AUDIT_TTY_SET          1017    /* Set TTY auditing status */
 #define AUDIT_SET_FEATURE      1018    /* Turn an audit feature on or off */
 #define AUDIT_GET_FEATURE      1019    /* Get which features are enabled */
-#define AUDIT_FEATURE_CHANGE   1020    /* audit log listing feature changes */
 
 #define AUDIT_FIRST_USER_MSG   1100    /* Userspace messages mostly uninteresting to kernel */
 #define AUDIT_USER_AVC         1107    /* We filter this differently */
 #define AUDIT_NETFILTER_PKT    1324    /* Packets traversing netfilter chains */
 #define AUDIT_NETFILTER_CFG    1325    /* Netfilter chain modifications */
 #define AUDIT_SECCOMP          1326    /* Secure Computing event */
+#define AUDIT_PROCTITLE                1327    /* Proctitle emit event */
+#define AUDIT_FEATURE_CHANGE   1328    /* audit log listing feature changes */
 
 #define AUDIT_AVC              1400    /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR      1401    /* Internal SE Linux Errors */
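As a rough sketch (not part of the patch), a consumer of the audit netlink stream could label the two record types touched above; note that AUDIT_FEATURE_CHANGE moves out of the 1000-range control messages into the 1300-range event messages, so anything keyed on the old value 1020 would need updating. The helper name below is hypothetical.

        #include <linux/audit.h>

        /* Hypothetical helper: map the renumbered/new record types to labels. */
        static const char *example_audit_type_label(int type)
        {
                switch (type) {
                case AUDIT_PROCTITLE:           /* new: 1327 */
                        return "proctitle";
                case AUDIT_FEATURE_CHANGE:      /* moved: 1020 -> 1328 */
                        return "feature_change";
                default:
                        return "other";
                }
        }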
index ba478fa3012e48b769c9846b50808eb27946990f..154dd6d3c8fedaa54a04817567580b11727f2807 100644 (file)
@@ -308,8 +308,12 @@ struct vfs_cap_data {
 
 #define CAP_LEASE            28
 
+/* Allow writing the audit log via unicast netlink socket */
+
 #define CAP_AUDIT_WRITE      29
 
+/* Allow configuration of audit via unicast netlink socket */
+
 #define CAP_AUDIT_CONTROL    30
 
 #define CAP_SETFCAP         31
index e5ab62201119938753af001c34a74c1fee9dbe22..096fe1c6f83de5ef80bd56e69b177550c36fc0f3 100644 (file)
@@ -434,6 +434,7 @@ enum {
        NVME_SC_REFTAG_CHECK            = 0x284,
        NVME_SC_COMPARE_FAILED          = 0x285,
        NVME_SC_ACCESS_DENIED           = 0x286,
+       NVME_SC_DNR                     = 0x4000,
 };
 
 struct nvme_completion {
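For context, NVME_SC_DNR is the Do Not Retry bit of the completion status rather than another status code, so callers test it as a mask. A minimal sketch with a hypothetical helper name:

        #include <linux/nvme.h>

        /* Hypothetical helper: a command whose completion status carries the
         * DNR bit should not be retried. */
        static bool example_nvme_may_retry(u16 status)
        {
                return !(status & NVME_SC_DNR);
        }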
index 270db8914c01d89cc7583d3f80236eba1747f61b..9bf508ad09570f1c00d82f5ccc4f2e937a5c379b 100644 (file)
@@ -29,6 +29,8 @@
 #ifndef __V4L2_COMMON__
 #define __V4L2_COMMON__
 
+#include <linux/types.h>
+
 /*
  *
  * Selection interface definitions
index 427ba60d638fce09ebbfcabba9ba9ccc42d11d02..765018c24cf9b0964fc3d21aa7e92fce6e5fdd10 100644 (file)
@@ -292,9 +292,12 @@ config AUDIT
          logging of avc messages output).  Does not do system-call
          auditing without CONFIG_AUDITSYSCALL.
 
+config HAVE_ARCH_AUDITSYSCALL
+       bool
+
 config AUDITSYSCALL
        bool "Enable system-call auditing support"
-       depends on AUDIT && (X86 || PARISC || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT) || ALPHA)
+       depends on AUDIT && HAVE_ARCH_AUDITSYSCALL
        default y if SECURITY_SELINUX
        help
          Enable low-overhead system-call auditing infrastructure that
index 95a20f3f52f1c9f35b7d6aa2cb0a8b7c05fdbc69..7c2893602d0651f767e1a177dbfd6214e66e8d9c 100644 (file)
@@ -182,7 +182,7 @@ struct audit_buffer {
 
 struct audit_reply {
        __u32 portid;
-       struct net *net;        
+       struct net *net;
        struct sk_buff *skb;
 };
 
@@ -396,7 +396,7 @@ static void audit_printk_skb(struct sk_buff *skb)
                if (printk_ratelimit())
                        pr_notice("type=%d %s\n", nlh->nlmsg_type, data);
                else
-                       audit_log_lost("printk limit exceeded\n");
+                       audit_log_lost("printk limit exceeded");
        }
 
        audit_hold_skb(skb);
@@ -412,7 +412,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
                BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
                if (audit_pid) {
                        pr_err("*NO* daemon at audit_pid=%d\n", audit_pid);
-                       audit_log_lost("auditd disappeared\n");
+                       audit_log_lost("auditd disappeared");
                        audit_pid = 0;
                        audit_sock = NULL;
                }
@@ -607,7 +607,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 {
        int err = 0;
 
-       /* Only support the initial namespaces for now. */
+       /* Only support initial user namespace for now. */
        /*
         * We return ECONNREFUSED because it tricks userspace into thinking
         * that audit was not configured into the kernel.  Lots of users
@@ -618,8 +618,7 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
         * userspace will reject all logins.  This should be removed when we
         * support non init namespaces!!
         */
-       if ((current_user_ns() != &init_user_ns) ||
-           (task_active_pid_ns(current) != &init_pid_ns))
+       if (current_user_ns() != &init_user_ns)
                return -ECONNREFUSED;
 
        switch (msg_type) {
@@ -639,6 +638,11 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
        case AUDIT_TTY_SET:
        case AUDIT_TRIM:
        case AUDIT_MAKE_EQUIV:
+               /* Only support auditd and auditctl in initial pid namespace
+                * for now. */
+               if ((task_active_pid_ns(current) != &init_pid_ns))
+                       return -EPERM;
+
                if (!capable(CAP_AUDIT_CONTROL))
                        err = -EPERM;
                break;
@@ -659,6 +663,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
 {
        int rc = 0;
        uid_t uid = from_kuid(&init_user_ns, current_uid());
+       pid_t pid = task_tgid_nr(current);
 
        if (!audit_enabled && msg_type != AUDIT_USER_AVC) {
                *ab = NULL;
@@ -668,7 +673,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type)
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
        if (unlikely(!*ab))
                return rc;
-       audit_log_format(*ab, "pid=%d uid=%u", task_tgid_vnr(current), uid);
+       audit_log_format(*ab, "pid=%d uid=%u", pid, uid);
        audit_log_session_info(*ab);
        audit_log_task_context(*ab);
 
@@ -1097,7 +1102,7 @@ static void __net_exit audit_net_exit(struct net *net)
                audit_sock = NULL;
        }
 
-       rcu_assign_pointer(aunet->nlsk, NULL);
+       RCU_INIT_POINTER(aunet->nlsk, NULL);
        synchronize_net();
        netlink_kernel_release(sock);
 }
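The hunk above swaps rcu_assign_pointer(aunet->nlsk, NULL) for RCU_INIT_POINTER(): when the new value is NULL there is no freshly initialized object that readers could observe out of order, so the publish barrier in rcu_assign_pointer() buys nothing. A minimal sketch of the pattern, with hypothetical type and function names:

        #include <linux/rcupdate.h>

        struct foo;

        struct example_state {
                struct foo __rcu *ptr;
        };

        /* Clearing an RCU-protected pointer: no ordering against prior stores
         * is needed for NULL, so RCU_INIT_POINTER() avoids the memory barrier
         * that rcu_assign_pointer() would insert before the store. */
        static void example_clear(struct example_state *s)
        {
                RCU_INIT_POINTER(s->ptr, NULL);
                synchronize_rcu();      /* wait out readers before freeing, if needed */
        }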
@@ -1829,11 +1834,11 @@ void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk)
        spin_unlock_irq(&tsk->sighand->siglock);
 
        audit_log_format(ab,
-                        " ppid=%ld pid=%d auid=%u uid=%u gid=%u"
+                        " ppid=%d pid=%d auid=%u uid=%u gid=%u"
                         " euid=%u suid=%u fsuid=%u"
                         " egid=%u sgid=%u fsgid=%u tty=%s ses=%u",
-                        sys_getppid(),
-                        tsk->pid,
+                        task_ppid_nr(tsk),
+                        task_pid_nr(tsk),
                         from_kuid(&init_user_ns, audit_get_loginuid(tsk)),
                         from_kuid(&init_user_ns, cred->uid),
                         from_kgid(&init_user_ns, cred->gid),
index 8df132214606f2b06e08e916196779b1aee64ad6..7bb65730c89015a9a30249ecd6fd0921a7ec1c14 100644 (file)
@@ -106,6 +106,11 @@ struct audit_names {
        bool                    should_free;
 };
 
+struct audit_proctitle {
+       int     len;    /* length of the cmdline field. */
+       char    *value; /* the cmdline field */
+};
+
 /* The per-task audit context. */
 struct audit_context {
        int                 dummy;      /* must be the first element */
@@ -202,6 +207,7 @@ struct audit_context {
                } execve;
        };
        int fds[2];
+       struct audit_proctitle proctitle;
 
 #if AUDIT_DEBUG
        int                 put_count;
index 92062fd6cc8cec4deff933c04848782bfdcded11..8e9bc9c3dbb7ef49c360bf2775f57e0b74414cc6 100644 (file)
@@ -19,6 +19,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/audit.h>
 #include <linux/kthread.h>
@@ -226,7 +228,7 @@ static int audit_match_signal(struct audit_entry *entry)
 #endif
 
 /* Common user-space to kernel rule translation. */
-static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
+static inline struct audit_entry *audit_to_entry_common(struct audit_rule_data *rule)
 {
        unsigned listnr;
        struct audit_entry *entry;
@@ -249,7 +251,7 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
                ;
        }
        if (unlikely(rule->action == AUDIT_POSSIBLE)) {
-               printk(KERN_ERR "AUDIT_POSSIBLE is deprecated\n");
+               pr_err("AUDIT_POSSIBLE is deprecated\n");
                goto exit_err;
        }
        if (rule->action != AUDIT_NEVER && rule->action != AUDIT_ALWAYS)
@@ -403,7 +405,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
        int i;
        char *str;
 
-       entry = audit_to_entry_common((struct audit_rule *)data);
+       entry = audit_to_entry_common(data);
        if (IS_ERR(entry))
                goto exit_nofree;
 
@@ -431,6 +433,19 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                        f->val = 0;
                }
 
+               if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
+                       struct pid *pid;
+                       rcu_read_lock();
+                       pid = find_vpid(f->val);
+                       if (!pid) {
+                               rcu_read_unlock();
+                               err = -ESRCH;
+                               goto exit_free;
+                       }
+                       f->val = pid_nr(pid);
+                       rcu_read_unlock();
+               }
+
                err = audit_field_valid(entry, f);
                if (err)
                        goto exit_free;
@@ -479,8 +494,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                        /* Keep currently invalid fields around in case they
                         * become valid after a policy reload. */
                        if (err == -EINVAL) {
-                               printk(KERN_WARNING "audit rule for LSM "
-                                      "\'%s\' is invalid\n",  str);
+                               pr_warn("audit rule for LSM \'%s\' is invalid\n",
+                                       str);
                                err = 0;
                        }
                        if (err) {
@@ -709,8 +724,8 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
        /* Keep currently invalid fields around in case they
         * become valid after a policy reload. */
        if (ret == -EINVAL) {
-               printk(KERN_WARNING "audit rule for LSM \'%s\' is "
-                      "invalid\n", df->lsm_str);
+               pr_warn("audit rule for LSM \'%s\' is invalid\n",
+                       df->lsm_str);
                ret = 0;
        }
 
@@ -1240,12 +1255,14 @@ static int audit_filter_user_rules(struct audit_krule *rule, int type,
 
        for (i = 0; i < rule->field_count; i++) {
                struct audit_field *f = &rule->fields[i];
+               pid_t pid;
                int result = 0;
                u32 sid;
 
                switch (f->type) {
                case AUDIT_PID:
-                       result = audit_comparator(task_pid_vnr(current), f->op, f->val);
+                       pid = task_pid_nr(current);
+                       result = audit_comparator(pid, f->op, f->val);
                        break;
                case AUDIT_UID:
                        result = audit_uid_comparator(current_uid(), f->op, f->uid);
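The printk() conversions above lean on the pr_fmt() definition added before the includes, which makes every pr_err()/pr_warn() in the file carry the module-name prefix automatically. A minimal standalone sketch of the same pattern (the function and message are illustrative only):

        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

        #include <linux/kernel.h>
        #include <linux/printk.h>

        static void example_warn(const char *str)
        {
                /* pr_fmt() prepends KBUILD_MODNAME and ": " to the format at
                 * compile time, so the prefix is not repeated at each call site. */
                pr_warn("audit rule for LSM '%s' is invalid\n", str);
        }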
index 7aef2f4b6c644963fb33c320c212eb856df8c69a..f251a5e8d17ad2285569466e442ae461fe17948e 100644 (file)
@@ -42,6 +42,8 @@
  * and <dustin.kirkland@us.ibm.com> for LSPP certification compliance.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <asm/types.h>
 #include <linux/atomic.h>
@@ -68,6 +70,7 @@
 #include <linux/capability.h>
 #include <linux/fs_struct.h>
 #include <linux/compat.h>
+#include <linux/ctype.h>
 
 #include "audit.h"
 
@@ -79,6 +82,9 @@
 /* no execve audit message should be longer than this (userspace limits) */
 #define MAX_EXECVE_AUDIT_LEN 7500
 
+/* max length to print of cmdline/proctitle value during audit */
+#define MAX_PROCTITLE_AUDIT_LEN 128
+
 /* number of audit rules */
 int audit_n_rules;
 
@@ -451,15 +457,17 @@ static int audit_filter_rules(struct task_struct *tsk,
                struct audit_field *f = &rule->fields[i];
                struct audit_names *n;
                int result = 0;
+               pid_t pid;
 
                switch (f->type) {
                case AUDIT_PID:
-                       result = audit_comparator(tsk->pid, f->op, f->val);
+                       pid = task_pid_nr(tsk);
+                       result = audit_comparator(pid, f->op, f->val);
                        break;
                case AUDIT_PPID:
                        if (ctx) {
                                if (!ctx->ppid)
-                                       ctx->ppid = sys_getppid();
+                                       ctx->ppid = task_ppid_nr(tsk);
                                result = audit_comparator(ctx->ppid, f->op, f->val);
                        }
                        break;
@@ -805,7 +813,8 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
        rcu_read_unlock();
 }
 
-static inline struct audit_context *audit_get_context(struct task_struct *tsk,
+/* Transfer the audit context pointer to the caller, clearing it in the tsk's struct */
+static inline struct audit_context *audit_take_context(struct task_struct *tsk,
                                                      int return_valid,
                                                      long return_code)
 {
@@ -842,6 +851,13 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
        return context;
 }
 
+static inline void audit_proctitle_free(struct audit_context *context)
+{
+       kfree(context->proctitle.value);
+       context->proctitle.value = NULL;
+       context->proctitle.len = 0;
+}
+
 static inline void audit_free_names(struct audit_context *context)
 {
        struct audit_names *n, *next;
@@ -850,16 +866,15 @@ static inline void audit_free_names(struct audit_context *context)
        if (context->put_count + context->ino_count != context->name_count) {
                int i = 0;
 
-               printk(KERN_ERR "%s:%d(:%d): major=%d in_syscall=%d"
-                      " name_count=%d put_count=%d"
-                      " ino_count=%d [NOT freeing]\n",
-                      __FILE__, __LINE__,
+               pr_err("%s:%d(:%d): major=%d in_syscall=%d"
+                      " name_count=%d put_count=%d ino_count=%d"
+                      " [NOT freeing]\n", __FILE__, __LINE__,
                       context->serial, context->major, context->in_syscall,
                       context->name_count, context->put_count,
                       context->ino_count);
                list_for_each_entry(n, &context->names_list, list) {
-                       printk(KERN_ERR "names[%d] = %p = %s\n", i++,
-                              n->name, n->name->name ?: "(null)");
+                       pr_err("names[%d] = %p = %s\n", i++, n->name,
+                              n->name->name ?: "(null)");
                }
                dump_stack();
                return;
@@ -955,6 +970,7 @@ static inline void audit_free_context(struct audit_context *context)
        audit_free_aux(context);
        kfree(context->filterkey);
        kfree(context->sockaddr);
+       audit_proctitle_free(context);
        kfree(context);
 }
 
@@ -1157,7 +1173,7 @@ static void audit_log_execve_info(struct audit_context *context,
         */
        buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL);
        if (!buf) {
-               audit_panic("out of memory for argv string\n");
+               audit_panic("out of memory for argv string");
                return;
        }
 
@@ -1271,6 +1287,59 @@ static void show_special(struct audit_context *context, int *call_panic)
        audit_log_end(ab);
 }
 
+static inline int audit_proctitle_rtrim(char *proctitle, int len)
+{
+       char *end = proctitle + len - 1;
+       while (end > proctitle && !isprint(*end))
+               end--;
+
+       /* catch the case where proctitle is only 1 non-print character */
+       len = end - proctitle + 1;
+       len -= isprint(proctitle[len-1]) == 0;
+       return len;
+}
+
+static void audit_log_proctitle(struct task_struct *tsk,
+                        struct audit_context *context)
+{
+       int res;
+       char *buf;
+       char *msg = "(null)";
+       int len = strlen(msg);
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PROCTITLE);
+       if (!ab)
+               return; /* audit_panic or being filtered */
+
+       audit_log_format(ab, "proctitle=");
+
+       /* Not  cached */
+       if (!context->proctitle.value) {
+               buf = kmalloc(MAX_PROCTITLE_AUDIT_LEN, GFP_KERNEL);
+               if (!buf)
+                       goto out;
+               /* Historically called this from procfs naming */
+               res = get_cmdline(tsk, buf, MAX_PROCTITLE_AUDIT_LEN);
+               if (res == 0) {
+                       kfree(buf);
+                       goto out;
+               }
+               res = audit_proctitle_rtrim(buf, res);
+               if (res == 0) {
+                       kfree(buf);
+                       goto out;
+               }
+               context->proctitle.value = buf;
+               context->proctitle.len = res;
+       }
+       msg = context->proctitle.value;
+       len = context->proctitle.len;
+out:
+       audit_log_n_untrustedstring(ab, msg, len);
+       audit_log_end(ab);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
        int i, call_panic = 0;
@@ -1388,6 +1457,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
                audit_log_name(context, n, NULL, i++, &call_panic);
        }
 
+       audit_log_proctitle(tsk, context);
+
        /* Send end of event record to help user space know we are finished */
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
        if (ab)
@@ -1406,7 +1477,7 @@ void __audit_free(struct task_struct *tsk)
 {
        struct audit_context *context;
 
-       context = audit_get_context(tsk, 0, 0);
+       context = audit_take_context(tsk, 0, 0);
        if (!context)
                return;
 
@@ -1500,7 +1571,7 @@ void __audit_syscall_exit(int success, long return_code)
        else
                success = AUDITSC_FAILURE;
 
-       context = audit_get_context(tsk, success, return_code);
+       context = audit_take_context(tsk, success, return_code);
        if (!context)
                return;
 
@@ -1550,7 +1621,7 @@ static inline void handle_one(const struct inode *inode)
        if (likely(put_tree_ref(context, chunk)))
                return;
        if (unlikely(!grow_tree_refs(context))) {
-               printk(KERN_WARNING "out of memory, audit has lost a tree reference\n");
+               pr_warn("out of memory, audit has lost a tree reference\n");
                audit_set_auditable(context);
                audit_put_chunk(chunk);
                unroll_tree_refs(context, p, count);
@@ -1609,8 +1680,7 @@ retry:
                        goto retry;
                }
                /* too bad */
-               printk(KERN_WARNING
-                       "out of memory, audit has lost a tree reference\n");
+               pr_warn("out of memory, audit has lost a tree reference\n");
                unroll_tree_refs(context, p, count);
                audit_set_auditable(context);
                return;
@@ -1682,7 +1752,7 @@ void __audit_getname(struct filename *name)
 
        if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
-               printk(KERN_ERR "%s:%d(:%d): ignoring getname(%p)\n",
+               pr_err("%s:%d(:%d): ignoring getname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                dump_stack();
 #endif
@@ -1721,15 +1791,15 @@ void audit_putname(struct filename *name)
        BUG_ON(!context);
        if (!name->aname || !context->in_syscall) {
 #if AUDIT_DEBUG == 2
-               printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
+               pr_err("%s:%d(:%d): final_putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                if (context->name_count) {
                        struct audit_names *n;
                        int i = 0;
 
                        list_for_each_entry(n, &context->names_list, list)
-                               printk(KERN_ERR "name[%d] = %p = %s\n", i++,
-                                      n->name, n->name->name ?: "(null)");
+                               pr_err("name[%d] = %p = %s\n", i++, n->name,
+                                      n->name->name ?: "(null)");
                        }
 #endif
                final_putname(name);
@@ -1738,9 +1808,8 @@ void audit_putname(struct filename *name)
        else {
                ++context->put_count;
                if (context->put_count > context->name_count) {
-                       printk(KERN_ERR "%s:%d(:%d): major=%d"
-                              " in_syscall=%d putname(%p) name_count=%d"
-                              " put_count=%d\n",
+                       pr_err("%s:%d(:%d): major=%d in_syscall=%d putname(%p)"
+                              " name_count=%d put_count=%d\n",
                               __FILE__, __LINE__,
                               context->serial, context->major,
                               context->in_syscall, name->name,
@@ -1981,12 +2050,10 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
        if (!ab)
                return;
-       audit_log_format(ab, "pid=%d uid=%u"
-                        " old-auid=%u new-auid=%u old-ses=%u new-ses=%u"
-                        " res=%d",
-                        current->pid, uid,
-                        oldloginuid, loginuid, oldsessionid, sessionid,
-                        !rc);
+       audit_log_format(ab, "pid=%d uid=%u", task_pid_nr(current), uid);
+       audit_log_task_context(ab);
+       audit_log_format(ab, " old-auid=%u auid=%u old-ses=%u ses=%u res=%d",
+                        oldloginuid, loginuid, oldsessionid, sessionid, !rc);
        audit_log_end(ab);
 }
 
@@ -2208,7 +2275,7 @@ void __audit_ptrace(struct task_struct *t)
 {
        struct audit_context *context = current->audit_context;
 
-       context->target_pid = t->pid;
+       context->target_pid = task_pid_nr(t);
        context->target_auid = audit_get_loginuid(t);
        context->target_uid = task_uid(t);
        context->target_sessionid = audit_get_sessionid(t);
@@ -2233,7 +2300,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 
        if (audit_pid && t->tgid == audit_pid) {
                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
-                       audit_sig_pid = tsk->pid;
+                       audit_sig_pid = task_pid_nr(tsk);
                        if (uid_valid(tsk->loginuid))
                                audit_sig_uid = tsk->loginuid;
                        else
@@ -2247,7 +2314,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
        /* optimize the common case by putting first signal recipient directly
         * in audit_context */
        if (!ctx->target_pid) {
-               ctx->target_pid = t->tgid;
+               ctx->target_pid = task_tgid_nr(t);
                ctx->target_auid = audit_get_loginuid(t);
                ctx->target_uid = t_uid;
                ctx->target_sessionid = audit_get_sessionid(t);
@@ -2268,7 +2335,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
        }
        BUG_ON(axp->pid_count >= AUDIT_AUX_PIDS);
 
-       axp->target_pid[axp->pid_count] = t->tgid;
+       axp->target_pid[axp->pid_count] = task_tgid_nr(t);
        axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
        axp->target_uid[axp->pid_count] = t_uid;
        axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
@@ -2368,7 +2435,7 @@ static void audit_log_task(struct audit_buffer *ab)
                         from_kgid(&init_user_ns, gid),
                         sessionid);
        audit_log_task_context(ab);
-       audit_log_format(ab, " pid=%d comm=", current->pid);
+       audit_log_format(ab, " pid=%d comm=", task_pid_nr(current));
        audit_log_untrustedstring(ab, current->comm);
        if (mm) {
                down_read(&mm->mmap_sem);
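A recurring theme in the auditsc.c hunks above is replacing tsk->pid, t->tgid and task_pid_vnr() with the task_*_nr() accessors, so that records carry pid values from the initial pid namespace, where auditd runs. A one-line sketch of the convention, with a hypothetical wrapper name:

        #include <linux/sched.h>

        /* Hypothetical wrapper: report the init-namespace pid (task_pid_nr), not
         * the value as seen from the current task's namespace (task_pid_vnr). */
        static pid_t example_audit_pid(struct task_struct *tsk)
        {
                return task_pid_nr(tsk);
        }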
index 52d6a6f56261d26dabc458684a3529d15710e30b..5a56d3c8dc03a799e5f53169c85c7f33f05727eb 100644 (file)
@@ -1195,8 +1195,6 @@ static void relay_pipe_buf_release(struct pipe_inode_info *pipe,
 
 static const struct pipe_buf_operations relay_pipe_buf_ops = {
        .can_merge = 0,
-       .map = generic_pipe_buf_map,
-       .unmap = generic_pipe_buf_unmap,
        .confirm = generic_pipe_buf_confirm,
        .release = relay_pipe_buf_release,
        .steal = generic_pipe_buf_steal,
@@ -1253,7 +1251,7 @@ static ssize_t subbuf_splice_actor(struct file *in,
        subbuf_pages = rbuf->chan->alloc_size >> PAGE_SHIFT;
        pidx = (read_start / PAGE_SIZE) % subbuf_pages;
        poff = read_start & ~PAGE_MASK;
-       nr_pages = min_t(unsigned int, subbuf_pages, pipe->buffers);
+       nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);
 
        for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {
                unsigned int this_len, this_end, private;
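The relay change above stops sizing the splice loop from pipe->buffers (which can be resized at runtime) and instead uses the capacity recorded in the splice_pipe_desc itself. A rough sketch of the idea; the helper name and parameters are illustrative only:

        #include <linux/kernel.h>
        #include <linux/splice.h>

        /* Bound the page count by the descriptor's own capacity rather than by
         * the pipe's buffer count. */
        static unsigned int example_page_budget(struct splice_pipe_desc *spd,
                                                unsigned int subbuf_pages)
        {
                return min_t(unsigned int, subbuf_pages, spd->nr_pages_max);
        }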
index fd609bd9d6dd7b5c40ec637aa288ae55f88d4505..d8d046c0726a4fede10cdca96910fd5125f5d52e 100644 (file)
@@ -71,7 +71,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
        struct pt_regs *regs = task_pt_regs(task);
 
        sd->nr = syscall_get_nr(task, regs);
-       sd->arch = syscall_get_arch(task, regs);
+       sd->arch = syscall_get_arch();
 
        /* Unroll syscall_get_args to help gcc on arm. */
        syscall_get_arguments(task, regs, 0, 1, (unsigned long *) &sd->args[0]);
@@ -348,7 +348,7 @@ static void seccomp_send_sigsys(int syscall, int reason)
        info.si_code = SYS_SECCOMP;
        info.si_call_addr = (void __user *)KSTK_EIP(current);
        info.si_errno = reason;
-       info.si_arch = syscall_get_arch(current, task_pt_regs(current));
+       info.si_arch = syscall_get_arch();
        info.si_syscall = syscall;
        force_sig_info(SIGSYS, &info, current);
 }
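The seccomp callers above are adapted to the new zero-argument syscall_get_arch(), which reports the audit architecture of the current task without needing the task or register set passed in. A hedged sketch of what an architecture's definition might look like; the constant is used purely as an example and the name is hypothetical:

        #include <linux/audit.h>        /* AUDIT_ARCH_* */

        /* Hypothetical arch helper: no task/regs arguments any more; the arch of
         * "current" is derived from compile-time or per-task state. */
        static inline int example_syscall_get_arch(void)
        {
                return AUDIT_ARCH_X86_64;
        }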
index 9be67c5e5b0f1eb78799db5e24c81cedda1053b3..737b0efa1a624aae606a0c50406de5f652e51862 100644 (file)
@@ -3611,6 +3611,8 @@ static const char readme_msg[] =
 #ifdef CONFIG_TRACER_SNAPSHOT
        "\t\t      snapshot\n"
 #endif
+       "\t\t      dump\n"
+       "\t\t      cpudump\n"
        "\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
        "\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
        "\t     The first one will disable tracing every time do_fault is hit\n"
@@ -4390,8 +4392,6 @@ static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
 
 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
        .can_merge              = 0,
-       .map                    = generic_pipe_buf_map,
-       .unmap                  = generic_pipe_buf_unmap,
        .confirm                = generic_pipe_buf_confirm,
        .release                = generic_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,
@@ -4486,7 +4486,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
        trace_access_lock(iter->cpu_file);
 
        /* Fill as many pages as possible. */
-       for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
+       for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
                spd.pages[i] = alloc_page(GFP_KERNEL);
                if (!spd.pages[i])
                        break;
@@ -5279,8 +5279,6 @@ static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
 /* Pipe buffer operations for a buffer. */
 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
        .can_merge              = 0,
-       .map                    = generic_pipe_buf_map,
-       .unmap                  = generic_pipe_buf_unmap,
        .confirm                = generic_pipe_buf_confirm,
        .release                = buffer_pipe_buf_release,
        .steal                  = generic_pipe_buf_steal,
@@ -5356,7 +5354,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
        trace_access_lock(iter->cpu_file);
        entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
 
-       for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
+       for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
                struct page *page;
                int r;
 
index 83a4378dc5e00b91eebef35ed5e05898ff72295e..3ddfd8f62c05f10864164e7ff79fcc3d6b450537 100644 (file)
@@ -223,24 +223,25 @@ int ftrace_event_reg(struct ftrace_event_call *call,
 {
        struct ftrace_event_file *file = data;
 
+       WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
        case TRACE_REG_REGISTER:
-               return tracepoint_probe_register(call->name,
+               return tracepoint_probe_register(call->tp,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
-               tracepoint_probe_unregister(call->name,
+               tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            file);
                return 0;
 
 #ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
-               return tracepoint_probe_register(call->name,
+               return tracepoint_probe_register(call->tp,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
-               tracepoint_probe_unregister(call->name,
+               tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            call);
                return 0;
@@ -352,7 +353,7 @@ static int __ftrace_event_enable_disable(struct ftrace_event_file *file,
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
-                                       "%s\n", call->name);
+                                       "%s\n", ftrace_event_name(call));
                                break;
                        }
                        set_bit(FTRACE_EVENT_FL_ENABLED_BIT, &file->flags);
@@ -481,27 +482,29 @@ __ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
 {
        struct ftrace_event_file *file;
        struct ftrace_event_call *call;
+       const char *name;
        int ret = -EINVAL;
 
        list_for_each_entry(file, &tr->events, list) {
 
                call = file->event_call;
+               name = ftrace_event_name(call);
 
-               if (!call->name || !call->class || !call->class->reg)
+               if (!name || !call->class || !call->class->reg)
                        continue;
 
                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;
 
                if (match &&
-                   strcmp(match, call->name) != 0 &&
+                   strcmp(match, name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;
 
                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;
 
-               if (event && strcmp(event, call->name) != 0)
+               if (event && strcmp(event, name) != 0)
                        continue;
 
                ftrace_event_enable_disable(file, set);
@@ -699,7 +702,7 @@ static int t_show(struct seq_file *m, void *v)
 
        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
-       seq_printf(m, "%s\n", call->name);
+       seq_printf(m, "%s\n", ftrace_event_name(call));
 
        return 0;
 }
@@ -792,7 +795,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
-               if (!call->name || !call->class || !call->class->reg)
+               if (!ftrace_event_name(call) || !call->class || !call->class->reg)
                        continue;
 
                if (system && strcmp(call->class->system, system->name) != 0)
@@ -907,7 +910,7 @@ static int f_show(struct seq_file *m, void *v)
 
        switch ((unsigned long)v) {
        case FORMAT_HEADER:
-               seq_printf(m, "name: %s\n", call->name);
+               seq_printf(m, "name: %s\n", ftrace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_printf(m, "format:\n");
                return 0;
@@ -1527,6 +1530,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
        struct trace_array *tr = file->tr;
        struct list_head *head;
        struct dentry *d_events;
+       const char *name;
        int ret;
 
        /*
@@ -1540,10 +1544,11 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
        } else
                d_events = parent;
 
-       file->dir = debugfs_create_dir(call->name, d_events);
+       name = ftrace_event_name(call);
+       file->dir = debugfs_create_dir(name, d_events);
        if (!file->dir) {
                pr_warning("Could not create debugfs '%s' directory\n",
-                          call->name);
+                          name);
                return -1;
        }
 
@@ -1567,7 +1572,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
                ret = call->class->define_fields(call);
                if (ret < 0) {
                        pr_warning("Could not initialize trace point"
-                                  " events/%s\n", call->name);
+                                  " events/%s\n", name);
                        return -1;
                }
        }
@@ -1631,15 +1636,17 @@ static void event_remove(struct ftrace_event_call *call)
 static int event_init(struct ftrace_event_call *call)
 {
        int ret = 0;
+       const char *name;
 
-       if (WARN_ON(!call->name))
+       name = ftrace_event_name(call);
+       if (WARN_ON(!name))
                return -EINVAL;
 
        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0 && ret != -ENOSYS)
                        pr_warn("Could not initialize trace events/%s\n",
-                               call->name);
+                               name);
        }
 
        return ret;
@@ -1885,7 +1892,7 @@ __trace_add_event_dirs(struct trace_array *tr)
                ret = __trace_add_new_event(call, tr);
                if (ret < 0)
                        pr_warning("Could not create directory for event %s\n",
-                                  call->name);
+                                  ftrace_event_name(call));
        }
 }
 
@@ -1894,18 +1901,20 @@ find_event_file(struct trace_array *tr, const char *system,  const char *event)
 {
        struct ftrace_event_file *file;
        struct ftrace_event_call *call;
+       const char *name;
 
        list_for_each_entry(file, &tr->events, list) {
 
                call = file->event_call;
+               name = ftrace_event_name(call);
 
-               if (!call->name || !call->class || !call->class->reg)
+               if (!name || !call->class || !call->class->reg)
                        continue;
 
                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;
 
-               if (strcmp(event, call->name) == 0 &&
+               if (strcmp(event, name) == 0 &&
                    strcmp(system, call->class->system) == 0)
                        return file;
        }
@@ -1973,7 +1982,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
        seq_printf(m, "%s:%s:%s",
                   data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
                   data->file->event_call->class->system,
-                  data->file->event_call->name);
+                  ftrace_event_name(data->file->event_call));
 
        if (data->count == -1)
                seq_printf(m, ":unlimited\n");
@@ -2193,7 +2202,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
                ret = event_create_dir(tr->event_dir, file);
                if (ret < 0)
                        pr_warning("Could not create directory for event %s\n",
-                                  file->event_call->name);
+                                  ftrace_event_name(file->event_call));
        }
 }
 
@@ -2217,7 +2226,7 @@ __trace_early_add_events(struct trace_array *tr)
                ret = __trace_early_add_new_event(call, tr);
                if (ret < 0)
                        pr_warning("Could not create early event %s\n",
-                                  call->name);
+                                  ftrace_event_name(call));
        }
 }
 
@@ -2549,7 +2558,7 @@ static __init void event_trace_self_tests(void)
                        continue;
 #endif
 
-               pr_info("Testing event %s: ", call->name);
+               pr_info("Testing event %s: ", ftrace_event_name(call));
 
                /*
                 * If an event is already enabled, someone is using
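Throughout the event-tracing hunks above, direct reads of call->name are replaced by the ftrace_event_name() accessor, since for TRACE_EVENT_FL_TRACEPOINT events the name now lives in the embedded struct tracepoint rather than in the call itself. A minimal sketch of the accessor-based style, with a hypothetical function name:

        #include <linux/ftrace_event.h>
        #include <linux/printk.h>

        /* Hypothetical helper: print "system:event" without touching call->name,
         * which is now a union member valid only for non-tracepoint events. */
        static void example_print_event(struct ftrace_event_call *call)
        {
                pr_info("%s:%s\n", call->class->system, ftrace_event_name(call));
        }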
index 8efbb69b04f00199e090b9e5fce7ea9feafdebaa..925f537f07d17db7caae363dd39a20bd2296d2ee 100644 (file)
@@ -1095,7 +1095,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
        seq_printf(m, "%s:%s:%s",
                   enable_data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
                   enable_data->file->event_call->class->system,
-                  enable_data->file->event_call->name);
+                  ftrace_event_name(enable_data->file->event_call));
 
        if (data->count == -1)
                seq_puts(m, ":unlimited");
index ee0a5098ac43adca42f1631450f6f8330782cd5c..d4ddde28a81ad0a2cbe87625fca8d8751e1e30b0 100644 (file)
@@ -173,9 +173,11 @@ struct ftrace_event_class __refdata event_class_ftrace_##call = {  \
 };                                                                     \
                                                                        \
 struct ftrace_event_call __used event_##call = {                       \
-       .name                   = #call,                                \
-       .event.type             = etype,                                \
        .class                  = &event_class_ftrace_##call,           \
+       {                                                               \
+               .name                   = #call,                        \
+       },                                                              \
+       .event.type             = etype,                                \
        .print_fmt              = print,                                \
        .flags                  = TRACE_EVENT_FL_IGNORE_ENABLE | TRACE_EVENT_FL_USE_CALL_FILTER, \
 };                                                                     \
index d021d21dd15005f39948a95b4aa444db0d093cac..903ae28962be7f513c2a382631da4da2aeef377e 100644 (file)
@@ -341,7 +341,7 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
        struct trace_kprobe *tk;
 
        list_for_each_entry(tk, &probe_list, list)
-               if (strcmp(tk->tp.call.name, event) == 0 &&
+               if (strcmp(ftrace_event_name(&tk->tp.call), event) == 0 &&
                    strcmp(tk->tp.call.class->system, group) == 0)
                        return tk;
        return NULL;
@@ -516,7 +516,8 @@ static int register_trace_kprobe(struct trace_kprobe *tk)
        mutex_lock(&probe_lock);
 
        /* Delete old (same name) event if exist */
-       old_tk = find_trace_kprobe(tk->tp.call.name, tk->tp.call.class->system);
+       old_tk = find_trace_kprobe(ftrace_event_name(&tk->tp.call),
+                       tk->tp.call.class->system);
        if (old_tk) {
                ret = unregister_trace_kprobe(old_tk);
                if (ret < 0)
@@ -564,7 +565,8 @@ static int trace_kprobe_module_callback(struct notifier_block *nb,
                        if (ret)
                                pr_warning("Failed to re-register probe %s on"
                                           "%s: %d\n",
-                                          tk->tp.call.name, mod->name, ret);
+                                          ftrace_event_name(&tk->tp.call),
+                                          mod->name, ret);
                }
        }
        mutex_unlock(&probe_lock);
@@ -818,7 +820,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
        int i;
 
        seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p');
-       seq_printf(m, ":%s/%s", tk->tp.call.class->system, tk->tp.call.name);
+       seq_printf(m, ":%s/%s", tk->tp.call.class->system,
+                       ftrace_event_name(&tk->tp.call));
 
        if (!tk->symbol)
                seq_printf(m, " 0x%p", tk->rp.kp.addr);
@@ -876,7 +879,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
        struct trace_kprobe *tk = v;
 
-       seq_printf(m, "  %-44s %15lu %15lu\n", tk->tp.call.name, tk->nhit,
+       seq_printf(m, "  %-44s %15lu %15lu\n",
+                  ftrace_event_name(&tk->tp.call), tk->nhit,
                   tk->rp.kp.nmissed);
 
        return 0;
@@ -1011,7 +1015,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
        field = (struct kprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);
 
-       if (!trace_seq_printf(s, "%s: (", tp->call.name))
+       if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
                goto partial;
 
        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1047,7 +1051,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
        field = (struct kretprobe_trace_entry_head *)iter->ent;
        tp = container_of(event, struct trace_probe, call.event);
 
-       if (!trace_seq_printf(s, "%s: (", tp->call.name))
+       if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)))
                goto partial;
 
        if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
@@ -1286,7 +1290,8 @@ static int register_kprobe_event(struct trace_kprobe *tk)
        call->data = tk;
        ret = trace_add_event_call(call);
        if (ret) {
-               pr_info("Failed to register kprobe event: %s\n", call->name);
+               pr_info("Failed to register kprobe event: %s\n",
+                       ftrace_event_name(call));
                kfree(call->print_fmt);
                unregister_ftrace_event(&call->event);
        }
index ca0e79e2abaa6a76fc4d9b83035bb7e829cb1402..a436de18aa999d78e4ee3be58d04c7ddbdffff5f 100644 (file)
@@ -431,7 +431,7 @@ int ftrace_raw_output_prep(struct trace_iterator *iter,
        }
 
        trace_seq_init(p);
-       ret = trace_seq_printf(s, "%s: ", event->name);
+       ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
 
index e4473367e7a42a5113e27989e9ffaa9d8c62563a..930e51462dc871cf7bf038be265cc7ca281aad1c 100644 (file)
@@ -294,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
        struct trace_uprobe *tu;
 
        list_for_each_entry(tu, &uprobe_list, list)
-               if (strcmp(tu->tp.call.name, event) == 0 &&
+               if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
                    strcmp(tu->tp.call.class->system, group) == 0)
                        return tu;
 
@@ -324,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
        mutex_lock(&uprobe_lock);
 
        /* register as an event */
-       old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system);
+       old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
+                       tu->tp.call.class->system);
        if (old_tu) {
                /* delete old event */
                ret = unregister_trace_uprobe(old_tu);
@@ -599,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
        char c = is_ret_probe(tu) ? 'r' : 'p';
        int i;
 
-       seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name);
+       seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
+                       ftrace_event_name(&tu->tp.call));
        seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
 
        for (i = 0; i < tu->tp.nr_args; i++)
@@ -649,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
        struct trace_uprobe *tu = v;
 
-       seq_printf(m, "  %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit);
+       seq_printf(m, "  %s %-44s %15lu\n", tu->filename,
+                       ftrace_event_name(&tu->tp.call), tu->nhit);
        return 0;
 }
 
@@ -844,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
        tu = container_of(event, struct trace_uprobe, tp.call.event);
 
        if (is_ret_probe(tu)) {
-               if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name,
+               if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
+                                       ftrace_event_name(&tu->tp.call),
                                        entry->vaddr[1], entry->vaddr[0]))
                        goto partial;
                data = DATAOF_TRACE_ENTRY(entry, true);
        } else {
-               if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name,
+               if (!trace_seq_printf(s, "%s: (0x%lx)",
+                                       ftrace_event_name(&tu->tp.call),
                                        entry->vaddr[0]))
                        goto partial;
                data = DATAOF_TRACE_ENTRY(entry, false);
@@ -1275,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu)
        ret = trace_add_event_call(call);
 
        if (ret) {
-               pr_info("Failed to register uprobe event: %s\n", call->name);
+               pr_info("Failed to register uprobe event: %s\n",
+                       ftrace_event_name(call));
                kfree(call->print_fmt);
                unregister_ftrace_event(&call->event);
        }
index fb0a38a265555c6c846254859c83468d0aba0f35..ac5b23cf7212c6ebb0045bedce2f13ee8d0a19b3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Mathieu Desnoyers
+ * Copyright (C) 2008-2014 Mathieu Desnoyers
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -33,39 +33,27 @@ extern struct tracepoint * const __stop___tracepoints_ptrs[];
 /* Set to 1 to enable tracepoint debug output */
 static const int tracepoint_debug;
 
+#ifdef CONFIG_MODULES
 /*
- * Tracepoints mutex protects the builtin and module tracepoints and the hash
- * table, as well as the local module list.
+ * Tracepoint module list mutex protects the local module list.
  */
-static DEFINE_MUTEX(tracepoints_mutex);
+static DEFINE_MUTEX(tracepoint_module_list_mutex);
 
-#ifdef CONFIG_MODULES
-/* Local list of struct module */
+/* Local list of struct tp_module */
 static LIST_HEAD(tracepoint_module_list);
 #endif /* CONFIG_MODULES */
 
 /*
- * Tracepoint hash table, containing the active tracepoints.
- * Protected by tracepoints_mutex.
+ * tracepoints_mutex protects the builtin and module tracepoints.
+ * tracepoints_mutex nests inside tracepoint_module_list_mutex.
  */
-#define TRACEPOINT_HASH_BITS 6
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+static DEFINE_MUTEX(tracepoints_mutex);
 
 /*
  * Note about RCU :
  * It is used to delay the free of multiple probes array until a quiescent
  * state is reached.
- * Tracepoint entries modifications are protected by the tracepoints_mutex.
  */
-struct tracepoint_entry {
-       struct hlist_node hlist;
-       struct tracepoint_func *funcs;
-       int refcount;   /* Number of times armed. 0 if disarmed. */
-       int enabled;    /* Tracepoint enabled */
-       char name[0];
-};
-
 struct tp_probes {
        struct rcu_head rcu;
        struct tracepoint_func probes[0];
@@ -92,34 +80,33 @@ static inline void release_probes(struct tracepoint_func *old)
        }
 }
 
-static void debug_print_probes(struct tracepoint_entry *entry)
+static void debug_print_probes(struct tracepoint_func *funcs)
 {
        int i;
 
-       if (!tracepoint_debug || !entry->funcs)
+       if (!tracepoint_debug || !funcs)
                return;
 
-       for (i = 0; entry->funcs[i].func; i++)
-               printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
+       for (i = 0; funcs[i].func; i++)
+               printk(KERN_DEBUG "Probe %d : %p\n", i, funcs[i].func);
 }
 
-static struct tracepoint_func *
-tracepoint_entry_add_probe(struct tracepoint_entry *entry,
-                          void *probe, void *data)
+static struct tracepoint_func *func_add(struct tracepoint_func **funcs,
+               struct tracepoint_func *tp_func)
 {
        int nr_probes = 0;
        struct tracepoint_func *old, *new;
 
-       if (WARN_ON(!probe))
+       if (WARN_ON(!tp_func->func))
                return ERR_PTR(-EINVAL);
 
-       debug_print_probes(entry);
-       old = entry->funcs;
+       debug_print_probes(*funcs);
+       old = *funcs;
        if (old) {
                /* (N -> N+1), (N != 0, 1) probes */
                for (nr_probes = 0; old[nr_probes].func; nr_probes++)
-                       if (old[nr_probes].func == probe &&
-                           old[nr_probes].data == data)
+                       if (old[nr_probes].func == tp_func->func &&
+                           old[nr_probes].data == tp_func->data)
                                return ERR_PTR(-EEXIST);
        }
        /* + 2 : one for new probe, one for NULL func */
@@ -128,33 +115,30 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry,
                return ERR_PTR(-ENOMEM);
        if (old)
                memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
-       new[nr_probes].func = probe;
-       new[nr_probes].data = data;
+       new[nr_probes] = *tp_func;
        new[nr_probes + 1].func = NULL;
-       entry->refcount = nr_probes + 1;
-       entry->funcs = new;
-       debug_print_probes(entry);
+       *funcs = new;
+       debug_print_probes(*funcs);
        return old;
 }
 
-static void *
-tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
-                             void *probe, void *data)
+static void *func_remove(struct tracepoint_func **funcs,
+               struct tracepoint_func *tp_func)
 {
        int nr_probes = 0, nr_del = 0, i;
        struct tracepoint_func *old, *new;
 
-       old = entry->funcs;
+       old = *funcs;
 
        if (!old)
                return ERR_PTR(-ENOENT);
 
-       debug_print_probes(entry);
+       debug_print_probes(*funcs);
        /* (N -> M), (N > 1, M >= 0) probes */
-       if (probe) {
+       if (tp_func->func) {
                for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
-                       if (old[nr_probes].func == probe &&
-                            old[nr_probes].data == data)
+                       if (old[nr_probes].func == tp_func->func &&
+                            old[nr_probes].data == tp_func->data)
                                nr_del++;
                }
        }
@@ -165,9 +149,8 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
         */
        if (nr_probes - nr_del == 0) {
                /* N -> 0, (N > 1) */
-               entry->funcs = NULL;
-               entry->refcount = 0;
-               debug_print_probes(entry);
+               *funcs = NULL;
+               debug_print_probes(*funcs);
                return old;
        } else {
                int j = 0;
@@ -177,91 +160,35 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
                if (new == NULL)
                        return ERR_PTR(-ENOMEM);
                for (i = 0; old[i].func; i++)
-                       if (old[i].func != probe || old[i].data != data)
+                       if (old[i].func != tp_func->func
+                                       || old[i].data != tp_func->data)
                                new[j++] = old[i];
                new[nr_probes - nr_del].func = NULL;
-               entry->refcount = nr_probes - nr_del;
-               entry->funcs = new;
+               *funcs = new;
        }
-       debug_print_probes(entry);
+       debug_print_probes(*funcs);
        return old;
 }
 
 /*
- * Get tracepoint if the tracepoint is present in the tracepoint hash table.
- * Must be called with tracepoints_mutex held.
- * Returns NULL if not present.
+ * Add the probe function to a tracepoint.
  */
-static struct tracepoint_entry *get_tracepoint(const char *name)
+static int tracepoint_add_func(struct tracepoint *tp,
+               struct tracepoint_func *func)
 {
-       struct hlist_head *head;
-       struct tracepoint_entry *e;
-       u32 hash = jhash(name, strlen(name), 0);
-
-       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       hlist_for_each_entry(e, head, hlist) {
-               if (!strcmp(name, e->name))
-                       return e;
-       }
-       return NULL;
-}
+       struct tracepoint_func *old, *tp_funcs;
 
-/*
- * Add the tracepoint to the tracepoint hash table. Must be called with
- * tracepoints_mutex held.
- */
-static struct tracepoint_entry *add_tracepoint(const char *name)
-{
-       struct hlist_head *head;
-       struct tracepoint_entry *e;
-       size_t name_len = strlen(name) + 1;
-       u32 hash = jhash(name, name_len-1, 0);
-
-       head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
-       hlist_for_each_entry(e, head, hlist) {
-               if (!strcmp(name, e->name)) {
-                       printk(KERN_NOTICE
-                               "tracepoint %s busy\n", name);
-                       return ERR_PTR(-EEXIST);        /* Already there */
-               }
-       }
-       /*
-        * Using kmalloc here to allocate a variable length element. Could
-        * cause some memory fragmentation if overused.
-        */
-       e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
-       if (!e)
-               return ERR_PTR(-ENOMEM);
-       memcpy(&e->name[0], name, name_len);
-       e->funcs = NULL;
-       e->refcount = 0;
-       e->enabled = 0;
-       hlist_add_head(&e->hlist, head);
-       return e;
-}
+       if (tp->regfunc && !static_key_enabled(&tp->key))
+               tp->regfunc();
 
-/*
- * Remove the tracepoint from the tracepoint hash table. Must be called with
- * mutex_lock held.
- */
-static inline void remove_tracepoint(struct tracepoint_entry *e)
-{
-       hlist_del(&e->hlist);
-       kfree(e);
-}
-
-/*
- * Sets the probe callback corresponding to one tracepoint.
- */
-static void set_tracepoint(struct tracepoint_entry **entry,
-       struct tracepoint *elem, int active)
-{
-       WARN_ON(strcmp((*entry)->name, elem->name) != 0);
-
-       if (elem->regfunc && !static_key_enabled(&elem->key) && active)
-               elem->regfunc();
-       else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
-               elem->unregfunc();
+       tp_funcs = rcu_dereference_protected(tp->funcs,
+                       lockdep_is_held(&tracepoints_mutex));
+       old = func_add(&tp_funcs, func);
+       if (IS_ERR(old)) {
+               WARN_ON_ONCE(1);
+               return PTR_ERR(old);
+       }
+       release_probes(old);
 
        /*
         * rcu_assign_pointer has a smp_wmb() which makes sure that the new
@@ -270,193 +197,90 @@ static void set_tracepoint(struct tracepoint_entry **entry,
         * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
         * is used.
         */
-       rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-       if (active && !static_key_enabled(&elem->key))
-               static_key_slow_inc(&elem->key);
-       else if (!active && static_key_enabled(&elem->key))
-               static_key_slow_dec(&elem->key);
+       rcu_assign_pointer(tp->funcs, tp_funcs);
+       if (!static_key_enabled(&tp->key))
+               static_key_slow_inc(&tp->key);
+       return 0;
 }
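
The barrier comment above pairs the writer side here with the reader side in include/linux/tracepoint.h. Below is a heavily simplified sketch of that reader side; it is not the real __DO_TRACE() macro, and the probe prototype and helper name are assumptions made only for illustration.

/*
 * Illustrative reader-side sketch (not the real __DO_TRACE() macro):
 * walk the RCU-published probe array and invoke each callback.  The
 * "probe_t" prototype is made up for this example.
 */
typedef void (*probe_t)(void *data);

static void call_probes_sketch(struct tracepoint *tp)
{
	struct tracepoint_func *it_func_ptr;

	rcu_read_lock_sched_notrace();
	it_func_ptr = rcu_dereference_sched(tp->funcs);
	if (it_func_ptr) {
		do {
			probe_t fn = (probe_t)it_func_ptr->func;

			fn(it_func_ptr->data);
		} while ((++it_func_ptr)->func);
	}
	rcu_read_unlock_sched_notrace();
}

The old array handed to release_probes() above is expected to be reclaimed only after such read-side sections have completed.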
 
 /*
- * Disable a tracepoint and its probe callback.
+ * Remove a probe function from a tracepoint.
  * Note: only waiting an RCU period after setting elem->call to the empty
  * function ensures that the original callback is not used anymore. This is
  * ensured by preempt_disable around the call site.
  */
-static void disable_tracepoint(struct tracepoint *elem)
+static int tracepoint_remove_func(struct tracepoint *tp,
+               struct tracepoint_func *func)
 {
-       if (elem->unregfunc && static_key_enabled(&elem->key))
-               elem->unregfunc();
-
-       if (static_key_enabled(&elem->key))
-               static_key_slow_dec(&elem->key);
-       rcu_assign_pointer(elem->funcs, NULL);
-}
+       struct tracepoint_func *old, *tp_funcs;
 
-/**
- * tracepoint_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of tracepoints.
- * Called with tracepoints_mutex held.
- */
-static void tracepoint_update_probe_range(struct tracepoint * const *begin,
-                                         struct tracepoint * const *end)
-{
-       struct tracepoint * const *iter;
-       struct tracepoint_entry *mark_entry;
-
-       if (!begin)
-               return;
-
-       for (iter = begin; iter < end; iter++) {
-               mark_entry = get_tracepoint((*iter)->name);
-               if (mark_entry) {
-                       set_tracepoint(&mark_entry, *iter,
-                                       !!mark_entry->refcount);
-                       mark_entry->enabled = !!mark_entry->refcount;
-               } else {
-                       disable_tracepoint(*iter);
-               }
+       tp_funcs = rcu_dereference_protected(tp->funcs,
+                       lockdep_is_held(&tracepoints_mutex));
+       old = func_remove(&tp_funcs, func);
+       if (IS_ERR(old)) {
+               WARN_ON_ONCE(1);
+               return PTR_ERR(old);
        }
-}
-
-#ifdef CONFIG_MODULES
-void module_update_tracepoints(void)
-{
-       struct tp_module *tp_mod;
-
-       list_for_each_entry(tp_mod, &tracepoint_module_list, list)
-               tracepoint_update_probe_range(tp_mod->tracepoints_ptrs,
-                       tp_mod->tracepoints_ptrs + tp_mod->num_tracepoints);
-}
-#else /* CONFIG_MODULES */
-void module_update_tracepoints(void)
-{
-}
-#endif /* CONFIG_MODULES */
+       release_probes(old);
 
+       if (!tp_funcs) {
+               /* Removed last function */
+               if (tp->unregfunc && static_key_enabled(&tp->key))
+                       tp->unregfunc();
 
-/*
- * Update probes, removing the faulty probes.
- * Called with tracepoints_mutex held.
- */
-static void tracepoint_update_probes(void)
-{
-       /* Core kernel tracepoints */
-       tracepoint_update_probe_range(__start___tracepoints_ptrs,
-               __stop___tracepoints_ptrs);
-       /* tracepoints in modules. */
-       module_update_tracepoints();
-}
-
-static struct tracepoint_func *
-tracepoint_add_probe(const char *name, void *probe, void *data)
-{
-       struct tracepoint_entry *entry;
-       struct tracepoint_func *old;
-
-       entry = get_tracepoint(name);
-       if (!entry) {
-               entry = add_tracepoint(name);
-               if (IS_ERR(entry))
-                       return (struct tracepoint_func *)entry;
+               if (static_key_enabled(&tp->key))
+                       static_key_slow_dec(&tp->key);
        }
-       old = tracepoint_entry_add_probe(entry, probe, data);
-       if (IS_ERR(old) && !entry->refcount)
-               remove_tracepoint(entry);
-       return old;
+       rcu_assign_pointer(tp->funcs, tp_funcs);
+       return 0;
 }
 
 /**
  * tracepoint_probe_register -  Connect a probe to a tracepoint
- * @name: tracepoint name
+ * @tp: tracepoint
  * @probe: probe handler
- * @data: probe private data
- *
- * Returns:
- * - 0 if the probe was successfully registered, and tracepoint
- *   callsites are currently loaded for that probe,
- * - -ENODEV if the probe was successfully registered, but no tracepoint
- *   callsite is currently loaded for that probe,
- * - other negative error value on error.
- *
- * When tracepoint_probe_register() returns either 0 or -ENODEV,
- * parameters @name, @probe, and @data may be used by the tracepoint
- * infrastructure until the probe is unregistered.
  *
- * The probe address must at least be aligned on the architecture pointer size.
+ * Returns 0 if ok, error value on error.
+ * Note: if @tp is within a module, the caller is responsible for
+ * unregistering the probe before the module is gone. This can be
+ * performed either with a tracepoint module going notifier, or from
+ * within module exit functions.
  */
-int tracepoint_probe_register(const char *name, void *probe, void *data)
+int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data)
 {
-       struct tracepoint_func *old;
-       struct tracepoint_entry *entry;
-       int ret = 0;
+       struct tracepoint_func tp_func;
+       int ret;
 
        mutex_lock(&tracepoints_mutex);
-       old = tracepoint_add_probe(name, probe, data);
-       if (IS_ERR(old)) {
-               mutex_unlock(&tracepoints_mutex);
-               return PTR_ERR(old);
-       }
-       tracepoint_update_probes();             /* may update entry */
-       entry = get_tracepoint(name);
-       /* Make sure the entry was enabled */
-       if (!entry || !entry->enabled)
-               ret = -ENODEV;
+       tp_func.func = probe;
+       tp_func.data = data;
+       ret = tracepoint_add_func(tp, &tp_func);
        mutex_unlock(&tracepoints_mutex);
-       release_probes(old);
        return ret;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_register);
 
-static struct tracepoint_func *
-tracepoint_remove_probe(const char *name, void *probe, void *data)
-{
-       struct tracepoint_entry *entry;
-       struct tracepoint_func *old;
-
-       entry = get_tracepoint(name);
-       if (!entry)
-               return ERR_PTR(-ENOENT);
-       old = tracepoint_entry_remove_probe(entry, probe, data);
-       if (IS_ERR(old))
-               return old;
-       if (!entry->refcount)
-               remove_tracepoint(entry);
-       return old;
-}
-
 /**
  * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
- * @name: tracepoint name
+ * @tp: tracepoint
  * @probe: probe function pointer
- * @data: probe private data
  *
- * We do not need to call a synchronize_sched to make sure the probes have
- * finished running before doing a module unload, because the module unload
- * itself uses stop_machine(), which insures that every preempt disabled section
- * have finished.
+ * Returns 0 if ok, error value on error.
  */
-int tracepoint_probe_unregister(const char *name, void *probe, void *data)
+int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
 {
-       struct tracepoint_func *old;
+       struct tracepoint_func tp_func;
+       int ret;
 
        mutex_lock(&tracepoints_mutex);
-       old = tracepoint_remove_probe(name, probe, data);
-       if (IS_ERR(old)) {
-               mutex_unlock(&tracepoints_mutex);
-               return PTR_ERR(old);
-       }
-       tracepoint_update_probes();             /* may update entry */
+       tp_func.func = probe;
+       tp_func.data = data;
+       ret = tracepoint_remove_func(tp, &tp_func);
        mutex_unlock(&tracepoints_mutex);
-       release_probes(old);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
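
Both entry points are now keyed on a struct tracepoint rather than a name string. A minimal usage sketch follows; the tracepoint "myevent", its single int argument and the probe are hypothetical and assume the tracepoint is defined and exported elsewhere (the __tracepoint_##name symbol is what DEFINE_TRACE() emits). As the kernel-doc above requires, the probe is unregistered in the module exit path.

#include <linux/module.h>
#include <linux/tracepoint.h>

/* Hypothetical tracepoint, assumed defined and exported elsewhere. */
extern struct tracepoint __tracepoint_myevent;

/* Probes receive their private data pointer first, then the trace arguments. */
static void my_probe(void *data, int value)
{
	pr_info("myevent fired: %d\n", value);
}

static int __init my_probe_init(void)
{
	return tracepoint_probe_register(&__tracepoint_myevent,
					 (void *)my_probe, NULL);
}
module_init(my_probe_init);

static void __exit my_probe_exit(void)
{
	/* Must run before this module (or the tracepoint's module) goes away. */
	tracepoint_probe_unregister(&__tracepoint_myevent,
				    (void *)my_probe, NULL);
}
module_exit(my_probe_exit);

MODULE_LICENSE("GPL");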
 
-
 #ifdef CONFIG_MODULES
 bool trace_module_has_bad_taint(struct module *mod)
 {
@@ -464,6 +288,74 @@ bool trace_module_has_bad_taint(struct module *mod)
                               (1 << TAINT_UNSIGNED_MODULE));
 }
 
+static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
+
+/**
+ * register_tracepoint_module_notifier - register tracepoint coming/going notifier
+ * @nb: notifier block
+ *
+ * Notifiers registered with this function are called on module
+ * coming/going with the tracepoint_module_list_mutex held.
+ * The notifier block callback should expect a "struct tp_module" data
+ * pointer.
+ */
+int register_tracepoint_module_notifier(struct notifier_block *nb)
+{
+       struct tp_module *tp_mod;
+       int ret;
+
+       mutex_lock(&tracepoint_module_list_mutex);
+       ret = blocking_notifier_chain_register(&tracepoint_notify_list, nb);
+       if (ret)
+               goto end;
+       list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+               (void) nb->notifier_call(nb, MODULE_STATE_COMING, tp_mod);
+end:
+       mutex_unlock(&tracepoint_module_list_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(register_tracepoint_module_notifier);
+
+/**
+ * unregister_tracepoint_module_notifier - unregister tracepoint coming/going notifier
+ * @nb: notifier block
+ *
+ * The notifier block callback should expect a "struct tp_module" data
+ * pointer.
+ */
+int unregister_tracepoint_module_notifier(struct notifier_block *nb)
+{
+       struct tp_module *tp_mod;
+       int ret;
+
+       mutex_lock(&tracepoint_module_list_mutex);
+       ret = blocking_notifier_chain_unregister(&tracepoint_notify_list, nb);
+       if (ret)
+               goto end;
+       list_for_each_entry(tp_mod, &tracepoint_module_list, list)
+               (void) nb->notifier_call(nb, MODULE_STATE_GOING, tp_mod);
+end:
+       mutex_unlock(&tracepoint_module_list_mutex);
+       return ret;
+
+}
+EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
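
A sketch of a coming/going notifier as described in the kernel-doc above. The tracer-side actions in the switch are placeholders; the only firm parts are the notifier signature and the fact that the data pointer is the struct tp_module introduced by this patch.

#include <linux/notifier.h>
#include <linux/module.h>

static int my_tp_module_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct tp_module *tp_mod = data;

	switch (action) {
	case MODULE_STATE_COMING:
		/* e.g. scan tp_mod->mod->tracepoints_ptrs and attach probes */
		break;
	case MODULE_STATE_GOING:
		/* unregister any probes attached to this module's tracepoints */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_tp_module_nb = {
	.notifier_call = my_tp_module_notify,
};

/*
 * register_tracepoint_module_notifier(&my_tp_module_nb) on tracer init,
 * unregister_tracepoint_module_notifier(&my_tp_module_nb) on tracer exit.
 */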
+
+/*
+ * Ensure the tracer unregistered the module's probes before the module
+ * teardown is performed. Prevents leaks of probe and data pointers.
+ */
+static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
+               struct tracepoint * const *end)
+{
+       struct tracepoint * const *iter;
+
+       if (!begin)
+               return;
+       for (iter = begin; iter < end; iter++)
+               WARN_ON_ONCE((*iter)->funcs);
+}
+
 static int tracepoint_module_coming(struct module *mod)
 {
        struct tp_module *tp_mod;
@@ -479,36 +371,41 @@ static int tracepoint_module_coming(struct module *mod)
         */
        if (trace_module_has_bad_taint(mod))
                return 0;
-       mutex_lock(&tracepoints_mutex);
+       mutex_lock(&tracepoint_module_list_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
        if (!tp_mod) {
                ret = -ENOMEM;
                goto end;
        }
-       tp_mod->num_tracepoints = mod->num_tracepoints;
-       tp_mod->tracepoints_ptrs = mod->tracepoints_ptrs;
+       tp_mod->mod = mod;
        list_add_tail(&tp_mod->list, &tracepoint_module_list);
-       tracepoint_update_probe_range(mod->tracepoints_ptrs,
-               mod->tracepoints_ptrs + mod->num_tracepoints);
+       blocking_notifier_call_chain(&tracepoint_notify_list,
+                       MODULE_STATE_COMING, tp_mod);
 end:
-       mutex_unlock(&tracepoints_mutex);
+       mutex_unlock(&tracepoint_module_list_mutex);
        return ret;
 }
 
-static int tracepoint_module_going(struct module *mod)
+static void tracepoint_module_going(struct module *mod)
 {
-       struct tp_module *pos;
+       struct tp_module *tp_mod;
 
        if (!mod->num_tracepoints)
-               return 0;
+               return;
 
-       mutex_lock(&tracepoints_mutex);
-       tracepoint_update_probe_range(mod->tracepoints_ptrs,
-               mod->tracepoints_ptrs + mod->num_tracepoints);
-       list_for_each_entry(pos, &tracepoint_module_list, list) {
-               if (pos->tracepoints_ptrs == mod->tracepoints_ptrs) {
-                       list_del(&pos->list);
-                       kfree(pos);
+       mutex_lock(&tracepoint_module_list_mutex);
+       list_for_each_entry(tp_mod, &tracepoint_module_list, list) {
+               if (tp_mod->mod == mod) {
+                       blocking_notifier_call_chain(&tracepoint_notify_list,
+                                       MODULE_STATE_GOING, tp_mod);
+                       list_del(&tp_mod->list);
+                       kfree(tp_mod);
+                       /*
+                        * The going notifier has been called before
+                        * checking for quiescence.
+                        */
+                       tp_module_going_check_quiescent(mod->tracepoints_ptrs,
+                               mod->tracepoints_ptrs + mod->num_tracepoints);
                        break;
                }
        }
@@ -518,12 +415,11 @@ static int tracepoint_module_going(struct module *mod)
         * flag on "going", in case a module taints the kernel only after being
         * loaded.
         */
-       mutex_unlock(&tracepoints_mutex);
-       return 0;
+       mutex_unlock(&tracepoint_module_list_mutex);
 }
 
-int tracepoint_module_notify(struct notifier_block *self,
-                            unsigned long val, void *data)
+static int tracepoint_module_notify(struct notifier_block *self,
+               unsigned long val, void *data)
 {
        struct module *mod = data;
        int ret = 0;
@@ -535,24 +431,58 @@ int tracepoint_module_notify(struct notifier_block *self,
        case MODULE_STATE_LIVE:
                break;
        case MODULE_STATE_GOING:
-               ret = tracepoint_module_going(mod);
+               tracepoint_module_going(mod);
+               break;
+       case MODULE_STATE_UNFORMED:
                break;
        }
        return ret;
 }
 
-struct notifier_block tracepoint_module_nb = {
+static struct notifier_block tracepoint_module_nb = {
        .notifier_call = tracepoint_module_notify,
        .priority = 0,
 };
 
-static int init_tracepoints(void)
+static __init int init_tracepoints(void)
 {
-       return register_module_notifier(&tracepoint_module_nb);
+       int ret;
+
+       ret = register_module_notifier(&tracepoint_module_nb);
+       if (ret)
+               pr_warning("Failed to register tracepoint module enter notifier\n");
+
+       return ret;
 }
 __initcall(init_tracepoints);
 #endif /* CONFIG_MODULES */
 
+static void for_each_tracepoint_range(struct tracepoint * const *begin,
+               struct tracepoint * const *end,
+               void (*fct)(struct tracepoint *tp, void *priv),
+               void *priv)
+{
+       struct tracepoint * const *iter;
+
+       if (!begin)
+               return;
+       for (iter = begin; iter < end; iter++)
+               fct(*iter, priv);
+}
+
+/**
+ * for_each_kernel_tracepoint - iteration on all kernel tracepoints
+ * @fct: callback
+ * @priv: private data
+ */
+void for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv),
+               void *priv)
+{
+       for_each_tracepoint_range(__start___tracepoints_ptrs,
+               __stop___tracepoints_ptrs, fct, priv);
+}
+EXPORT_SYMBOL_GPL(for_each_kernel_tracepoint);
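
Probes now attach to a struct tracepoint, so a caller that only knows a tracepoint by name can recover the pointer through this iterator. A minimal lookup sketch; the helper and structure names are illustrative only:

struct tp_lookup {
	const char	*name;
	struct tracepoint *found;
};

static void tp_match(struct tracepoint *tp, void *priv)
{
	struct tp_lookup *lookup = priv;

	if (!strcmp(tp->name, lookup->name))
		lookup->found = tp;
}

/* Returns the core-kernel tracepoint called @name, or NULL. */
static struct tracepoint *find_kernel_tracepoint(const char *name)
{
	struct tp_lookup lookup = { .name = name, .found = NULL };

	for_each_kernel_tracepoint(tp_match, &lookup);
	return lookup.found;
}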
+
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
 
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
index 5d4984c505f8c10d0d7250b7f59d3234fe3b21b5..4771fb3f4da4deafdab8b26c76df9c0bec618300 100644 (file)
@@ -182,6 +182,15 @@ config AUDIT_GENERIC
        depends on AUDIT && !AUDIT_ARCH
        default y
 
+config AUDIT_ARCH_COMPAT_GENERIC
+       bool
+       default n
+
+config AUDIT_COMPAT_GENERIC
+       bool
+       depends on AUDIT_GENERIC && AUDIT_ARCH_COMPAT_GENERIC && COMPAT
+       default y
+
 config RANDOM32_SELFTEST
        bool "PRNG perform self test on init"
        default n
index dd7f8858188a6ac92ac19bb7ae032d62786ee612..140b66a874c1d5e9a069eade2fa6aebc6d65013d 100644 (file)
@@ -1045,16 +1045,6 @@ config DEBUG_BUGVERBOSE
          of the BUG call as well as the EIP and oops trace.  This aids
          debugging but costs about 70-100K of memory.
 
-config DEBUG_WRITECOUNT
-       bool "Debug filesystem writers count"
-       depends on DEBUG_KERNEL
-       help
-         Enable this to catch wrong use of the writers count in struct
-         vfsmount.  This will increase the size of each file struct by
-         32 bits.
-
-         If unsure, say N.
-
 config DEBUG_LIST
        bool "Debug linked list manipulation"
        depends on DEBUG_KERNEL
index 48140e3ba73f5864a734edb3032be01745b2c678..0cd7b68e1382dee93301898da70a6ca51c3d764e 100644 (file)
@@ -96,6 +96,7 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
 obj-$(CONFIG_SMP) += percpu_counter.o
 obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
index 76bbed4a20e55d85ebdfc32881d436ee98592f4f..1d726a22565bdc41a36a19b1a5c26a441549f9b3 100644 (file)
@@ -30,11 +30,17 @@ static unsigned signal_class[] = {
 
 int audit_classify_arch(int arch)
 {
-       return 0;
+       if (audit_is_compat(arch))
+               return 1;
+       else
+               return 0;
 }
 
 int audit_classify_syscall(int abi, unsigned syscall)
 {
+       if (audit_is_compat(abi))
+               return audit_classify_compat_syscall(abi, syscall);
+
        switch(syscall) {
 #ifdef __NR_open
        case __NR_open:
@@ -57,6 +63,13 @@ int audit_classify_syscall(int abi, unsigned syscall)
 
 static int __init audit_classes_init(void)
 {
+#ifdef CONFIG_AUDIT_COMPAT_GENERIC
+       audit_register_class(AUDIT_CLASS_WRITE_32, compat_write_class);
+       audit_register_class(AUDIT_CLASS_READ_32, compat_read_class);
+       audit_register_class(AUDIT_CLASS_DIR_WRITE_32, compat_dir_class);
+       audit_register_class(AUDIT_CLASS_CHATTR_32, compat_chattr_class);
+       audit_register_class(AUDIT_CLASS_SIGNAL_32, compat_signal_class);
+#endif
        audit_register_class(AUDIT_CLASS_WRITE, write_class);
        audit_register_class(AUDIT_CLASS_READ, read_class);
        audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/lib/compat_audit.c b/lib/compat_audit.c
new file mode 100644 (file)
index 0000000..873f75b
--- /dev/null
@@ -0,0 +1,50 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <asm/unistd32.h>
+
+unsigned compat_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned compat_read_class[] = {
+#include <asm-generic/audit_read.h>
+~0U
+};
+
+unsigned compat_write_class[] = {
+#include <asm-generic/audit_write.h>
+~0U
+};
+
+unsigned compat_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+unsigned compat_signal_class[] = {
+#include <asm-generic/audit_signal.h>
+~0U
+};
+
+int audit_classify_compat_syscall(int abi, unsigned syscall)
+{
+       switch (syscall) {
+#ifdef __NR_open
+       case __NR_open:
+               return 2;
+#endif
+#ifdef __NR_openat
+       case __NR_openat:
+               return 3;
+#endif
+#ifdef __NR_socketcall
+       case __NR_socketcall:
+               return 4;
+#endif
+       case __NR_execve:
+               return 5;
+       default:
+               return 1;
+       }
+}
index 9e5aaf92197d3fcc7038e86d9769dc0def5bf1c3..b484452dac57ea5e531918837d658c9d5d159ea6 100644 (file)
@@ -17,7 +17,8 @@ obj-y                 := filemap.o mempool.o oom_kill.o fadvise.o \
                           util.o mmzone.o vmstat.o backing-dev.o \
                           mm_init.o mmu_context.o percpu.o slab_common.o \
                           compaction.o balloon_compaction.o vmacache.o \
-                          interval_tree.o list_lru.o workingset.o $(mmu-y)
+                          interval_tree.o list_lru.o workingset.o \
+                          iov_iter.o $(mmu-y)
 
 obj-y += init-mm.o
 
index 27ebc0c9571bb8831ceb38c4e71e1bf57227f098..a82fbe4c9e8e1c1d5a3eed5e2649ec87a7bfd16d 100644 (file)
@@ -77,7 +77,7 @@
  *  ->mmap_sem
  *    ->lock_page              (access_process_vm)
  *
- *  ->i_mutex                  (generic_file_buffered_write)
+ *  ->i_mutex                  (generic_perform_write)
  *    ->mmap_sem               (fault_in_pages_readable->do_page_fault)
  *
  *  bdi->wb.list_lock
@@ -1428,7 +1428,8 @@ static void shrink_readahead_size_eio(struct file *filp,
  * do_generic_file_read - generic file read routine
  * @filp:      the file to read
  * @ppos:      current file position
- * @desc:      read_descriptor
+ * @iter:      data destination
+ * @written:   already copied
  *
  * This is a generic file read routine, and uses the
  * mapping->a_ops->readpage() function for the actual low-level stuff.
@@ -1436,8 +1437,8 @@ static void shrink_readahead_size_eio(struct file *filp,
  * This is really ugly. But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
  */
-static void do_generic_file_read(struct file *filp, loff_t *ppos,
-               read_descriptor_t *desc)
+static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos,
+               struct iov_iter *iter, ssize_t written)
 {
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
@@ -1447,12 +1448,12 @@ static void do_generic_file_read(struct file *filp, loff_t *ppos,
        pgoff_t prev_index;
        unsigned long offset;      /* offset into pagecache page */
        unsigned int prev_offset;
-       int error;
+       int error = 0;
 
        index = *ppos >> PAGE_CACHE_SHIFT;
        prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT;
        prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1);
-       last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+       last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        offset = *ppos & ~PAGE_CACHE_MASK;
 
        for (;;) {
@@ -1487,7 +1488,7 @@ find_page:
                        if (!page->mapping)
                                goto page_not_up_to_date_locked;
                        if (!mapping->a_ops->is_partially_uptodate(page,
-                                                               desc, offset))
+                                                       offset, iter->count))
                                goto page_not_up_to_date_locked;
                        unlock_page(page);
                }
@@ -1537,24 +1538,23 @@ page_ok:
                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
-                *
-                * The file_read_actor routine returns how many bytes were
-                * actually used..
-                * NOTE! This may not be the same as how much of a user buffer
-                * we filled up (we may be padding etc), so we can only update
-                * "pos" here (the actor routine has to update the user buffer
-                * pointers and the remaining count).
                 */
-               ret = file_read_actor(desc, page, offset, nr);
+
+               ret = copy_page_to_iter(page, offset, nr, iter);
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
                prev_offset = offset;
 
                page_cache_release(page);
-               if (ret == nr && desc->count)
-                       continue;
-               goto out;
+               written += ret;
+               if (!iov_iter_count(iter))
+                       goto out;
+               if (ret < nr) {
+                       error = -EFAULT;
+                       goto out;
+               }
+               continue;
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
@@ -1589,6 +1589,7 @@ readpage:
                if (unlikely(error)) {
                        if (error == AOP_TRUNCATED_PAGE) {
                                page_cache_release(page);
+                               error = 0;
                                goto find_page;
                        }
                        goto readpage_error;
@@ -1619,7 +1620,6 @@ readpage:
 
 readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
-               desc->error = error;
                page_cache_release(page);
                goto out;
 
@@ -1630,16 +1630,17 @@ no_cached_page:
                 */
                page = page_cache_alloc_cold(mapping);
                if (!page) {
-                       desc->error = -ENOMEM;
+                       error = -ENOMEM;
                        goto out;
                }
                error = add_to_page_cache_lru(page, mapping,
                                                index, GFP_KERNEL);
                if (error) {
                        page_cache_release(page);
-                       if (error == -EEXIST)
+                       if (error == -EEXIST) {
+                               error = 0;
                                goto find_page;
-                       desc->error = error;
+                       }
                        goto out;
                }
                goto readpage;
@@ -1652,44 +1653,7 @@ out:
 
        *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
        file_accessed(filp);
-}
-
-int file_read_actor(read_descriptor_t *desc, struct page *page,
-                       unsigned long offset, unsigned long size)
-{
-       char *kaddr;
-       unsigned long left, count = desc->count;
-
-       if (size > count)
-               size = count;
-
-       /*
-        * Faults on the destination of a read are common, so do it before
-        * taking the kmap.
-        */
-       if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-               kaddr = kmap_atomic(page);
-               left = __copy_to_user_inatomic(desc->arg.buf,
-                                               kaddr + offset, size);
-               kunmap_atomic(kaddr);
-               if (left == 0)
-                       goto success;
-       }
-
-       /* Do it the slow way */
-       kaddr = kmap(page);
-       left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
-       kunmap(page);
-
-       if (left) {
-               size -= left;
-               desc->error = -EFAULT;
-       }
-success:
-       desc->count = count - size;
-       desc->written += size;
-       desc->arg.buf += size;
-       return size;
+       return written ? written : error;
 }
 
 /*
@@ -1747,14 +1711,15 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
-       unsigned long seg = 0;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;
+       struct iov_iter i;
 
        count = 0;
        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;
+       iov_iter_init(&i, iov, nr_segs, count, 0);
 
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
@@ -1776,6 +1741,11 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                if (retval > 0) {
                        *ppos = pos + retval;
                        count -= retval;
+                       /*
+                        * If we did a short DIO read we need to skip the
+                        * section of the iov that we've already read data into.
+                        */
+                       iov_iter_advance(&i, retval);
                }
 
                /*
@@ -1792,39 +1762,7 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                }
        }
 
-       count = retval;
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-               loff_t offset = 0;
-
-               /*
-                * If we did a short DIO read we need to skip the section of the
-                * iov that we've already read data into.
-                */
-               if (count) {
-                       if (count > iov[seg].iov_len) {
-                               count -= iov[seg].iov_len;
-                               continue;
-                       }
-                       offset = count;
-                       count = 0;
-               }
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base + offset;
-               desc.count = iov[seg].iov_len - offset;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_generic_file_read(filp, ppos, &desc);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
+       retval = do_generic_file_read(filp, ppos, &i, retval);
 out:
        return retval;
 }
@@ -2335,150 +2273,6 @@ struct page *read_cache_page_gfp(struct address_space *mapping,
 }
 EXPORT_SYMBOL(read_cache_page_gfp);
 
-static size_t __iovec_copy_from_user_inatomic(char *vaddr,
-                       const struct iovec *iov, size_t base, size_t bytes)
-{
-       size_t copied = 0, left = 0;
-
-       while (bytes) {
-               char __user *buf = iov->iov_base + base;
-               int copy = min(bytes, iov->iov_len - base);
-
-               base = 0;
-               left = __copy_from_user_inatomic(vaddr, buf, copy);
-               copied += copy;
-               bytes -= copy;
-               vaddr += copy;
-               iov++;
-
-               if (unlikely(left))
-                       break;
-       }
-       return copied - left;
-}
-
-/*
- * Copy as much as we can into the page and return the number of bytes which
- * were successfully copied.  If a fault is encountered then return the number of
- * bytes which were copied.
- */
-size_t iov_iter_copy_from_user_atomic(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       BUG_ON(!in_atomic());
-       kaddr = kmap_atomic(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap_atomic(kaddr);
-
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
-
-/*
- * This has the same sideeffects and return value as
- * iov_iter_copy_from_user_atomic().
- * The difference is that it attempts to resolve faults.
- * Page must not be locked.
- */
-size_t iov_iter_copy_from_user(struct page *page,
-               struct iov_iter *i, unsigned long offset, size_t bytes)
-{
-       char *kaddr;
-       size_t copied;
-
-       kaddr = kmap(page);
-       if (likely(i->nr_segs == 1)) {
-               int left;
-               char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user(kaddr + offset, buf, bytes);
-               copied = bytes - left;
-       } else {
-               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
-                                               i->iov, i->iov_offset, bytes);
-       }
-       kunmap(page);
-       return copied;
-}
-EXPORT_SYMBOL(iov_iter_copy_from_user);
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-       BUG_ON(i->count < bytes);
-
-       if (likely(i->nr_segs == 1)) {
-               i->iov_offset += bytes;
-               i->count -= bytes;
-       } else {
-               const struct iovec *iov = i->iov;
-               size_t base = i->iov_offset;
-               unsigned long nr_segs = i->nr_segs;
-
-               /*
-                * The !iov->iov_len check ensures we skip over unlikely
-                * zero-length segments (without overruning the iovec).
-                */
-               while (bytes || unlikely(i->count && !iov->iov_len)) {
-                       int copy;
-
-                       copy = min(bytes, iov->iov_len - base);
-                       BUG_ON(!i->count || i->count < copy);
-                       i->count -= copy;
-                       bytes -= copy;
-                       base += copy;
-                       if (iov->iov_len == base) {
-                               iov++;
-                               nr_segs--;
-                               base = 0;
-                       }
-               }
-               i->iov = iov;
-               i->iov_offset = base;
-               i->nr_segs = nr_segs;
-       }
-}
-EXPORT_SYMBOL(iov_iter_advance);
-
-/*
- * Fault in the first iovec of the given iov_iter, to a maximum length
- * of bytes. Returns 0 on success, or non-zero if the memory could not be
- * accessed (ie. because it is an invalid address).
- *
- * writev-intensive code may want this to prefault several iovecs -- that
- * would be possible (callers must not rely on the fact that _only_ the
- * first iovec will be faulted with the current implementation).
- */
-int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
-{
-       char __user *buf = i->iov->iov_base + i->iov_offset;
-       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
-       return fault_in_pages_readable(buf, bytes);
-}
-EXPORT_SYMBOL(iov_iter_fault_in_readable);
-
-/*
- * Return the count of just the current iov_iter segment.
- */
-size_t iov_iter_single_seg_count(const struct iov_iter *i)
-{
-       const struct iovec *iov = i->iov;
-       if (i->nr_segs == 1)
-               return i->count;
-       else
-               return min(i->count, iov->iov_len - i->iov_offset);
-}
-EXPORT_SYMBOL(iov_iter_single_seg_count);
-
 /*
  * Performs necessary checks before doing a write
  *
@@ -2585,7 +2379,7 @@ EXPORT_SYMBOL(pagecache_write_end);
 
 ssize_t
 generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long *nr_segs, loff_t pos, loff_t *ppos,
+               unsigned long *nr_segs, loff_t pos,
                size_t count, size_t ocount)
 {
        struct file     *file = iocb->ki_filp;
@@ -2646,7 +2440,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                        i_size_write(inode, pos);
                        mark_inode_dirty(inode);
                }
-               *ppos = pos;
+               iocb->ki_pos = pos;
        }
 out:
        return written;
@@ -2692,7 +2486,7 @@ found:
 }
 EXPORT_SYMBOL(grab_cache_page_write_begin);
 
-static ssize_t generic_perform_write(struct file *file,
+ssize_t generic_perform_write(struct file *file,
                                struct iov_iter *i, loff_t pos)
 {
        struct address_space *mapping = file->f_mapping;
@@ -2742,9 +2536,7 @@ again:
                if (mapping_writably_mapped(mapping))
                        flush_dcache_page(page);
 
-               pagefault_disable();
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
-               pagefault_enable();
                flush_dcache_page(page);
 
                mark_page_accessed(page);
@@ -2782,27 +2574,7 @@ again:
 
        return written ? written : status;
 }
-
-ssize_t
-generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
-               unsigned long nr_segs, loff_t pos, loff_t *ppos,
-               size_t count, ssize_t written)
-{
-       struct file *file = iocb->ki_filp;
-       ssize_t status;
-       struct iov_iter i;
-
-       iov_iter_init(&i, iov, nr_segs, count, written);
-       status = generic_perform_write(file, &i, pos);
-
-       if (likely(status >= 0)) {
-               written += status;
-               *ppos = pos + status;
-       }
-       
-       return written ? written : status;
-}
-EXPORT_SYMBOL(generic_file_buffered_write);
+EXPORT_SYMBOL(generic_perform_write);
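
With generic_file_buffered_write() removed above and generic_perform_write() exported in its place, callers build an iov_iter themselves and update iocb->ki_pos on success, exactly as __generic_file_aio_write() does below. A minimal sketch of that calling pattern; the wrapper name is made up for illustration:

/* Sketch only: mirrors the buffered path of __generic_file_aio_write(). */
static ssize_t buffered_write_sketch(struct kiocb *iocb, const struct iovec *iov,
				     unsigned long nr_segs, size_t count,
				     loff_t pos)
{
	struct iov_iter from;
	ssize_t status;

	iov_iter_init(&from, iov, nr_segs, count, 0);
	status = generic_perform_write(iocb->ki_filp, &from, pos);
	if (likely(status >= 0))
		iocb->ki_pos = pos + status;
	return status;
}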
 
 /**
  * __generic_file_aio_write - write data to a file
@@ -2824,16 +2596,18 @@ EXPORT_SYMBOL(generic_file_buffered_write);
  * avoid syncing under i_mutex.
  */
 ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                                unsigned long nr_segs, loff_t *ppos)
+                                unsigned long nr_segs)
 {
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
        size_t ocount;          /* original count */
        size_t count;           /* after file limit checks */
        struct inode    *inode = mapping->host;
-       loff_t          pos;
-       ssize_t         written;
+       loff_t          pos = iocb->ki_pos;
+       ssize_t         written = 0;
        ssize_t         err;
+       ssize_t         status;
+       struct iov_iter from;
 
        ocount = 0;
        err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
@@ -2841,12 +2615,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                return err;
 
        count = ocount;
-       pos = *ppos;
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
-       written = 0;
-
        err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
        if (err)
                goto out;
@@ -2862,45 +2633,47 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        if (err)
                goto out;
 
+       iov_iter_init(&from, iov, nr_segs, count, 0);
+
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (unlikely(file->f_flags & O_DIRECT)) {
                loff_t endbyte;
-               ssize_t written_buffered;
 
-               written = generic_file_direct_write(iocb, iov, &nr_segs, pos,
-                                                       ppos, count, ocount);
+               written = generic_file_direct_write(iocb, iov, &from.nr_segs, pos,
+                                                       count, ocount);
                if (written < 0 || written == count)
                        goto out;
+               iov_iter_advance(&from, written);
+
                /*
                 * direct-io write to a hole: fall through to buffered I/O
                 * for completing the rest of the request.
                 */
                pos += written;
                count -= written;
-               written_buffered = generic_file_buffered_write(iocb, iov,
-                                               nr_segs, pos, ppos, count,
-                                               written);
+
+               status = generic_perform_write(file, &from, pos);
                /*
-                * If generic_file_buffered_write() retuned a synchronous error
+                * If generic_perform_write() returned a synchronous error
                 * then we want to return the number of bytes which were
                 * direct-written, or the error code if that was zero.  Note
                 * that this differs from normal direct-io semantics, which
                 * will return -EFOO even if some bytes were written.
                 */
-               if (written_buffered < 0) {
-                       err = written_buffered;
+               if (unlikely(status < 0) && !written) {
+                       err = status;
                        goto out;
                }
-
+               iocb->ki_pos = pos + status;
                /*
                 * We need to ensure that the page cache pages are written to
                 * disk and invalidated to preserve the expected O_DIRECT
                 * semantics.
                 */
-               endbyte = pos + written_buffered - written - 1;
+               endbyte = pos + status - 1;
                err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
                if (err == 0) {
-                       written = written_buffered;
+                       written += status;
                        invalidate_mapping_pages(mapping,
                                                 pos >> PAGE_CACHE_SHIFT,
                                                 endbyte >> PAGE_CACHE_SHIFT);
@@ -2911,8 +2684,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                         */
                }
        } else {
-               written = generic_file_buffered_write(iocb, iov, nr_segs,
-                               pos, ppos, count, written);
+               written = generic_perform_write(file, &from, pos);
+               if (likely(written >= 0))
+                       iocb->ki_pos = pos + written;
        }
 out:
        current->backing_dev_info = NULL;
@@ -2941,7 +2715,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
        BUG_ON(iocb->ki_pos != pos);
 
        mutex_lock(&inode->i_mutex);
-       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+       ret = __generic_file_aio_write(iocb, iov, nr_segs);
        mutex_unlock(&inode->i_mutex);
 
        if (ret > 0) {
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
new file mode 100644 (file)
index 0000000..10e46cd
--- /dev/null
@@ -0,0 +1,224 @@
+#include <linux/export.h>
+#include <linux/uio.h>
+#include <linux/pagemap.h>
+
+size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
+                        struct iov_iter *i)
+{
+       size_t skip, copy, left, wanted;
+       const struct iovec *iov;
+       char __user *buf;
+       void *kaddr, *from;
+
+       if (unlikely(bytes > i->count))
+               bytes = i->count;
+
+       if (unlikely(!bytes))
+               return 0;
+
+       wanted = bytes;
+       iov = i->iov;
+       skip = i->iov_offset;
+       buf = iov->iov_base + skip;
+       copy = min(bytes, iov->iov_len - skip);
+
+       if (!fault_in_pages_writeable(buf, copy)) {
+               kaddr = kmap_atomic(page);
+               from = kaddr + offset;
+
+               /* first chunk, usually the only one */
+               left = __copy_to_user_inatomic(buf, from, copy);
+               copy -= left;
+               skip += copy;
+               from += copy;
+               bytes -= copy;
+
+               while (unlikely(!left && bytes)) {
+                       iov++;
+                       buf = iov->iov_base;
+                       copy = min(bytes, iov->iov_len);
+                       left = __copy_to_user_inatomic(buf, from, copy);
+                       copy -= left;
+                       skip = copy;
+                       from += copy;
+                       bytes -= copy;
+               }
+               if (likely(!bytes)) {
+                       kunmap_atomic(kaddr);
+                       goto done;
+               }
+               offset = from - kaddr;
+               buf += copy;
+               kunmap_atomic(kaddr);
+               copy = min(bytes, iov->iov_len - skip);
+       }
+       /* Too bad - revert to non-atomic kmap */
+       kaddr = kmap(page);
+       from = kaddr + offset;
+       left = __copy_to_user(buf, from, copy);
+       copy -= left;
+       skip += copy;
+       from += copy;
+       bytes -= copy;
+       while (unlikely(!left && bytes)) {
+               iov++;
+               buf = iov->iov_base;
+               copy = min(bytes, iov->iov_len);
+               left = __copy_to_user(buf, from, copy);
+               copy -= left;
+               skip = copy;
+               from += copy;
+               bytes -= copy;
+       }
+       kunmap(page);
+done:
+       i->count -= wanted - bytes;
+       i->nr_segs -= iov - i->iov;
+       i->iov = iov;
+       i->iov_offset = skip;
+       return wanted - bytes;
+}
+EXPORT_SYMBOL(copy_page_to_iter);
+
+static size_t __iovec_copy_from_user_inatomic(char *vaddr,
+                       const struct iovec *iov, size_t base, size_t bytes)
+{
+       size_t copied = 0, left = 0;
+
+       while (bytes) {
+               char __user *buf = iov->iov_base + base;
+               int copy = min(bytes, iov->iov_len - base);
+
+               base = 0;
+               left = __copy_from_user_inatomic(vaddr, buf, copy);
+               copied += copy;
+               bytes -= copy;
+               vaddr += copy;
+               iov++;
+
+               if (unlikely(left))
+                       break;
+       }
+       return copied - left;
+}
+
+/*
+ * Copy as much as we can into the page and return the number of bytes which
+ * were successfully copied.  If a fault is encountered then return the number
+ * of bytes which were copied before the fault.
+ */
+size_t iov_iter_copy_from_user_atomic(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap_atomic(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+                                               i->iov, i->iov_offset, bytes);
+       }
+       kunmap_atomic(kaddr);
+
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+
+/*
+ * This has the same side effects and return value as
+ * iov_iter_copy_from_user_atomic().
+ * The difference is that it attempts to resolve faults.
+ * Page must not be locked.
+ */
+size_t iov_iter_copy_from_user(struct page *page,
+               struct iov_iter *i, unsigned long offset, size_t bytes)
+{
+       char *kaddr;
+       size_t copied;
+
+       kaddr = kmap(page);
+       if (likely(i->nr_segs == 1)) {
+               int left;
+               char __user *buf = i->iov->iov_base + i->iov_offset;
+               left = __copy_from_user(kaddr + offset, buf, bytes);
+               copied = bytes - left;
+       } else {
+               copied = __iovec_copy_from_user_inatomic(kaddr + offset,
+                                               i->iov, i->iov_offset, bytes);
+       }
+       kunmap(page);
+       return copied;
+}
+EXPORT_SYMBOL(iov_iter_copy_from_user);
+
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
+{
+       BUG_ON(i->count < bytes);
+
+       if (likely(i->nr_segs == 1)) {
+               i->iov_offset += bytes;
+               i->count -= bytes;
+       } else {
+               const struct iovec *iov = i->iov;
+               size_t base = i->iov_offset;
+               unsigned long nr_segs = i->nr_segs;
+
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments (without overrunning the iovec).
+                */
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
+                       int copy;
+
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
+                       bytes -= copy;
+                       base += copy;
+                       if (iov->iov_len == base) {
+                               iov++;
+                               nr_segs--;
+                               base = 0;
+                       }
+               }
+               i->iov = iov;
+               i->iov_offset = base;
+               i->nr_segs = nr_segs;
+       }
+}
+EXPORT_SYMBOL(iov_iter_advance);
+
+/*
+ * Fault in the first iovec of the given iov_iter, to a maximum length
+ * of bytes. Returns 0 on success, or non-zero if the memory could not be
+ * accessed (i.e. because it is an invalid address).
+ *
+ * writev-intensive code may want this to prefault several iovecs -- that
+ * would be possible (callers must not rely on the fact that _only_ the
+ * first iovec will be faulted with the current implementation).
+ */
+int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
+{
+       char __user *buf = i->iov->iov_base + i->iov_offset;
+       bytes = min(bytes, i->iov->iov_len - i->iov_offset);
+       return fault_in_pages_readable(buf, bytes);
+}
+EXPORT_SYMBOL(iov_iter_fault_in_readable);
+
+/*
+ * Return the count of just the current iov_iter segment.
+ */
+size_t iov_iter_single_seg_count(const struct iov_iter *i)
+{
+       const struct iovec *iov = i->iov;
+       if (i->nr_segs == 1)
+               return i->count;
+       else
+               return min(i->count, iov->iov_len - i->iov_offset);
+}
+EXPORT_SYMBOL(iov_iter_single_seg_count);
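
The primitives gathered in this new file are consumed by do_generic_file_read() and generic_perform_write() in mm/filemap.c above. A rough sketch of how they compose for a single-segment read; the wrapper and its arguments are illustrative assumptions:

/* Sketch: copy (part of) one page-cache page into a user buffer via an iov_iter. */
static ssize_t copy_one_page_sketch(struct page *page, void __user *buf,
				    size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct iov_iter iter;
	size_t copied;

	iov_iter_init(&iter, &iov, 1, len, 0);
	copied = copy_page_to_iter(page, 0, min_t(size_t, len, PAGE_SIZE),
				   &iter);
	/* iter.count and iter.iov_offset have been advanced by "copied". */
	return copied;
}

copy_page_to_iter() advances the iterator itself; iov_iter_advance() is only needed when data was consumed some other way, as in the short direct-I/O read fixup in generic_file_aio_read() above.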
index cb79065c19e55f054888430555b44199425dd72b..8505c9262b35853e22580c6c9b74c4d12bc86acc 100644 (file)
 
 /**
  * process_vm_rw_pages - read/write pages from task specified
- * @task: task to read/write from
- * @mm: mm for task
- * @process_pages: struct pages area that can store at least
- *  nr_pages_to_copy struct page pointers
- * @pa: address of page in task to start copying from/to
+ * @pages: array of pointers to pages we want to copy
  * @start_offset: offset in page to start copying from/to
  * @len: number of bytes to copy
- * @lvec: iovec array specifying where to copy to/from
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
  * @vm_write: 0 means copy from, 1 means copy to
- * @nr_pages_to_copy: number of pages to copy
- * @bytes_copied: returns number of bytes successfully copied
  * Returns 0 on success, error code otherwise
  *  Returns 0 on success, error code otherwise
  */
-static int process_vm_rw_pages(struct task_struct *task,
-                              struct mm_struct *mm,
-                              struct page **process_pages,
-                              unsigned long pa,
-                              unsigned long start_offset,
-                              unsigned long len,
-                              const struct iovec *lvec,
-                              unsigned long lvec_cnt,
-                              unsigned long *lvec_current,
-                              size_t *lvec_offset,
-                              int vm_write,
-                              unsigned int nr_pages_to_copy,
-                              ssize_t *bytes_copied)
+static int process_vm_rw_pages(struct page **pages,
+                              unsigned offset,
+                              size_t len,
+                              struct iov_iter *iter,
+                              int vm_write)
 {
-       int pages_pinned;
-       void *target_kaddr;
-       int pgs_copied = 0;
-       int j;
-       int ret;
-       ssize_t bytes_to_copy;
-       ssize_t rc = 0;
-
-       *bytes_copied = 0;
-
-       /* Get the pages we're interested in */
-       down_read(&mm->mmap_sem);
-       pages_pinned = get_user_pages(task, mm, pa,
-                                     nr_pages_to_copy,
-                                     vm_write, 0, process_pages, NULL);
-       up_read(&mm->mmap_sem);
-
-       if (pages_pinned != nr_pages_to_copy) {
-               rc = -EFAULT;
-               goto end;
-       }
-
        /* Do the copy for each page */
-       for (pgs_copied = 0;
-            (pgs_copied < nr_pages_to_copy) && (*lvec_current < lvec_cnt);
-            pgs_copied++) {
-               /* Make sure we have a non zero length iovec */
-               while (*lvec_current < lvec_cnt
-                      && lvec[*lvec_current].iov_len == 0)
-                       (*lvec_current)++;
-               if (*lvec_current == lvec_cnt)
-                       break;
-
-               /*
-                * Will copy smallest of:
-                * - bytes remaining in page
-                * - bytes remaining in destination iovec
-                */
-               bytes_to_copy = min_t(ssize_t, PAGE_SIZE - start_offset,
-                                     len - *bytes_copied);
-               bytes_to_copy = min_t(ssize_t, bytes_to_copy,
-                                     lvec[*lvec_current].iov_len
-                                     - *lvec_offset);
-
-               target_kaddr = kmap(process_pages[pgs_copied]) + start_offset;
-
-               if (vm_write)
-                       ret = copy_from_user(target_kaddr,
-                                            lvec[*lvec_current].iov_base
-                                            + *lvec_offset,
-                                            bytes_to_copy);
-               else
-                       ret = copy_to_user(lvec[*lvec_current].iov_base
-                                          + *lvec_offset,
-                                          target_kaddr, bytes_to_copy);
-               kunmap(process_pages[pgs_copied]);
-               if (ret) {
-                       *bytes_copied += bytes_to_copy - ret;
-                       pgs_copied++;
-                       rc = -EFAULT;
-                       goto end;
-               }
-               *bytes_copied += bytes_to_copy;
-               *lvec_offset += bytes_to_copy;
-               if (*lvec_offset == lvec[*lvec_current].iov_len) {
-                       /*
-                        * Need to copy remaining part of page into the
-                        * next iovec if there are any bytes left in page
-                        */
-                       (*lvec_current)++;
-                       *lvec_offset = 0;
-                       start_offset = (start_offset + bytes_to_copy)
-                               % PAGE_SIZE;
-                       if (start_offset)
-                               pgs_copied--;
+       while (len && iov_iter_count(iter)) {
+               struct page *page = *pages++;
+               size_t copy = PAGE_SIZE - offset;
+               size_t copied;
+
+               if (copy > len)
+                       copy = len;
+
+               if (vm_write) {
+                       if (copy > iov_iter_count(iter))
+                               copy = iov_iter_count(iter);
+                       copied = iov_iter_copy_from_user(page, iter,
+                                       offset, copy);
+                       iov_iter_advance(iter, copied);
+                       set_page_dirty_lock(page);
                } else {
-                       start_offset = 0;
-               }
-       }
-
-end:
-       if (vm_write) {
-               for (j = 0; j < pages_pinned; j++) {
-                       if (j < pgs_copied)
-                               set_page_dirty_lock(process_pages[j]);
-                       put_page(process_pages[j]);
+                       copied = copy_page_to_iter(page, offset, copy, iter);
                }
-       } else {
-               for (j = 0; j < pages_pinned; j++)
-                       put_page(process_pages[j]);
+               len -= copied;
+               if (copied < copy && iov_iter_count(iter))
+                       return -EFAULT;
+               offset = 0;
        }
-
-       return rc;
+       return 0;
 }
 
 /* Maximum number of pages kmalloc'd to hold struct page's during copy */
@@ -155,67 +70,60 @@ end:
  * process_vm_rw_single_vec - read/write pages from the specified task
  * @addr: start memory address of target process
  * @len: size of area to copy to/from
- * @lvec: iovec array specifying where to copy to/from locally
- * @lvec_cnt: number of elements in iovec array
- * @lvec_current: index in iovec array we are up to
- * @lvec_offset: offset in bytes from current iovec iov_base we are up to
+ * @iter: where to copy to/from locally
  * @process_pages: struct pages area that can store at least
  *  nr_pages_to_copy struct page pointers
  * @mm: mm for task
  * @task: task to read/write from
  * @vm_write: 0 means copy from, 1 means copy to
- * @bytes_copied: returns number of bytes successfully copied
  *  Returns 0 on success or an error code on failure
  */
 static int process_vm_rw_single_vec(unsigned long addr,
                                    unsigned long len,
-                                   const struct iovec *lvec,
-                                   unsigned long lvec_cnt,
-                                   unsigned long *lvec_current,
-                                   size_t *lvec_offset,
+                                   struct iov_iter *iter,
                                    struct page **process_pages,
                                    struct mm_struct *mm,
                                    struct task_struct *task,
-                                   int vm_write,
-                                   ssize_t *bytes_copied)
+                                   int vm_write)
 {
        unsigned long pa = addr & PAGE_MASK;
        unsigned long start_offset = addr - pa;
        unsigned long nr_pages;
-       ssize_t bytes_copied_loop;
        ssize_t rc = 0;
-       unsigned long nr_pages_copied = 0;
-       unsigned long nr_pages_to_copy;
        unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
                / sizeof(struct pages *);
 
-       *bytes_copied = 0;
-
        /* Work out address and page range required */
        if (len == 0)
                return 0;
        nr_pages = (addr + len - 1) / PAGE_SIZE - addr / PAGE_SIZE + 1;
 
-       while ((nr_pages_copied < nr_pages) && (*lvec_current < lvec_cnt)) {
-               nr_pages_to_copy = min(nr_pages - nr_pages_copied,
-                                      max_pages_per_loop);
+       while (!rc && nr_pages && iov_iter_count(iter)) {
+               int pages = min(nr_pages, max_pages_per_loop);
+               size_t bytes;
 
-               rc = process_vm_rw_pages(task, mm, process_pages, pa,
-                                        start_offset, len,
-                                        lvec, lvec_cnt,
-                                        lvec_current, lvec_offset,
-                                        vm_write, nr_pages_to_copy,
-                                        &bytes_copied_loop);
-               start_offset = 0;
-               *bytes_copied += bytes_copied_loop;
+               /* Get the pages we're interested in */
+               down_read(&mm->mmap_sem);
+               pages = get_user_pages(task, mm, pa, pages,
+                                     vm_write, 0, process_pages, NULL);
+               up_read(&mm->mmap_sem);
 
-               if (rc < 0) {
-                       return rc;
-               } else {
-                       len -= bytes_copied_loop;
-                       nr_pages_copied += nr_pages_to_copy;
-                       pa += nr_pages_to_copy * PAGE_SIZE;
-               }
+               if (pages <= 0)
+                       return -EFAULT;
+
+               bytes = pages * PAGE_SIZE - start_offset;
+               if (bytes > len)
+                       bytes = len;
+
+               rc = process_vm_rw_pages(process_pages,
+                                        start_offset, bytes, iter,
+                                        vm_write);
+               len -= bytes;
+               start_offset = 0;
+               nr_pages -= pages;
+               pa += pages * PAGE_SIZE;
+               while (pages)
+                       put_page(process_pages[--pages]);
        }
 
        return rc;
@@ -228,8 +136,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
 /**
  * process_vm_rw_core - core of reading/writing pages from the specified task
  * @pid: PID of process to read/write from/to
- * @lvec: iovec array specifying where to copy to/from locally
- * @liovcnt: size of lvec array
+ * @iter: where to copy to/from locally
  * @rvec: iovec array specifying where to copy to/from in the other process
  * @riovcnt: size of rvec array
  * @flags: currently unused
@@ -238,8 +145,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
  *  return fewer bytes than expected if an error occurs during the copying
  *  process.
  */
-static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
-                                 unsigned long liovcnt,
+static ssize_t process_vm_rw_core(pid_t pid, struct iov_iter *iter,
                                  const struct iovec *rvec,
                                  unsigned long riovcnt,
                                  unsigned long flags, int vm_write)
@@ -250,13 +156,10 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
        struct mm_struct *mm;
        unsigned long i;
        ssize_t rc = 0;
-       ssize_t bytes_copied_loop;
-       ssize_t bytes_copied = 0;
        unsigned long nr_pages = 0;
        unsigned long nr_pages_iov;
-       unsigned long iov_l_curr_idx = 0;
-       size_t iov_l_curr_offset = 0;
        ssize_t iov_len;
+       size_t total_len = iov_iter_count(iter);
 
        /*
         * Work out how many pages of struct pages we're going to need
@@ -310,24 +213,20 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
                goto put_task_struct;
        }
 
-       for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
+       for (i = 0; i < riovcnt && iov_iter_count(iter) && !rc; i++)
                rc = process_vm_rw_single_vec(
                        (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
-                       lvec, liovcnt, &iov_l_curr_idx, &iov_l_curr_offset,
-                       process_pages, mm, task, vm_write, &bytes_copied_loop);
-               bytes_copied += bytes_copied_loop;
-               if (rc != 0) {
-                       /* If we have managed to copy any data at all then
-                          we return the number of bytes copied. Otherwise
-                          we return the error code */
-                       if (bytes_copied)
-                               rc = bytes_copied;
-                       goto put_mm;
-               }
-       }
+                       iter, process_pages, mm, task, vm_write);
+
+       /* copied = space before - space after */
+       total_len -= iov_iter_count(iter);
+
+       /* If we have managed to copy any data at all then
+          we return the number of bytes copied. Otherwise
+          we return the error code */
+       if (total_len)
+               rc = total_len;
 
-       rc = bytes_copied;
-put_mm:
        mmput(mm);
 
 put_task_struct:
@@ -363,6 +262,7 @@ static ssize_t process_vm_rw(pid_t pid,
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
+       struct iov_iter iter;
        ssize_t rc;
 
        if (flags != 0)
@@ -378,13 +278,14 @@ static ssize_t process_vm_rw(pid_t pid,
        if (rc <= 0)
                goto free_iovecs;
 
+       iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
+
        rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
                                   iovstack_r, &iov_r);
        if (rc <= 0)
                goto free_iovecs;
 
-       rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-                               vm_write);
+       rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
        if (iov_r != iovstack_r)
@@ -424,6 +325,7 @@ compat_process_vm_rw(compat_pid_t pid,
        struct iovec iovstack_r[UIO_FASTIOV];
        struct iovec *iov_l = iovstack_l;
        struct iovec *iov_r = iovstack_r;
+       struct iov_iter iter;
        ssize_t rc = -EFAULT;
 
        if (flags != 0)
@@ -439,14 +341,14 @@ compat_process_vm_rw(compat_pid_t pid,
                                                  &iov_l);
        if (rc <= 0)
                goto free_iovecs;
+       iov_iter_init(&iter, iov_l, liovcnt, rc, 0);
        rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
                                          UIO_FASTIOV, iovstack_r,
                                          &iov_r);
        if (rc <= 0)
                goto free_iovecs;
 
-       rc = process_vm_rw_core(pid, iov_l, liovcnt, iov_r, riovcnt, flags,
-                          vm_write);
+       rc = process_vm_rw_core(pid, &iter, iov_r, riovcnt, flags, vm_write);
 
 free_iovecs:
        if (iov_r != iovstack_r)
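
The hunks above convert the kernel side of process_vm_readv()/process_vm_writev() to a single iov_iter instead of hand-tracked iovec indices and offsets. For readers unfamiliar with the interface being serviced, here is a minimal userspace sketch of the syscall; it simply reads the caller's own address space, and the buffer names are illustrative only:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char src[32] = "hello from the remote side";
	char dst[32] = { 0 };
	struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	/* Read our own address space; a real caller would pass another PID
	 * (subject to the usual ptrace permission checks). */
	ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
	if (n < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("copied %zd bytes: %s\n", n, dst);
	return 0;
}
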
index 70273f8df5867a33aeea7267e4268a949a35aaaa..8f1a95406bae6a61b20be247b50603cdedc30398 100644 (file)
@@ -1402,13 +1402,25 @@ shmem_write_end(struct file *file, struct address_space *mapping,
        return copied;
 }
 
-static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
+static ssize_t shmem_file_aio_read(struct kiocb *iocb,
+               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
 {
-       struct inode *inode = file_inode(filp);
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        unsigned long offset;
        enum sgp_type sgp = SGP_READ;
+       int error;
+       ssize_t retval;
+       size_t count;
+       loff_t *ppos = &iocb->ki_pos;
+       struct iov_iter iter;
+
+       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
+       if (retval)
+               return retval;
+       iov_iter_init(&iter, iov, nr_segs, count, 0);
 
        /*
         * Might this read be for a stacking filesystem?  Then when reading
@@ -1436,10 +1448,10 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                                break;
                }
 
-               desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
-               if (desc->error) {
-                       if (desc->error == -EINVAL)
-                               desc->error = 0;
+               error = shmem_getpage(inode, index, &page, sgp, NULL);
+               if (error) {
+                       if (error == -EINVAL)
+                               error = 0;
                        break;
                }
                if (page)
@@ -1483,61 +1495,26 @@ static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_
                /*
                 * Ok, we have the page, and it's up-to-date, so
                 * now we can copy it to user space...
-                *
-                * The actor routine returns how many bytes were actually used..
-                * NOTE! This may not be the same as how much of a user buffer
-                * we filled up (we may be padding etc), so we can only update
-                * "pos" here (the actor routine has to update the user buffer
-                * pointers and the remaining count).
                 */
-               ret = actor(desc, page, offset, nr);
+               ret = copy_page_to_iter(page, offset, nr, &iter);
+               retval += ret;
                offset += ret;
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
 
                page_cache_release(page);
-               if (ret != nr || !desc->count)
+               if (!iov_iter_count(&iter))
                        break;
-
+               if (ret < nr) {
+                       error = -EFAULT;
+                       break;
+               }
                cond_resched();
        }
 
        *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
-       file_accessed(filp);
-}
-
-static ssize_t shmem_file_aio_read(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-       struct file *filp = iocb->ki_filp;
-       ssize_t retval;
-       unsigned long seg;
-       size_t count;
-       loff_t *ppos = &iocb->ki_pos;
-
-       retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
-       if (retval)
-               return retval;
-
-       for (seg = 0; seg < nr_segs; seg++) {
-               read_descriptor_t desc;
-
-               desc.written = 0;
-               desc.arg.buf = iov[seg].iov_base;
-               desc.count = iov[seg].iov_len;
-               if (desc.count == 0)
-                       continue;
-               desc.error = 0;
-               do_shmem_file_read(filp, ppos, &desc, file_read_actor);
-               retval += desc.written;
-               if (desc.error) {
-                       retval = retval ?: desc.error;
-                       break;
-               }
-               if (desc.count > 0)
-                       break;
-       }
-       return retval;
+       file_accessed(file);
+       return retval ? retval : error;
 }
 
 static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
@@ -1576,7 +1553,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
        index = *ppos >> PAGE_CACHE_SHIFT;
        loff = *ppos & ~PAGE_CACHE_MASK;
        req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       nr_pages = min(req_pages, pipe->buffers);
+       nr_pages = min(req_pages, spd.nr_pages_max);
 
        spd.nr_pages = find_get_pages_contig(mapping, index,
                                                nr_pages, spd.pages);
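
shmem_file_aio_read() now walks one iov_iter across the whole request instead of looping over a read_descriptor_t per segment, so a scatter read fills each user buffer in order within a single pass. A small POSIX illustration of that per-segment behaviour, assuming nothing about the file beyond it being readable (the path is arbitrary):

#include <stdio.h>
#include <fcntl.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char head[8], tail[8];
	struct iovec iov[2] = {
		{ .iov_base = head, .iov_len = sizeof(head) },
		{ .iov_base = tail, .iov_len = sizeof(tail) },
	};
	int fd = open("/etc/hostname", O_RDONLY);	/* any readable file will do */

	if (fd < 0)
		return 1;
	/* The kernel fills head first, then tail, advancing one iterator
	 * across both segments. */
	ssize_t n = readv(fd, iov, 2);
	printf("read %zd bytes across two buffers\n", n);
	close(fd);
	return 0;
}
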
index d7813e6d4cc7c042c44a01692f0993a2b6bad04a..f380af7ea7797e287b222cd57b665fad02b6132e 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -446,6 +446,54 @@ unsigned long vm_commit_limit(void)
        return allowed;
 }
 
+/**
+ * get_cmdline() - copy the cmdline value to a buffer.
+ * @task:     the task whose cmdline value to copy.
+ * @buffer:   the buffer to copy to.
+ * @buflen:   the length of the buffer. Larger cmdline values are truncated
+ *            to this length.
+ * Returns the size of the cmdline field copied. Note that the copy does
+ * not guarantee an ending NULL byte.
+ */
+int get_cmdline(struct task_struct *task, char *buffer, int buflen)
+{
+       int res = 0;
+       unsigned int len;
+       struct mm_struct *mm = get_task_mm(task);
+       if (!mm)
+               goto out;
+       if (!mm->arg_end)
+               goto out_mm;    /* Shh! No looking before we're done */
+
+       len = mm->arg_end - mm->arg_start;
+
+       if (len > buflen)
+               len = buflen;
+
+       res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+
+       /*
+        * If the nul at the end of args has been overwritten, then
+        * assume the application is using setproctitle(3).
+        */
+       if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
+               len = strnlen(buffer, res);
+               if (len < res) {
+                       res = len;
+               } else {
+                       len = mm->env_end - mm->env_start;
+                       if (len > buflen - res)
+                               len = buflen - res;
+                       res += access_process_vm(task, mm->env_start,
+                                                buffer+res, len, 0);
+                       res = strnlen(buffer, res);
+               }
+       }
+out_mm:
+       mmput(mm);
+out:
+       return res;
+}
 
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
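
get_cmdline() copies, via access_process_vm(), the same argument block that userspace reads through /proc/<pid>/cmdline. A small userspace sketch of that view, assuming only that procfs is mounted:

#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/proc/self/cmdline", "rb");
	size_t n, i;

	if (!f)
		return 1;
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	/* Arguments are NUL-separated; print one per line. */
	for (i = 0; i < n; i++)
		putchar(buf[i] ? buf[i] : '\n');
	return 0;
}
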
index 9186550d77a61b84c8f310ad7cd8e602ed116980..0004cbaac4a41d9737e017e257c9083df4969b8a 100644 (file)
@@ -415,9 +415,17 @@ static void p9_free_req(struct p9_client *c, struct p9_req_t *r)
  * req: request received
  *
  */
-void p9_client_cb(struct p9_client *c, struct p9_req_t *req)
+void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status)
 {
        p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag);
+
+       /*
+        * This barrier is needed to make sure any change made to req before
+        * the other thread wakes up will indeed be seen by the waiting side.
+        */
+       smp_wmb();
+       req->status = status;
+
        wake_up(req->wq);
        p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag);
 }
@@ -655,16 +663,13 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-
        /*
         * if we haven't received a response for oldreq,
         * remove it from the list
         */
-       if (oldreq->status == REQ_STATUS_FLSH) {
-               spin_lock(&c->lock);
-               list_del(&oldreq->req_list);
-               spin_unlock(&c->lock);
-       }
+       if (oldreq->status == REQ_STATUS_SENT)
+               if (c->trans_mod->cancelled)
+                       c->trans_mod->cancelled(c, oldreq);
 
        p9_free_req(c, req);
        return 0;
@@ -751,6 +756,12 @@ again:
        err = wait_event_interruptible(*req->wq,
                                       req->status >= REQ_STATUS_RCVD);
 
+       /*
+        * Make sure our req is coherent with regard to updates in other
+        * threads - pairs with the smp_wmb() in the callback
+        */
+       smp_rmb();
+
        if ((err == -ERESTARTSYS) && (c->status == Connected)
                                  && (type == P9_TFLUSH)) {
                sigpending = 1;
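
The smp_wmb() in p9_client_cb() and the smp_rmb() after wait_event_interruptible() form a publish/consume pair: the reply data hung off req must be visible before the status store, and must be re-read after the status is observed. A rough userspace analogue of that ordering, using C11 release/acquire atomics (the names are illustrative and these are not the kernel primitives):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;		/* stands in for the reply data hung off req */
static atomic_int status;	/* stands in for req->status */

static void *responder(void *arg)
{
	(void)arg;
	payload = 42;						 /* write the data first...   */
	atomic_store_explicit(&status, 1, memory_order_release);/* ...then publish the status */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, responder, NULL);
	/* The acquire load pairs with the release store above, so once the
	 * status flips we are guaranteed to observe payload == 42. */
	while (atomic_load_explicit(&status, memory_order_acquire) == 0)
		;
	printf("payload = %d\n", payload);
	pthread_join(t, NULL);
	return 0;
}
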
index b7bd7f2961bf60f49258bd60a702eadd8d651773..80d08f6664cbb5611eef291b959facd5332a61ef 100644 (file)
@@ -66,20 +66,6 @@ struct p9_fd_opts {
        int privport;
 };
 
-/**
- * struct p9_trans_fd - transport state
- * @rd: reference to file to read from
- * @wr: reference of file to write to
- * @conn: connection state reference
- *
- */
-
-struct p9_trans_fd {
-       struct file *rd;
-       struct file *wr;
-       struct p9_conn *conn;
-};
-
 /*
   * Option Parsing (code inspired by NFS code)
   *  - a little lazy - parse all fd-transport options
@@ -159,6 +145,20 @@ struct p9_conn {
        unsigned long wsched;
 };
 
+/**
+ * struct p9_trans_fd - transport state
+ * @rd: reference to file to read from
+ * @wr: reference of file to write to
+ * @conn: connection state reference
+ *
+ */
+
+struct p9_trans_fd {
+       struct file *rd;
+       struct file *wr;
+       struct p9_conn conn;
+};
+
 static void p9_poll_workfn(struct work_struct *work);
 
 static DEFINE_SPINLOCK(p9_poll_lock);
@@ -212,15 +212,9 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
        m->err = err;
 
        list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
-               req->status = REQ_STATUS_ERROR;
-               if (!req->t_err)
-                       req->t_err = err;
                list_move(&req->req_list, &cancel_list);
        }
        list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
-               req->status = REQ_STATUS_ERROR;
-               if (!req->t_err)
-                       req->t_err = err;
                list_move(&req->req_list, &cancel_list);
        }
        spin_unlock_irqrestore(&m->client->lock, flags);
@@ -228,7 +222,9 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
        list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
                p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
                list_del(&req->req_list);
-               p9_client_cb(m->client, req);
+               if (!req->t_err)
+                       req->t_err = err;
+               p9_client_cb(m->client, req, REQ_STATUS_ERROR);
        }
 }
 
@@ -302,6 +298,7 @@ static void p9_read_work(struct work_struct *work)
 {
        int n, err;
        struct p9_conn *m;
+       int status = REQ_STATUS_ERROR;
 
        m = container_of(work, struct p9_conn, rq);
 
@@ -348,8 +345,7 @@ static void p9_read_work(struct work_struct *work)
                         "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
 
                m->req = p9_tag_lookup(m->client, tag);
-               if (!m->req || (m->req->status != REQ_STATUS_SENT &&
-                                       m->req->status != REQ_STATUS_FLSH)) {
+               if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
                        p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
                                 tag);
                        err = -EIO;
@@ -375,10 +371,10 @@ static void p9_read_work(struct work_struct *work)
                p9_debug(P9_DEBUG_TRANS, "got new packet\n");
                spin_lock(&m->client->lock);
                if (m->req->status != REQ_STATUS_ERROR)
-                       m->req->status = REQ_STATUS_RCVD;
+                       status = REQ_STATUS_RCVD;
                list_del(&m->req->req_list);
                spin_unlock(&m->client->lock);
-               p9_client_cb(m->client, m->req);
+               p9_client_cb(m->client, m->req, status);
                m->rbuf = NULL;
                m->rpos = 0;
                m->rsize = 0;
@@ -573,21 +569,19 @@ p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
 }
 
 /**
- * p9_conn_create - allocate and initialize the per-session mux data
+ * p9_conn_create - initialize the per-session mux data
  * @client: client instance
  *
  * Note: Creates the polling task if this is the first session.
  */
 
-static struct p9_conn *p9_conn_create(struct p9_client *client)
+static void p9_conn_create(struct p9_client *client)
 {
        int n;
-       struct p9_conn *m;
+       struct p9_trans_fd *ts = client->trans;
+       struct p9_conn *m = &ts->conn;
 
        p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);
-       m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
-       if (!m)
-               return ERR_PTR(-ENOMEM);
 
        INIT_LIST_HEAD(&m->mux_list);
        m->client = client;
@@ -609,8 +603,6 @@ static struct p9_conn *p9_conn_create(struct p9_client *client)
                p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
                set_bit(Wpending, &m->wsched);
        }
-
-       return m;
 }
 
 /**
@@ -669,7 +661,7 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 {
        int n;
        struct p9_trans_fd *ts = client->trans;
-       struct p9_conn *m = ts->conn;
+       struct p9_conn *m = &ts->conn;
 
        p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
                 m, current, req->tc, req->tc->id);
@@ -704,14 +696,26 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
                list_del(&req->req_list);
                req->status = REQ_STATUS_FLSHD;
                ret = 0;
-       } else if (req->status == REQ_STATUS_SENT)
-               req->status = REQ_STATUS_FLSH;
-
+       }
        spin_unlock(&client->lock);
 
        return ret;
 }
 
+static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
+{
+       p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
+
+       /* we haven't received a response for oldreq,
+        * remove it from the list.
+        */
+       spin_lock(&client->lock);
+       list_del(&req->req_list);
+       spin_unlock(&client->lock);
+
+       return 0;
+}
+
 /**
  * parse_opts - parse mount options into p9_fd_opts structure
  * @params: options string passed from mount
@@ -780,7 +784,7 @@ static int parse_opts(char *params, struct p9_fd_opts *opts)
 
 static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
 {
-       struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
+       struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
                                           GFP_KERNEL);
        if (!ts)
                return -ENOMEM;
@@ -806,9 +810,8 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 {
        struct p9_trans_fd *p;
        struct file *file;
-       int ret;
 
-       p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
+       p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
 
@@ -829,20 +832,12 @@ static int p9_socket_open(struct p9_client *client, struct socket *csocket)
 
        p->rd->f_flags |= O_NONBLOCK;
 
-       p->conn = p9_conn_create(client);
-       if (IS_ERR(p->conn)) {
-               ret = PTR_ERR(p->conn);
-               p->conn = NULL;
-               kfree(p);
-               sockfd_put(csocket);
-               sockfd_put(csocket);
-               return ret;
-       }
+       p9_conn_create(client);
        return 0;
 }
 
 /**
- * p9_mux_destroy - cancels all pending requests and frees mux resources
+ * p9_mux_destroy - cancels all pending requests on the mux
  * @m: mux to destroy
  *
  */
@@ -859,7 +854,6 @@ static void p9_conn_destroy(struct p9_conn *m)
        p9_conn_cancel(m, -ECONNRESET);
 
        m->client = NULL;
-       kfree(m);
 }
 
 /**
@@ -881,7 +875,7 @@ static void p9_fd_close(struct p9_client *client)
 
        client->status = Disconnected;
 
-       p9_conn_destroy(ts->conn);
+       p9_conn_destroy(&ts->conn);
 
        if (ts->rd)
                fput(ts->rd);
@@ -1033,14 +1027,7 @@ p9_fd_create(struct p9_client *client, const char *addr, char *args)
                return err;
 
        p = (struct p9_trans_fd *) client->trans;
-       p->conn = p9_conn_create(client);
-       if (IS_ERR(p->conn)) {
-               err = PTR_ERR(p->conn);
-               p->conn = NULL;
-               fput(p->rd);
-               fput(p->wr);
-               return err;
-       }
+       p9_conn_create(client);
 
        return 0;
 }
@@ -1053,6 +1040,7 @@ static struct p9_trans_module p9_tcp_trans = {
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
+       .cancelled = p9_fd_cancelled,
        .owner = THIS_MODULE,
 };
 
@@ -1064,6 +1052,7 @@ static struct p9_trans_module p9_unix_trans = {
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
+       .cancelled = p9_fd_cancelled,
        .owner = THIS_MODULE,
 };
 
@@ -1075,6 +1064,7 @@ static struct p9_trans_module p9_fd_trans = {
        .close = p9_fd_close,
        .request = p9_fd_request,
        .cancel = p9_fd_cancel,
+       .cancelled = p9_fd_cancelled,
        .owner = THIS_MODULE,
 };
 
index 8f68df5d29731cf23b6de1bbee5284c904006c93..14ad43b5cf89a323f3de9d2ca03879f4b4c9bf35 100644 (file)
@@ -193,6 +193,8 @@ static int parse_opts(char *params, struct p9_rdma_opts *opts)
                if (!*p)
                        continue;
                token = match_token(p, tokens, args);
+               if (token == Opt_err)
+                       continue;
                r = match_int(&args[0], &option);
                if (r < 0) {
                        p9_debug(P9_DEBUG_ERROR,
@@ -305,8 +307,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
        }
 
        req->rc = c->rc;
-       req->status = REQ_STATUS_RCVD;
-       p9_client_cb(client, req);
+       p9_client_cb(client, req, REQ_STATUS_RCVD);
 
        return;
 
@@ -511,6 +512,11 @@ dont_need_post_recv:
                goto send_error;
        }
 
+       /* Mark request as `sent' *before* we actually send it,
+        * because doing it after could erase the REQ_STATUS_RCVD
+        * status in case of a very fast reply.
+        */
+       req->status = REQ_STATUS_SENT;
        err = ib_post_send(rdma->qp, &wr, &bad_wr);
        if (err)
                goto send_error;
@@ -520,6 +526,7 @@ dont_need_post_recv:
 
  /* Handle errors that happened during or while preparing the send: */
  send_error:
+       req->status = REQ_STATUS_ERROR;
        kfree(c);
        p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
 
@@ -582,12 +589,24 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
        return rdma;
 }
 
-/* its not clear to me we can do anything after send has been posted */
 static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
 {
+       /* Nothing to do here.
+        * We will take care of it (if we have to) in rdma_cancelled()
+        */
        return 1;
 }
 
+/* A request has been fully flushed without a reply.
+ * That means we have posted one buffer in excess.
+ */
+static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
+{
+       struct p9_trans_rdma *rdma = client->trans;
+       atomic_inc(&rdma->excess_rc);
+       return 0;
+}
+
 /**
  * trans_create_rdma - Transport method for creating a transport instance
  * @client: client instance
@@ -721,6 +740,7 @@ static struct p9_trans_module p9_rdma_trans = {
        .close = rdma_close,
        .request = rdma_request,
        .cancel = rdma_cancel,
+       .cancelled = rdma_cancelled,
 };
 
 /**
index ac2666c1d01127ab5ac73946377c6edd1a3ffb67..6940d8fe897147cd22f497d4542d2f9ee79e863a 100644 (file)
@@ -164,8 +164,7 @@ static void req_done(struct virtqueue *vq)
                p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc);
                p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
                req = p9_tag_lookup(chan->client, rc->tag);
-               req->status = REQ_STATUS_RCVD;
-               p9_client_cb(chan->client, req);
+               p9_client_cb(chan->client, req, REQ_STATUS_RCVD);
        }
 }
 
index babd8626bf968f584153518d7ecb60e9a4616d39..6b540f1822e0b43c175466615ff78db50b0df0f5 100644 (file)
@@ -139,7 +139,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
        int error;
        int size;
 
-       if (!inode->i_op || !inode->i_op->getxattr)
+       if (!inode->i_op->getxattr)
                return -EOPNOTSUPP;
        desc = init_desc(type);
        if (IS_ERR(desc))
index 996092f21b64a4b71ee1a46acc49a94802e7618e..6e0bd933b6a9a8a815f5d57c147f1d18dfbfec36 100644 (file)
@@ -64,7 +64,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
        int error;
        int count = 0;
 
-       if (!inode->i_op || !inode->i_op->getxattr)
+       if (!inode->i_op->getxattr)
                return -EOPNOTSUPP;
 
        for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
index aab9fa5a8231e7380097a96092296bdcebfa22f2..90987d15b6fe6c01ca593e185d2ae7a780186c31 100644 (file)
@@ -40,7 +40,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
 
        ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
        audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
-                        current->pid,
+                        task_pid_nr(current),
                         from_kuid(&init_user_ns, current_cred()->uid),
                         from_kuid(&init_user_ns, audit_get_loginuid(current)),
                         audit_get_sessionid(current));
index 9a62045e6282467493567a52f546d1e8d269bcd6..69fdf3bc765b3e1deddb374dfa27908fc05df851 100644 (file)
@@ -220,7 +220,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
         */
        BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
 
-       audit_log_format(ab, " pid=%d comm=", tsk->pid);
+       audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk));
        audit_log_untrustedstring(ab, tsk->comm);
 
        switch (a->type) {
@@ -278,9 +278,12 @@ static void dump_common_audit_data(struct audit_buffer *ab,
        }
        case LSM_AUDIT_DATA_TASK:
                tsk = a->u.tsk;
-               if (tsk && tsk->pid) {
-                       audit_log_format(ab, " pid=%d comm=", tsk->pid);
-                       audit_log_untrustedstring(ab, tsk->comm);
+               if (tsk) {
+                       pid_t pid = task_pid_nr(tsk);
+                       if (pid) {
+                               audit_log_format(ab, " pid=%d comm=", pid);
+                               audit_log_untrustedstring(ab, tsk->comm);
+                       }
                }
                break;
        case LSM_AUDIT_DATA_NET:
index 80a09c37cac8eba4cc26b121da37d2e03929cb55..a3386d119425eb8367194063470efe017eef622b 100644 (file)
@@ -173,7 +173,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
                 * Use filesystem name if filesystem does not support rename()
                 * operation.
                 */
-               if (inode->i_op && !inode->i_op->rename)
+               if (!inode->i_op->rename)
                        goto prepend_filesystem_name;
        }
        /* Prepend device name. */
@@ -282,7 +282,7 @@ char *tomoyo_realpath_from_path(struct path *path)
                 * Get local name for filesystems without rename() operation
                 * or dentry without vfsmount.
                 */
-               if (!path->mnt || (inode->i_op && !inode->i_op->rename))
+               if (!path->mnt || !inode->i_op->rename)
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
index 6c2dc3863ac00c65b327ee58a0a7d281fb464740..7e21621e492a4b2c83e9f296d9f472a581eb97b2 100644 (file)
@@ -150,10 +150,8 @@ static void snd_cs8427_free(struct snd_i2c_device *device)
        kfree(device->private_data);
 }
 
-int snd_cs8427_create(struct snd_i2c_bus *bus,
-                     unsigned char addr,
-                     unsigned int reset_timeout,
-                     struct snd_i2c_device **r_cs8427)
+int snd_cs8427_init(struct snd_i2c_bus *bus,
+                   struct snd_i2c_device *device)
 {
        static unsigned char initvals1[] = {
          CS8427_REG_CONTROL1 | CS8427_REG_AUTOINC,
@@ -200,22 +198,10 @@ int snd_cs8427_create(struct snd_i2c_bus *bus,
             Inhibit E->F transfers. */
          CS8427_UD | CS8427_EFTUI | CS8427_DETUI,
        };
+       struct cs8427 *chip = device->private_data;
        int err;
-       struct cs8427 *chip;
-       struct snd_i2c_device *device;
        unsigned char buf[24];
 
-       if ((err = snd_i2c_device_create(bus, "CS8427",
-                                        CS8427_ADDR | (addr & 7),
-                                        &device)) < 0)
-               return err;
-       chip = device->private_data = kzalloc(sizeof(*chip), GFP_KERNEL);
-       if (chip == NULL) {
-               snd_i2c_device_free(device);
-               return -ENOMEM;
-       }
-       device->private_free = snd_cs8427_free;
-       
        snd_i2c_lock(bus);
        err = snd_cs8427_reg_read(device, CS8427_REG_ID_AND_VER);
        if (err != CS8427_VER8427A) {
@@ -264,10 +250,44 @@ int snd_cs8427_create(struct snd_i2c_bus *bus,
        snd_i2c_unlock(bus);
 
        /* turn on run bit and rock'n'roll */
+       snd_cs8427_reset(device);
+
+       return 0;
+
+__fail:
+       snd_i2c_unlock(bus);
+
+       return err;
+}
+EXPORT_SYMBOL(snd_cs8427_init);
+
+int snd_cs8427_create(struct snd_i2c_bus *bus,
+                     unsigned char addr,
+                     unsigned int reset_timeout,
+                     struct snd_i2c_device **r_cs8427)
+{
+       int err;
+       struct cs8427 *chip;
+       struct snd_i2c_device *device;
+
+       err = snd_i2c_device_create(bus, "CS8427", CS8427_ADDR | (addr & 7),
+                                   &device);
+       if (err < 0)
+               return err;
+       chip = device->private_data = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (chip == NULL) {
+               snd_i2c_device_free(device);
+               return -ENOMEM;
+       }
+       device->private_free = snd_cs8427_free;
+
        if (reset_timeout < 1)
                reset_timeout = 1;
        chip->reset_timeout = reset_timeout;
-       snd_cs8427_reset(device);
+
+       err = snd_cs8427_init(bus, device);
+       if (err)
+               goto __fail;
 
 #if 0  // it's nice for read tests
        {
@@ -286,7 +306,6 @@ int snd_cs8427_create(struct snd_i2c_bus *bus,
        return 0;
 
       __fail:
-       snd_i2c_unlock(bus);
        snd_i2c_device_free(device);
        return err < 0 ? err : -EIO;
 }
index a7cc49e960685d97f223accb9916f478a1e22e43..d10ef7675268c53e7b8276004d2b87bd857d9233 100644 (file)
@@ -725,15 +725,4 @@ struct platform_driver au1000_ac97c_driver = {
        .remove         = au1000_ac97_remove,
 };
 
-static int __init au1000_ac97_load(void)
-{
-       return platform_driver_register(&au1000_ac97c_driver);
-}
-
-static void __exit au1000_ac97_unload(void)
-{
-       platform_driver_unregister(&au1000_ac97c_driver);
-}
-
-module_init(au1000_ac97_load);
-module_exit(au1000_ac97_unload);
+module_platform_driver(au1000_ac97c_driver);
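
module_platform_driver() generates exactly the module_init()/module_exit() registration pair deleted above. A skeleton of how a driver adopts it, with a hypothetical "exampledrv" device name standing in for a real one:

#include <linux/module.h>
#include <linux/platform_device.h>

static int exampledrv_probe(struct platform_device *pdev)
{
	return 0;
}

static int exampledrv_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver exampledrv_driver = {
	.probe	= exampledrv_probe,
	.remove	= exampledrv_remove,
	.driver	= {
		.name = "exampledrv",	/* hypothetical device name */
	},
};

/* Expands to the module_init()/module_exit() pair that registers and
 * unregisters the driver - the boilerplate the hunk above removes. */
module_platform_driver(exampledrv_driver);

MODULE_LICENSE("GPL");
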
index 4918b7145b736ef16982bd72131f2b773c23dd79..ec1ee07df59d2ecd27dab0c2cfb204c1119389f6 100644 (file)
@@ -50,8 +50,6 @@
 #include <linux/pnp.h>
 #include <linux/spinlock.h>
 
-#define DEB(x)
-#define DEB1(x)
 #include "sound_config.h"
 
 #include "ad1848.h"
@@ -1016,8 +1014,6 @@ static void ad1848_close(int dev)
        ad1848_info    *devc = (ad1848_info *) audio_devs[dev]->devc;
        ad1848_port_info *portc = (ad1848_port_info *) audio_devs[dev]->portc;
 
-       DEB(printk("ad1848_close(void)\n"));
-
        devc->intr_active = 0;
        ad1848_halt(dev);
 
index 87910e9921332d770e5cd07d60434dd670b891fa..c2d45a5848bc92d0450ddb04e731636235aac8a0 100644 (file)
@@ -733,19 +733,7 @@ static struct platform_driver amiga_audio_driver = {
        },
 };
 
-static int __init amiga_audio_init(void)
-{
-       return platform_driver_probe(&amiga_audio_driver, amiga_audio_probe);
-}
-
-module_init(amiga_audio_init);
-
-static void __exit amiga_audio_exit(void)
-{
-       platform_driver_unregister(&amiga_audio_driver);
-}
-
-module_exit(amiga_audio_exit);
+module_platform_driver_probe(amiga_audio_driver, amiga_audio_probe);
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:amiga-audio");
index c5c24409ceb065b49d8f7bb05d010085e4588d47..4709e592e2ccccf854d8edc2b43304662573fbfa 100644 (file)
@@ -275,7 +275,6 @@ static int opl3_kill_note  (int devno, int voice, int note, int velocity)
         devc->v_alloc->map[voice] = 0;
 
         map = &pv_map[devc->lv_map[voice]];
-        DEB(printk("Kill note %d\n", voice));
 
         if (map->voice_mode == 0)
                 return 0;
@@ -873,8 +872,6 @@ static void opl3_aftertouch(int dev, int voice, int pressure)
 
        map = &pv_map[devc->lv_map[voice]];
 
-       DEB(printk("Aftertouch %d\n", voice));
-
        if (map->voice_mode == 0)
                return;
 
index a0bcb85c39046a8c4878b6367e6bd2cc938e2140..50b5bd5012479a8cf43e0a905649fefaf7562f23 100644 (file)
 
 #include "pas2.h"
 
-#ifndef DEB
-#define DEB(what)              /* (what) */
-#endif
-
 extern int      pas_translate_code;
 extern char     pas_model;
 extern int     *pas_osp;
@@ -120,8 +116,6 @@ pas_mixer_set(int whichDev, unsigned int level)
 {
        int             left, right, devmask, changed, i, mixer = 0;
 
-       DEB(printk("static int pas_mixer_set(int whichDev = %d, unsigned int level = %X)\n", whichDev, level));
-
        left = level & 0x7f;
        right = (level & 0x7f00) >> 8;
 
@@ -207,8 +201,6 @@ pas_mixer_reset(void)
 {
        int             foo;
 
-       DEB(printk("pas2_mixer.c: void pas_mixer_reset(void)\n"));
-
        for (foo = 0; foo < SOUND_MIXER_NRDEVICES; foo++)
                pas_mixer_set(foo, levels[foo]);
 
@@ -220,7 +212,6 @@ static int pas_mixer_ioctl(int dev, unsigned int cmd, void __user *arg)
        int level,v ;
        int __user *p = (int __user *)arg;
 
-       DEB(printk("pas2_mixer.c: int pas_mixer_ioctl(unsigned int cmd = %X, unsigned int arg = %X)\n", cmd, arg));
        if (cmd == SOUND_MIXER_PRIVATE1) { /* Set loudness bit */
                if (get_user(level, p))
                        return -EFAULT;
index 6f13ab4afc6bcf60537adb93936af23ff107db51..474803b52f7d2d932d97d44fd79327e227b9f4c0 100644 (file)
 
 #include "pas2.h"
 
-#ifndef DEB
-#define DEB(WHAT)
-#endif
-
 #define PAS_PCM_INTRBITS (0x08)
 /*
  * Sample buffer timer interrupt enable
@@ -156,8 +152,6 @@ static int pas_audio_ioctl(int dev, unsigned int cmd, void __user *arg)
        int val, ret;
        int __user *p = arg;
 
-       DEB(printk("pas2_pcm.c: static int pas_audio_ioctl(unsigned int cmd = %X, unsigned int arg = %X)\n", cmd, arg));
-
        switch (cmd) 
        {
        case SOUND_PCM_WRITE_RATE:
@@ -204,8 +198,6 @@ static int pas_audio_ioctl(int dev, unsigned int cmd, void __user *arg)
 
 static void pas_audio_reset(int dev)
 {
-       DEB(printk("pas2_pcm.c: static void pas_audio_reset(void)\n"));
-
        pas_write(pas_read(0xF8A) & ~0x40, 0xF8A);      /* Disable PCM */
 }
 
@@ -214,8 +206,6 @@ static int pas_audio_open(int dev, int mode)
        int             err;
        unsigned long   flags;
 
-       DEB(printk("pas2_pcm.c: static int pas_audio_open(int mode = %X)\n", mode));
-
        spin_lock_irqsave(&pas_lock, flags);
        if (pcm_busy)
        {
@@ -239,8 +229,6 @@ static void pas_audio_close(int dev)
 {
        unsigned long   flags;
 
-       DEB(printk("pas2_pcm.c: static void pas_audio_close(void)\n"));
-
        spin_lock_irqsave(&pas_lock, flags);
 
        pas_audio_reset(dev);
@@ -256,8 +244,6 @@ static void pas_audio_output_block(int dev, unsigned long buf, int count,
 {
        unsigned long   flags, cnt;
 
-       DEB(printk("pas2_pcm.c: static void pas_audio_output_block(char *buf = %P, int count = %X)\n", buf, count));
-
        cnt = count;
        if (audio_devs[dev]->dmap_out->dma > 3)
                cnt >>= 1;
@@ -303,8 +289,6 @@ static void pas_audio_start_input(int dev, unsigned long buf, int count,
        unsigned long   flags;
        int             cnt;
 
-       DEB(printk("pas2_pcm.c: static void pas_audio_start_input(char *buf = %P, int count = %X)\n", buf, count));
-
        cnt = count;
        if (audio_devs[dev]->dmap_out->dma > 3)
                cnt >>= 1;
@@ -388,8 +372,6 @@ static struct audio_driver pas_audio_driver =
 
 void __init pas_pcm_init(struct address_info *hw_config)
 {
-       DEB(printk("pas2_pcm.c: long pas_pcm_init()\n"));
-
        pcm_bitsok = 8;
        if (pas_read(0xEF8B) & 0x08)
                pcm_bitsok |= 16;
index 851a1da46be11f63140a8992bd423dc11df06990..3d50fb4236edfa158d15328168f13826db243504 100644 (file)
@@ -226,8 +226,6 @@ int sb_dsp_reset(sb_devc * devc)
 {
        int loopc;
 
-       DEB(printk("Entered sb_dsp_reset()\n"));
-
        if (devc->model == MDL_ESS) return ess_dsp_reset (devc);
 
        /* This is only for non-ESS chips */
@@ -246,8 +244,6 @@ int sb_dsp_reset(sb_devc * devc)
                return 0;       /* Sorry */
        }
 
-       DEB(printk("sb_dsp_reset() OK\n"));
-
        return 1;
 }
 
index 0e7254bde4c271cb9591724c2c8da0210c0d4426..b47a69026f1b08eee5744bd4649702d87594b3c1 100644 (file)
@@ -865,8 +865,6 @@ printk(KERN_INFO "FKS: ess_dsp_reset 1\n");
 ess_show_mixerregs (devc);
 #endif
 
-       DEB(printk("Entered ess_dsp_reset()\n"));
-
        outb(3, DSP_RESET); /* Reset FIFO too */
 
        udelay(10);
@@ -881,8 +879,6 @@ ess_show_mixerregs (devc);
        }
        ess_extended (devc);
 
-       DEB(printk("sb_dsp_reset() OK\n"));
-
 #ifdef FKS_LOGGING
 printk(KERN_INFO "FKS: dsp_reset 2\n");
 ess_show_mixerregs (devc);
index 9b9f7d385134253f3e5ceac1e10caa2f21fe06a7..c0eea1dfe90ff5448cf1c6c43be2ebddbf4edf8d 100644 (file)
@@ -216,8 +216,6 @@ int sequencer_write(int dev, struct file *file, const char __user *buf, int coun
 
        dev = dev >> 4;
 
-       DEB(printk("sequencer_write(dev=%d, count=%d)\n", dev, count));
-
        if (mode == OPEN_READ)
                return -EIO;
 
@@ -959,8 +957,6 @@ int sequencer_open(int dev, struct file *file)
        dev = dev >> 4;
        mode = translate_mode(file);
 
-       DEB(printk("sequencer_open(dev=%d)\n", dev));
-
        if (!sequencer_ok)
        {
 /*             printk("Sound card: sequencer not initialized\n");*/
@@ -1133,8 +1129,6 @@ void sequencer_release(int dev, struct file *file)
 
        dev = dev >> 4;
 
-       DEB(printk("sequencer_release(dev=%d)\n", dev));
-
        /*
         * Wait until the queue is empty (if we don't have nonblock)
         */
index 9d35c4c65b9b0f0f4eb8f1026f89344ae8528415..f2554ab78f5e5df8d3f12902be648c65ae3d85df 100644 (file)
@@ -123,10 +123,6 @@ static inline int translate_mode(struct file *file)
 #include "sound_calls.h"
 #include "dev_table.h"
 
-#ifndef DEB
-#define DEB(x)
-#endif
-
 #ifndef DDB
 #define DDB(x) do {} while (0)
 #endif
index e7780349cc55bec87b6fc3de603b3e29e2c27a54..b70c7c8f9c5d366dc41fce47e47c78339b8b1fd1 100644 (file)
@@ -154,7 +154,6 @@ static ssize_t sound_read(struct file *file, char __user *buf, size_t count, lof
         
        mutex_lock(&soundcard_mutex);
        
-       DEB(printk("sound_read(dev=%d, count=%d)\n", dev, count));
        switch (dev & 0x0f) {
        case SND_DEV_DSP:
        case SND_DEV_DSP16:
@@ -180,7 +179,6 @@ static ssize_t sound_write(struct file *file, const char __user *buf, size_t cou
        int ret = -EINVAL;
        
        mutex_lock(&soundcard_mutex);
-       DEB(printk("sound_write(dev=%d, count=%d)\n", dev, count));
        switch (dev & 0x0f) {
        case SND_DEV_SEQ:
        case SND_DEV_SEQ2:
@@ -206,7 +204,6 @@ static int sound_open(struct inode *inode, struct file *file)
        int dev = iminor(inode);
        int retval;
 
-       DEB(printk("sound_open(dev=%d)\n", dev));
        if ((dev >= SND_NDEVS) || (dev < 0)) {
                printk(KERN_ERR "Invalid minor device %d\n", dev);
                return -ENXIO;
@@ -257,7 +254,6 @@ static int sound_release(struct inode *inode, struct file *file)
        int dev = iminor(inode);
 
        mutex_lock(&soundcard_mutex);
-       DEB(printk("sound_release(dev=%d)\n", dev));
        switch (dev & 0x0f) {
        case SND_DEV_CTL:
                module_put(mixer_devs[dev >> 4]->owner);
@@ -351,7 +347,6 @@ static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        if (!access_ok(VERIFY_WRITE, p, len))
                                return -EFAULT;
        }
-       DEB(printk("sound_ioctl(dev=%d, cmd=0x%x, arg=0x%x)\n", dev, cmd, arg));
        if (cmd == OSS_GETVERSION)
                return __put_user(SOUND_VERSION, (int __user *)p);
        
@@ -409,7 +404,6 @@ static unsigned int sound_poll(struct file *file, poll_table * wait)
        struct inode *inode = file_inode(file);
        int dev = iminor(inode);
 
-       DEB(printk("sound_poll(dev=%d)\n", dev));
        switch (dev & 0x0f) {
        case SND_DEV_SEQ:
        case SND_DEV_SEQ2:
index 5433c6f5eca23cdcba47a6b8031767adaf586ac6..62b8869f5a4c43737dc3e89e9cc408c535676ed2 100644 (file)
@@ -274,19 +274,12 @@ static int reset_uart401(uart401_devc * devc)
                }
        }
 
-
+       /* Flush input before enabling interrupts */
        if (ok)
-       {
-               DEB(printk("Reset UART401 OK\n"));
-       }
+               uart401_input_loop(devc);
        else
                DDB(printk("Reset UART401 failed - No hardware detected.\n"));
 
-       if (ok)
-               uart401_input_loop(devc);       /*
-                                                * Flush input before enabling interrupts
-                                                */
-
        return ok;
 }
 
index 97993e17f46a76d5470269795c5b34998c563246..248b90abb8825a62e9530a0629cbf432898898d3 100644 (file)
@@ -187,13 +187,14 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
                struct azx_dev *azx_dev = &chip->azx_dev[dev];
                dsp_lock(azx_dev);
                if (!azx_dev->opened && !dsp_is_locked(azx_dev)) {
-                       res = azx_dev;
-                       if (res->assigned_key == key) {
-                               res->opened = 1;
-                               res->assigned_key = key;
+                       if (azx_dev->assigned_key == key) {
+                               azx_dev->opened = 1;
+                               azx_dev->assigned_key = key;
                                dsp_unlock(azx_dev);
                                return azx_dev;
                        }
+                       if (!res)
+                               res = azx_dev;
                }
                dsp_unlock(azx_dev);
        }
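
The hunk above changes azx_assign_device() to prefer a free stream whose assigned_key already matches, and only fall back to the first free stream otherwise. A generic sketch of that selection pattern (all names are hypothetical, not the HDA structures):

#include <stddef.h>

struct slot {
	int busy;
	int key;
};

/* Prefer a free slot that was last bound to `key`; otherwise remember the
 * first free slot as a fallback; otherwise return NULL. */
static struct slot *pick_slot(struct slot *slots, size_t n, int key)
{
	struct slot *fallback = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (slots[i].busy)
			continue;
		if (slots[i].key == key)
			return &slots[i];
		if (!fallback)
			fallback = &slots[i];
	}
	return fallback;
}

int main(void)
{
	struct slot pool[3] = { { 1, 5 }, { 0, 7 }, { 0, 5 } };

	/* The free slot previously keyed 5 wins over the earlier free slot. */
	return pick_slot(pool, 3, 5) == &pool[2] ? 0 : 1;
}
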
@@ -1604,7 +1605,7 @@ static void azx_exit_link_reset(struct azx *chip)
 }
 
 /* reset codec link */
-static int azx_reset(struct azx *chip, int full_reset)
+static int azx_reset(struct azx *chip, bool full_reset)
 {
        if (!full_reset)
                goto __skip;
@@ -1701,7 +1702,7 @@ static void azx_int_clear(struct azx *chip)
 /*
  * reset and start the controller registers
  */
-void azx_init_chip(struct azx *chip, int full_reset)
+void azx_init_chip(struct azx *chip, bool full_reset)
 {
        if (chip->initialized)
                return;
@@ -1758,7 +1759,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
 
 #ifdef CONFIG_PM_RUNTIME
        if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
-               if (chip->card->dev->power.runtime_status != RPM_ACTIVE)
+               if (!pm_runtime_active(chip->card->dev))
                        return IRQ_NONE;
 #endif
 
@@ -1841,7 +1842,7 @@ static void azx_bus_reset(struct hda_bus *bus)
 
        bus->in_reset = 1;
        azx_stop_chip(chip);
-       azx_init_chip(chip, 1);
+       azx_init_chip(chip, true);
 #ifdef CONFIG_PM
        if (chip->initialized) {
                struct azx_pcm *p;
@@ -1948,7 +1949,7 @@ int azx_codec_create(struct azx *chip, const char *model,
                                 * get back to the sanity state.
                                 */
                                azx_stop_chip(chip);
-                               azx_init_chip(chip, 1);
+                               azx_init_chip(chip, true);
                        }
                }
        }
index 1d2e3be2bae6c32149f8ac256d1086b73e036af2..baf0e77330afc47bc54a3ff1bc5e0006f878d9c2 100644 (file)
@@ -37,7 +37,7 @@ int azx_alloc_stream_pages(struct azx *chip);
 void azx_free_stream_pages(struct azx *chip);
 
 /* Low level azx interface */
-void azx_init_chip(struct azx *chip, int full_reset);
+void azx_init_chip(struct azx *chip, bool full_reset);
 void azx_stop_chip(struct azx *chip);
 void azx_enter_link_reset(struct azx *chip);
 irqreturn_t azx_interrupt(int irq, void *dev_id);
index 77ca894f82845964192cdc20476002f3cdec63a0..d6bca62ef387b92b499dcf5954d5c783543055d1 100644 (file)
@@ -636,7 +636,7 @@ static int azx_resume(struct device *dev)
                return -EIO;
        azx_init_pci(chip);
 
-       azx_init_chip(chip, 1);
+       azx_init_chip(chip, true);
 
        snd_hda_resume(chip->bus);
        snd_power_change_state(card, SNDRV_CTL_POWER_D0);
@@ -689,7 +689,7 @@ static int azx_runtime_resume(struct device *dev)
        status = azx_readw(chip, STATESTS);
 
        azx_init_pci(chip);
-       azx_init_chip(chip, 1);
+       azx_init_chip(chip, true);
 
        bus = chip->bus;
        if (status && bus) {
index ea2351d119f0a0ff5f10b722995eb0e7b4774688..14ae979a92eac7a68185a1ed08882eedf6af735e 100644 (file)
@@ -3026,6 +3026,11 @@ static void alc283_init(struct hda_codec *codec)
        bool hp_pin_sense;
        int val;
 
+       if (!spec->gen.autocfg.hp_outs) {
+               if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+                       hp_pin = spec->gen.autocfg.line_out_pins[0];
+       }
+
        alc283_restore_default_value(codec);
 
        if (!hp_pin)
@@ -3062,6 +3067,11 @@ static void alc283_shutup(struct hda_codec *codec)
        bool hp_pin_sense;
        int val;
 
+       if (!spec->gen.autocfg.hp_outs) {
+               if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+                       hp_pin = spec->gen.autocfg.line_out_pins[0];
+       }
+
        if (!hp_pin) {
                alc269_shutup(codec);
                return;
@@ -3085,6 +3095,7 @@ static void alc283_shutup(struct hda_codec *codec)
 
        if (hp_pin_sense)
                msleep(100);
+       alc_auto_setup_eapd(codec, false);
        snd_hda_shutup_pins(codec);
        alc_write_coef_idx(codec, 0x43, 0x9614);
 }
@@ -3361,8 +3372,9 @@ static void alc269_fixup_mic_mute_hook(void *private_data, int enabled)
 
        if (spec->mute_led_polarity)
                enabled = !enabled;
-       pinval = AC_PINCTL_IN_EN |
-               (enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80);
+       pinval = snd_hda_codec_get_pin_target(codec, spec->mute_led_nid);
+       pinval &= ~AC_PINCTL_VREFEN;
+       pinval |= enabled ? AC_PINCTL_VREF_HIZ : AC_PINCTL_VREF_80;
        if (spec->mute_led_nid)
                snd_hda_set_pin_ctl_cache(codec, spec->mute_led_nid, pinval);
 }
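
The mute-LED hook now starts from the cached pin target and replaces only the VREF field instead of rebuilding the whole pin-control value, so unrelated bits survive. A tiny read-modify-write sketch of that idea, with made-up masks rather than the real AC_PINCTL_* values:

#include <stdint.h>
#include <stdio.h>

/* Illustrative masks only - not the real AC_PINCTL_* definitions. */
#define VREF_MASK	0x07u
#define VREF_HIZ	0x04u
#define VREF_80		0x05u

/* Preserve every other control bit and replace only the VREF field. */
static uint32_t set_vref(uint32_t pinctl, int enabled)
{
	pinctl &= ~VREF_MASK;
	pinctl |= enabled ? VREF_HIZ : VREF_80;
	return pinctl;
}

int main(void)
{
	uint32_t pinctl = 0x20u | VREF_80;	/* some unrelated bit is set */

	pinctl = set_vref(pinctl, 1);
	printf("0x%02x\n", pinctl);		/* prints 0x24: 0x20 kept, VREF now HIZ */
	return 0;
}
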
@@ -3994,6 +4006,10 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
                spec->gen.mixer_nid = 0;
                break;
        case HDA_FIXUP_ACT_INIT:
+               /* MIC2-VREF control */
+               /* Set to manual mode */
+               val = alc_read_coef_idx(codec, 0x06);
+               alc_write_coef_idx(codec, 0x06, val & ~0x000c);
                /* Enable Line1 input control by verb */
                val = alc_read_coef_idx(codec, 0x1a);
                alc_write_coef_idx(codec, 0x1a, val | (1 << 4));
@@ -4602,6 +4618,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4768,7 +4785,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
        {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
        {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
-       {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-chrome"},
+       {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"},
        {.id = ALC283_FIXUP_SENSE_COMBO_JACK, .name = "alc283-sense-combo"},
        {}
 };
index ed2144eee38ac872d80e263953733639630169b2..496dbd0ad5dbb3247c7845b21de7a37b2fd11d3d 100644 (file)
@@ -579,12 +579,37 @@ static struct snd_ak4xxx_private akm_vx442_priv = {
 #ifdef CONFIG_PM_SLEEP
 static int snd_ice1712_delta_resume(struct snd_ice1712 *ice)
 {
-       unsigned char akm_backup[AK4XXX_IMAGE_SIZE];
+       unsigned char akm_img_bak[AK4XXX_IMAGE_SIZE];
+       unsigned char akm_vol_bak[AK4XXX_IMAGE_SIZE];
+
+       /* init spdif */
+       switch (ice->eeprom.subvendor) {
+       case ICE1712_SUBDEVICE_AUDIOPHILE:
+       case ICE1712_SUBDEVICE_DELTA410:
+       case ICE1712_SUBDEVICE_DELTA1010E:
+       case ICE1712_SUBDEVICE_DELTA1010LT:
+       case ICE1712_SUBDEVICE_VX442:
+       case ICE1712_SUBDEVICE_DELTA66E:
+               snd_cs8427_init(ice->i2c, ice->cs8427);
+               break;
+       case ICE1712_SUBDEVICE_DELTA1010:
+       case ICE1712_SUBDEVICE_MEDIASTATION:
+               /* nothing */
+               break;
+       case ICE1712_SUBDEVICE_DELTADIO2496:
+       case ICE1712_SUBDEVICE_DELTA66:
+               /* Set spdif defaults */
+               snd_ice1712_delta_cs8403_spdif_write(ice, ice->spdif.cs8403_bits);
+               break;
+       }
+
        /* init codec and restore registers */
        if (ice->akm_codecs) {
-               memcpy(akm_backup, ice->akm->images, sizeof(akm_backup));
+               memcpy(akm_img_bak, ice->akm->images, sizeof(akm_img_bak));
+               memcpy(akm_vol_bak, ice->akm->volumes, sizeof(akm_vol_bak));
                snd_akm4xxx_init(ice->akm);
-               memcpy(ice->akm->images, akm_backup, sizeof(akm_backup));
+               memcpy(ice->akm->images, akm_img_bak, sizeof(akm_img_bak));
+               memcpy(ice->akm->volumes, akm_vol_bak, sizeof(akm_vol_bak));
                snd_akm4xxx_reset(ice->akm, 0);
        }
 
index 291672fc4a99f2428c171c9a57955280776df209..d9b9e4595f1717a97e2f19127c0c21057b5d209c 100644 (file)
@@ -685,9 +685,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pointer(struct snd_pcm_substream *
        if (!(snd_ice1712_read(ice, ICE1712_IREG_PBK_CTRL) & 1))
                return 0;
        ptr = runtime->buffer_size - inw(ice->ddma_port + 4);
+       ptr = bytes_to_frames(substream->runtime, ptr);
        if (ptr == runtime->buffer_size)
                ptr = 0;
-       return bytes_to_frames(substream->runtime, ptr);
+       return ptr;
 }
 
 static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substream *substream)
@@ -704,9 +705,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_ds_pointer(struct snd_pcm_substrea
                addr = ICE1712_DSC_ADDR0;
        ptr = snd_ice1712_ds_read(ice, substream->number * 2, addr) -
                ice->playback_con_virt_addr[substream->number];
+       ptr = bytes_to_frames(substream->runtime, ptr);
        if (ptr == substream->runtime->buffer_size)
                ptr = 0;
-       return bytes_to_frames(substream->runtime, ptr);
+       return ptr;
 }
 
 static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *substream)
@@ -717,9 +719,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pointer(struct snd_pcm_substream *s
        if (!(snd_ice1712_read(ice, ICE1712_IREG_CAP_CTRL) & 1))
                return 0;
        ptr = inl(ICEREG(ice, CONCAP_ADDR)) - ice->capture_con_virt_addr;
+       ptr = bytes_to_frames(substream->runtime, ptr);
        if (ptr == substream->runtime->buffer_size)
                ptr = 0;
-       return bytes_to_frames(substream->runtime, ptr);
+       return ptr;
 }
 
 static const struct snd_pcm_hardware snd_ice1712_playback = {
@@ -1048,6 +1051,8 @@ __out:
        old = inb(ICEMT(ice, RATE));
        if (!force && old == val)
                goto __out;
+
+       ice->cur_rate = rate;
        outb(val, ICEMT(ice, RATE));
        spin_unlock_irqrestore(&ice->reg_lock, flags);
 
@@ -1114,9 +1119,10 @@ static snd_pcm_uframes_t snd_ice1712_playback_pro_pointer(struct snd_pcm_substre
        if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_PLAYBACK_START))
                return 0;
        ptr = ice->playback_pro_size - (inw(ICEMT(ice, PLAYBACK_SIZE)) << 2);
+       ptr = bytes_to_frames(substream->runtime, ptr);
        if (ptr == substream->runtime->buffer_size)
                ptr = 0;
-       return bytes_to_frames(substream->runtime, ptr);
+       return ptr;
 }
 
 static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substream *substream)
@@ -1127,9 +1133,10 @@ static snd_pcm_uframes_t snd_ice1712_capture_pro_pointer(struct snd_pcm_substrea
        if (!(inl(ICEMT(ice, PLAYBACK_CONTROL)) & ICE1712_CAPTURE_START_SHADOW))
                return 0;
        ptr = ice->capture_pro_size - (inw(ICEMT(ice, CAPTURE_SIZE)) << 2);
+       ptr = bytes_to_frames(substream->runtime, ptr);
        if (ptr == substream->runtime->buffer_size)
                ptr = 0;
-       return bytes_to_frames(substream->runtime, ptr);
+       return ptr;
 }
 
 static const struct snd_pcm_hardware snd_ice1712_playback_pro = {
@@ -2832,6 +2839,12 @@ static int snd_ice1712_suspend(struct device *dev)
        snd_pcm_suspend_all(ice->pcm_ds);
        snd_ac97_suspend(ice->ac97);
 
+       spin_lock_irq(&ice->reg_lock);
+       ice->pm_saved_is_spdif_master = is_spdif_master(ice);
+       ice->pm_saved_spdif_ctrl = inw(ICEMT(ice, ROUTE_SPDOUT));
+       ice->pm_saved_route = inw(ICEMT(ice, ROUTE_PSDOUT03));
+       spin_unlock_irq(&ice->reg_lock);
+
        if (ice->pm_suspend)
                ice->pm_suspend(ice);
 
@@ -2846,6 +2859,7 @@ static int snd_ice1712_resume(struct device *dev)
        struct pci_dev *pci = to_pci_dev(dev);
        struct snd_card *card = dev_get_drvdata(dev);
        struct snd_ice1712 *ice = card->private_data;
+       int rate;
 
        if (!ice->pm_suspend_enabled)
                return 0;
@@ -2860,14 +2874,37 @@ static int snd_ice1712_resume(struct device *dev)
 
        pci_set_master(pci);
 
+       if (ice->cur_rate)
+               rate = ice->cur_rate;
+       else
+               rate = PRO_RATE_DEFAULT;
+
        if (snd_ice1712_chip_init(ice) < 0) {
                snd_card_disconnect(card);
                return -EIO;
        }
 
+       ice->cur_rate = rate;
+
        if (ice->pm_resume)
                ice->pm_resume(ice);
 
+       if (ice->pm_saved_is_spdif_master) {
+               /* switching to external clock via SPDIF */
+               spin_lock_irq(&ice->reg_lock);
+               outb(inb(ICEMT(ice, RATE)) | ICE1712_SPDIF_MASTER,
+                       ICEMT(ice, RATE));
+               spin_unlock_irq(&ice->reg_lock);
+               snd_ice1712_set_input_clock_source(ice, 1);
+       } else {
+               /* internal on-card clock */
+               snd_ice1712_set_pro_rate(ice, rate, 1);
+               snd_ice1712_set_input_clock_source(ice, 0);
+       }
+
+       outw(ice->pm_saved_spdif_ctrl, ICEMT(ice, ROUTE_SPDOUT));
+       outw(ice->pm_saved_route, ICEMT(ice, ROUTE_PSDOUT03));
+
        if (ice->ac97)
                snd_ac97_resume(ice->ac97);
 
index 09f7e773bafb8721619843c187be4c24b06af73e..f500905e9373510d2bcfdb583b0569cec5994708 100644 (file)
@@ -902,7 +902,6 @@ static int alc5623_probe(struct snd_soc_codec *codec)
 {
        struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
        struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int ret;
 
        alc5623_reset(codec);
 
@@ -961,7 +960,7 @@ static int alc5623_probe(struct snd_soc_codec *codec)
                return -EINVAL;
        }
 
-       return ret;
+       return 0;
 }
 
 /* power down chip */
index ec071a6306ef567fb37b8c97981b64d1b3390f9e..85942ca36cbfaf9df6fcc2007e719e2c89b0551c 100644 (file)
@@ -1061,7 +1061,6 @@ static int alc5632_resume(struct snd_soc_codec *codec)
 static int alc5632_probe(struct snd_soc_codec *codec)
 {
        struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
-       int ret;
 
        /* power on device  */
        alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@@ -1075,7 +1074,7 @@ static int alc5632_probe(struct snd_soc_codec *codec)
                return -EINVAL;
        }
 
-       return ret;
+       return 0;
 }
 
 /* power down chip */
@@ -1191,11 +1190,18 @@ static const struct i2c_device_id alc5632_i2c_table[] = {
 };
 MODULE_DEVICE_TABLE(i2c, alc5632_i2c_table);
 
+static const struct of_device_id alc5632_of_match[] = {
+       { .compatible = "realtek,alc5632", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, alc5632_of_match);
+
 /* i2c codec control layer */
 static struct i2c_driver alc5632_i2c_driver = {
        .driver = {
                .name = "alc5632",
                .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(alc5632_of_match),
        },
        .probe = alc5632_i2c_probe,
        .remove =  alc5632_i2c_remove,
index f0ca6bee677159e0bb9a6af0a660bb937b815b7d..460d35547a683d226521591333ce06fe1c5de634 100644 (file)
@@ -1259,7 +1259,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
        }
 
        dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n",
-                       reg & 0xFF);
+                reg & CS42L52_CHIP_REV_MASK);
 
        /* Set Platform Data */
        if (cs42l52->pdata.mica_diff_cfg)
index 6fb8f00f4191a7ad890f387c40dc4711dbbfddab..ac445993e6bf98a090b6489fc2e4bd3c3d715366 100644 (file)
@@ -37,7 +37,7 @@
 #define CS42L52_CHIP_REV_A0                    0x00
 #define CS42L52_CHIP_REV_A1                    0x01
 #define CS42L52_CHIP_REV_B0                    0x02
-#define CS42L52_CHIP_REV_MASK                  0x03
+#define CS42L52_CHIP_REV_MASK                  0x07
 
 #define CS42L52_PWRCTL1                                0x02
 #define CS42L52_PWRCTL1_PDN_ALL                        0x9F
index 082299a4e2faccd9f28db0d60b30f8849eff47b6..85020322eee7590ad8ae132f24fc133ea6e6340b 100644 (file)
@@ -495,17 +495,16 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap)
        regcache_cache_bypass(cs42xx8->regmap, true);
 
        /* Validate the chip ID */
-       regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
-       if (val < 0) {
-               dev_err(dev, "failed to get device ID: %x", val);
-               ret = -EINVAL;
+       ret = regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
+       if (ret < 0) {
+               dev_err(dev, "failed to get device ID, ret = %d", ret);
                goto err_enable;
        }
 
        /* The top four bits of the chip ID should be 0000 */
-       if ((val & CS42XX8_CHIPID_CHIP_ID_MASK) != 0x00) {
+       if (((val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4) != 0x00) {
                dev_err(dev, "unmatched chip ID: %d\n",
-                               val & CS42XX8_CHIPID_CHIP_ID_MASK);
+                       (val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4);
                ret = -EINVAL;
                goto err_enable;
        }
index 7d168ec71cd70409095ee82fb95ec8a929601f98..48f3fef6848451cecb80c586a742d4ccaa619dbd 100644 (file)
@@ -1571,7 +1571,8 @@ static int da732x_i2c_probe(struct i2c_client *i2c,
        }
 
        dev_info(&i2c->dev, "Revision: %d.%d\n",
-                (reg & DA732X_ID_MAJOR_MASK), (reg & DA732X_ID_MINOR_MASK));
+                (reg & DA732X_ID_MAJOR_MASK) >> 4,
+                (reg & DA732X_ID_MINOR_MASK));
 
        ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da732x,
                                     da732x_dai, ARRAY_SIZE(da732x_dai));
index 98c6e104357cbe59416f8d602a24ec63e1b68696..f7b0b37aa85827e7cd45a8afda039078c7079f0e 100644 (file)
@@ -2399,11 +2399,18 @@ static const struct i2c_device_id max98090_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, max98090_i2c_id);
 
+static const struct of_device_id max98090_of_match[] = {
+       { .compatible = "maxim,max98090", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max98090_of_match);
+
 static struct i2c_driver max98090_i2c_driver = {
        .driver = {
                .name = "max98090",
                .owner = THIS_MODULE,
                .pm = &max98090_pm,
+               .of_match_table = of_match_ptr(max98090_of_match),
        },
        .probe  = max98090_i2c_probe,
        .remove = max98090_i2c_remove,
index 0061ae6b671673e43615bcd9ebfeef001df487b2..68b4dd622b879364bc0ae26035695a8b7604d9fa 100644 (file)
@@ -2074,6 +2074,14 @@ static const struct i2c_device_id rt5640_i2c_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
 
+#if defined(CONFIG_OF)
+static const struct of_device_id rt5640_of_match[] = {
+       { .compatible = "realtek,rt5640", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rt5640_of_match);
+#endif
+
 #ifdef CONFIG_ACPI
 static struct acpi_device_id rt5640_acpi_match[] = {
        { "INT33CA", 0 },
@@ -2203,6 +2211,7 @@ static struct i2c_driver rt5640_i2c_driver = {
                .name = "rt5640",
                .owner = THIS_MODULE,
                .acpi_match_table = ACPI_PTR(rt5640_acpi_match),
+               .of_match_table = of_match_ptr(rt5640_of_match),
        },
        .probe = rt5640_i2c_probe,
        .remove   = rt5640_i2c_remove,
index 20fc46092c2c4a396791ea4c46ddf95def3ebfdd..b73c94ebcc2a0de02710b5e9f69c2cbbfdd8b814 100644 (file)
@@ -43,9 +43,16 @@ static const struct i2c_device_id tlv320aic23_id[] = {
 
 MODULE_DEVICE_TABLE(i2c, tlv320aic23_id);
 
+static const struct of_device_id tlv320aic23_of_match[] = {
+       { .compatible = "ti,tlv320aic23", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, tlv320aic23_of_match);
+
 static struct i2c_driver tlv320aic23_i2c_driver = {
        .driver = {
                   .name = "tlv320aic23-codec",
+                  .of_match_table = of_match_ptr(tlv320aic23_of_match),
                   },
        .probe = tlv320aic23_i2c_probe,
        .remove = __exit_p(tlv320aic23_i2c_remove),
index a01ae97c90aae4aba50ccf21184adb1a519f7dcc..4f75cac462d1578eed311a0952b305302b369b95 100644 (file)
@@ -336,7 +336,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
                mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-               mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+               mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
                mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
                break;
 
@@ -344,7 +344,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
                mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-               mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+               mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
                mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
                break;
 
@@ -352,7 +352,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
                mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
 
-               mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
+               mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
                mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
                break;
 
index c4a42311167371446cfd5e24b2c23c418e796627..56da8c8c5960bd0ef99b405560081580a2bd0571 100644 (file)
 
 #include "fsl_sai.h"
 
+#define FSL_SAI_FLAGS (FSL_SAI_CSR_SEIE |\
+                      FSL_SAI_CSR_FEIE)
+
+static irqreturn_t fsl_sai_isr(int irq, void *devid)
+{
+       struct fsl_sai *sai = (struct fsl_sai *)devid;
+       struct device *dev = &sai->pdev->dev;
+       u32 xcsr, mask;
+
+       /* Only handle the interrupts we enabled */
+       mask = (FSL_SAI_FLAGS >> FSL_SAI_CSR_xIE_SHIFT) << FSL_SAI_CSR_xF_SHIFT;
+
+       /* Tx IRQ */
+       regmap_read(sai->regmap, FSL_SAI_TCSR, &xcsr);
+       xcsr &= mask;
+
+       if (xcsr & FSL_SAI_CSR_WSF)
+               dev_dbg(dev, "isr: Start of Tx word detected\n");
+
+       if (xcsr & FSL_SAI_CSR_SEF)
+               dev_warn(dev, "isr: Tx Frame sync error detected\n");
+
+       if (xcsr & FSL_SAI_CSR_FEF) {
+               dev_warn(dev, "isr: Transmit underrun detected\n");
+               /* FIFO reset for safety */
+               xcsr |= FSL_SAI_CSR_FR;
+       }
+
+       if (xcsr & FSL_SAI_CSR_FWF)
+               dev_dbg(dev, "isr: Enabled transmit FIFO is empty\n");
+
+       if (xcsr & FSL_SAI_CSR_FRF)
+               dev_dbg(dev, "isr: Transmit FIFO watermark has been reached\n");
+
+       regmap_update_bits(sai->regmap, FSL_SAI_TCSR,
+                          FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr);
+
+       /* Rx IRQ */
+       regmap_read(sai->regmap, FSL_SAI_RCSR, &xcsr);
+       xcsr &= mask;
+
+       if (xcsr & FSL_SAI_CSR_WSF)
+               dev_dbg(dev, "isr: Start of Rx word detected\n");
+
+       if (xcsr & FSL_SAI_CSR_SEF)
+               dev_warn(dev, "isr: Rx Frame sync error detected\n");
+
+       if (xcsr & FSL_SAI_CSR_FEF) {
+               dev_warn(dev, "isr: Receive overflow detected\n");
+               /* FIFO reset for safety */
+               xcsr |= FSL_SAI_CSR_FR;
+       }
+
+       if (xcsr & FSL_SAI_CSR_FWF)
+               dev_dbg(dev, "isr: Enabled receive FIFO is full\n");
+
+       if (xcsr & FSL_SAI_CSR_FRF)
+               dev_dbg(dev, "isr: Receive FIFO watermark has been reached\n");
+
+       regmap_update_bits(sai->regmap, FSL_SAI_RCSR,
+                          FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr);
+
+       return IRQ_HANDLED;
+}
+
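The flag mask in fsl_sai_isr() above works because each interrupt-enable bit sits exactly eight bit positions below its matching status flag (FSL_SAI_CSR_xIE_SHIFT is 8, FSL_SAI_CSR_xF_SHIFT is 16), so shifting the enabled bits up selects only the flags whose interrupts were requested. A minimal user-space sketch of that shift, reusing only the constants visible in this hunk (illustrative only, not kernel code):

#include <stdio.h>

#define BIT(n)                 (1u << (n))
#define FSL_SAI_CSR_SEIE       BIT(11)  /* frame sync error IRQ enable */
#define FSL_SAI_CSR_FEIE       BIT(10)  /* FIFO error IRQ enable */
#define FSL_SAI_CSR_xIE_SHIFT  8        /* lowest IRQ-enable bit */
#define FSL_SAI_CSR_xF_SHIFT   16       /* lowest status-flag bit */
#define FSL_SAI_FLAGS          (FSL_SAI_CSR_SEIE | FSL_SAI_CSR_FEIE)

int main(void)
{
	/* same expression as in the ISR: move the enable bits up so they
	 * line up with the status flags they correspond to */
	unsigned int mask =
		(FSL_SAI_FLAGS >> FSL_SAI_CSR_xIE_SHIFT) << FSL_SAI_CSR_xF_SHIFT;

	printf("enable bits 0x%08x -> flag mask 0x%08x\n", FSL_SAI_FLAGS, mask);
	return 0;
}

Compiled and run, this prints enable bits 0x00000c00 mapping to flag mask 0x000c0000, i.e. SEIE/FEIE select exactly SEF and FEF for handling.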
 static int fsl_sai_set_dai_sysclk_tr(struct snd_soc_dai *cpu_dai,
                int clk_id, unsigned int freq, int fsl_dir)
 {
@@ -114,7 +179,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
                 * that is, together with the last bit of the previous
                 * data word.
                 */
-               val_cr2 &= ~FSL_SAI_CR2_BCP;
+               val_cr2 |= FSL_SAI_CR2_BCP;
                val_cr4 |= FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP;
                break;
        case SND_SOC_DAIFMT_LEFT_J:
@@ -122,7 +187,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
                 * Frame high, one word length for frame sync,
                 * frame sync asserts with the first bit of the frame.
                 */
-               val_cr2 &= ~FSL_SAI_CR2_BCP;
+               val_cr2 |= FSL_SAI_CR2_BCP;
                val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP);
                break;
        case SND_SOC_DAIFMT_DSP_A:
@@ -132,7 +197,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
                 * that is, together with the last bit of the previous
                 * data word.
                 */
-               val_cr2 &= ~FSL_SAI_CR2_BCP;
+               val_cr2 |= FSL_SAI_CR2_BCP;
                val_cr4 &= ~FSL_SAI_CR4_FSP;
                val_cr4 |= FSL_SAI_CR4_FSE;
                sai->is_dsp_mode = true;
@@ -142,7 +207,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
                 * Frame high, one bit for frame sync,
                 * frame sync asserts with the first bit of the frame.
                 */
-               val_cr2 &= ~FSL_SAI_CR2_BCP;
+               val_cr2 |= FSL_SAI_CR2_BCP;
                val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP);
                sai->is_dsp_mode = true;
                break;
@@ -373,8 +438,8 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
 {
        struct fsl_sai *sai = dev_get_drvdata(cpu_dai->dev);
 
-       regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, 0x0);
-       regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, 0x0);
+       regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, FSL_SAI_FLAGS);
+       regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, FSL_SAI_FLAGS);
        regmap_update_bits(sai->regmap, FSL_SAI_TCR1, FSL_SAI_CR1_RFW_MASK,
                           FSL_SAI_MAXBURST_TX * 2);
        regmap_update_bits(sai->regmap, FSL_SAI_RCR1, FSL_SAI_CR1_RFW_MASK,
@@ -490,12 +555,14 @@ static int fsl_sai_probe(struct platform_device *pdev)
        struct fsl_sai *sai;
        struct resource *res;
        void __iomem *base;
-       int ret;
+       int irq, ret;
 
        sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
        if (!sai)
                return -ENOMEM;
 
+       sai->pdev = pdev;
+
        sai->big_endian_regs = of_property_read_bool(np, "big-endian-regs");
        if (sai->big_endian_regs)
                fsl_sai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
@@ -514,6 +581,18 @@ static int fsl_sai_probe(struct platform_device *pdev)
                return PTR_ERR(sai->regmap);
        }
 
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+               return irq;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, fsl_sai_isr, 0, np->name, sai);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to claim irq %u\n", irq);
+               return ret;
+       }
+
        sai->dma_params_rx.addr = res->start + FSL_SAI_RDR;
        sai->dma_params_tx.addr = res->start + FSL_SAI_TDR;
        sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX;
index e432260be598ed2cabebbdadcbb60730d96166ed..a264185c7138508451b602e7ece7d89d354b2ff1 100644 (file)
 
 /* SAI Transmit/Receive Control Register */
 #define FSL_SAI_CSR_TERE       BIT(31)
+#define FSL_SAI_CSR_FR         BIT(25)
+#define FSL_SAI_CSR_xF_SHIFT   16
+#define FSL_SAI_CSR_xF_W_SHIFT 18
+#define FSL_SAI_CSR_xF_MASK    (0x1f << FSL_SAI_CSR_xF_SHIFT)
+#define FSL_SAI_CSR_xF_W_MASK  (0x7 << FSL_SAI_CSR_xF_W_SHIFT)
+#define FSL_SAI_CSR_WSF                BIT(20)
+#define FSL_SAI_CSR_SEF                BIT(19)
+#define FSL_SAI_CSR_FEF                BIT(18)
 #define FSL_SAI_CSR_FWF                BIT(17)
+#define FSL_SAI_CSR_FRF                BIT(16)
+#define FSL_SAI_CSR_xIE_SHIFT  8
+#define FSL_SAI_CSR_WSIE       BIT(12)
+#define FSL_SAI_CSR_SEIE       BIT(11)
+#define FSL_SAI_CSR_FEIE       BIT(10)
+#define FSL_SAI_CSR_FWIE       BIT(9)
 #define FSL_SAI_CSR_FRIE       BIT(8)
 #define FSL_SAI_CSR_FRDE       BIT(0)
 
 #define FSL_SAI_MAXBURST_RX 6
 
 struct fsl_sai {
+       struct platform_device *pdev;
        struct regmap *regmap;
 
        bool big_endian_regs;
index 4a88e36c82ec1c827d7cd8658e0223991a3b621f..76b072bd4ba25eb9ba926d56cefc3702c45e5a17 100644 (file)
@@ -39,15 +39,15 @@ struct s3c_ac97_info {
 };
 static struct s3c_ac97_info s3c_ac97;
 
-static struct s3c2410_dma_client s3c_dma_client_out = {
+static struct s3c_dma_client s3c_dma_client_out = {
        .name = "AC97 PCMOut"
 };
 
-static struct s3c2410_dma_client s3c_dma_client_in = {
+static struct s3c_dma_client s3c_dma_client_in = {
        .name = "AC97 PCMIn"
 };
 
-static struct s3c2410_dma_client s3c_dma_client_micin = {
+static struct s3c_dma_client s3c_dma_client_micin = {
        .name = "AC97 MicIn"
 };
 
index 225e5378014eca4395d4481c77ddf2c1632b31a3..ad7c0f04f00d59529163e292146b3c6641d391d7 100644 (file)
 
 #include <sound/dmaengine_pcm.h>
 
+struct s3c_dma_client {
+       char *name;
+};
+
 struct s3c_dma_params {
-       struct s3c2410_dma_client *client;      /* stream identifier */
+       struct s3c_dma_client *client;  /* stream identifier */
        int channel;                            /* Channel ID */
        dma_addr_t dma_addr;
        int dma_size;                   /* Size of the DMA transfer */
index 0a9b44c940cee4c962942d23ecccd85466cc56b8..048ead96719984f85b2b1bc048e2c81b30cc9fb3 100644 (file)
@@ -1211,10 +1211,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
        pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
        pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
        pri_dai->dma_playback.client =
-               (struct s3c2410_dma_client *)&pri_dai->dma_playback;
+               (struct s3c_dma_client *)&pri_dai->dma_playback;
        pri_dai->dma_playback.ch_name = "tx";
        pri_dai->dma_capture.client =
-               (struct s3c2410_dma_client *)&pri_dai->dma_capture;
+               (struct s3c_dma_client *)&pri_dai->dma_capture;
        pri_dai->dma_capture.ch_name = "rx";
        pri_dai->dma_playback.dma_size = 4;
        pri_dai->dma_capture.dma_size = 4;
@@ -1233,7 +1233,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
                }
                sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
                sec_dai->dma_playback.client =
-                       (struct s3c2410_dma_client *)&sec_dai->dma_playback;
+                       (struct s3c_dma_client *)&sec_dai->dma_playback;
                sec_dai->dma_playback.ch_name = "tx-sec";
 
                if (!np) {
index 6a5e4bf6ac96efaa1b3b3dba063390165eee724e..ab54e297957c105ee60e050a259323b453738fff 100644 (file)
@@ -20,7 +20,6 @@
 #include <sound/pcm_params.h>
 
 #include <linux/platform_data/asoc-s3c.h>
-#include <mach/dma.h>
 
 #include "dma.h"
 #include "pcm.h"
@@ -132,11 +131,11 @@ struct s3c_pcm_info {
        struct s3c_dma_params   *dma_capture;
 };
 
-static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
+static struct s3c_dma_client s3c_pcm_dma_client_out = {
        .name           = "PCM Stereo out"
 };
 
-static struct s3c2410_dma_client s3c_pcm_dma_client_in = {
+static struct s3c_dma_client s3c_pcm_dma_client_in = {
        .name           = "PCM Stereo in"
 };
 
index d0794458963a6ad8de279eaae568a3bd95a32836..e9bb5d7a71ee5409ad6c92419d1f8010201a132b 100644 (file)
 #include "regs-i2s-v2.h"
 #include "s3c2412-i2s.h"
 
-static struct s3c2410_dma_client s3c2412_dma_client_out = {
+static struct s3c_dma_client s3c2412_dma_client_out = {
        .name           = "I2S PCM Stereo out"
 };
 
-static struct s3c2410_dma_client s3c2412_dma_client_in = {
+static struct s3c_dma_client s3c2412_dma_client_in = {
        .name           = "I2S PCM Stereo in"
 };
 
index f31e916dd8c4f78d394efc387f7f6f8ade83fdfd..d7b8457b565046692db8ad1e25b9376ae412d4b9 100644 (file)
 #include "dma.h"
 #include "s3c24xx-i2s.h"
 
-static struct s3c2410_dma_client s3c24xx_dma_client_out = {
+static struct s3c_dma_client s3c24xx_dma_client_out = {
        .name = "I2S PCM Stereo out"
 };
 
-static struct s3c2410_dma_client s3c24xx_dma_client_in = {
+static struct s3c_dma_client s3c24xx_dma_client_in = {
        .name = "I2S PCM Stereo in"
 };
 
index 28487dcc4538b939bd86a82395bb7299c2a5d243..cfe63b7bcc9f6b9a6779b9a88db93612b9e6a420 100644 (file)
@@ -18,7 +18,6 @@
 #include <sound/pcm_params.h>
 
 #include <linux/platform_data/asoc-s3c.h>
-#include <mach/dma.h>
 
 #include "dma.h"
 #include "spdif.h"
@@ -94,7 +93,7 @@ struct samsung_spdif_info {
        struct s3c_dma_params   *dma_playback;
 };
 
-static struct s3c2410_dma_client spdif_dma_client_out = {
+static struct s3c_dma_client spdif_dma_client_out = {
        .name           = "S/PDIF Stereo out",
 };
 
index 49de5c1284f6c7d4df3cecaa227316fa8dbd830a..131336d40492786b2283df01577a1957b0556a66 100644 (file)
@@ -1501,7 +1501,8 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
         * The error should be lower than 2ms since the estimate relies
         * on two reads of a counter updated every ms.
         */
-       if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+       if (printk_ratelimit() &&
+           abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
                dev_dbg(&subs->dev->dev,
                        "delay: estimated %d, actual %d\n",
                        est_delay, subs->last_delay);
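The 2 ms check above avoids a division by scaling both sides: abs(est_delay - last_delay) * 1000 > rate * 2 is the integer form of "the estimate and the reported delay differ by more than 2 ms worth of frames". A small stand-alone sketch of that arithmetic with invented values (48 kHz, so the threshold is 96 frames):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned int rate = 48000;  /* sample rate, frames per second */
	int est_delay = 250;        /* estimated delay in frames (made up) */
	int last_delay = 130;       /* last reported delay in frames (made up) */

	/* abs(diff) * 1000 > rate * 2  <=>  abs(diff) > rate * 2 / 1000 frames,
	 * i.e. the two values are more than 2 ms of audio apart */
	if (abs(est_delay - last_delay) * 1000 > (int)(rate * 2))
		printf("delay estimate off by more than 2 ms: %d vs %d frames\n",
		       est_delay, last_delay);
	return 0;
}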
index b4ddb748356cdecda1a04a683dfe9d3917a42114..56bfb523c5bb0d277031e8a4b80dfdeb141a1099 100644 (file)
@@ -47,21 +47,22 @@ displays the statistics gathered since it was forked.
 .PP
 .SH FIELD DESCRIPTIONS
 .nf
-\fBpk\fP processor package number.
-\fBcor\fP processor core number.
+\fBPackage\fP processor package number.
+\fBCore\fP processor core number.
 \fBCPU\fP Linux CPU (logical processor) number.
 Note that multiple CPUs per core indicate support for Intel(R) Hyper-Threading Technology.
-\fB%c0\fP percent of the interval that the CPU retired instructions.
-\fBGHz\fP average clock rate while the CPU was in c0 state.
-\fBTSC\fP average GHz that the TSC ran during the entire interval.
-\fB%c1, %c3, %c6, %c7\fP show the percentage residency in hardware core idle states.
-\fBCTMP\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
-\fBPTMP\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
-\fB%pc2, %pc3, %pc6, %pc7\fP percentage residency in hardware package idle states.
-\fBPkg_W\fP Watts consumed by the whole package.
-\fBCor_W\fP Watts consumed by the core part of the package.
-\fBGFX_W\fP Watts consumed by the Graphics part of the package -- available only on client processors.
-\fBRAM_W\fP Watts consumed by the DRAM DIMMS -- available only on server processors.
+\fBAvg_MHz\fP number of cycles executed divided by time elapsed.
+\fB%Busy\fP percent of the interval that the CPU retired instructions, i.e., the percent of time spent in the "C0" state.
+\fBBzy_MHz\fP average clock rate while the CPU was busy (in the "C0" state).
+\fBTSC_MHz\fP average MHz that the TSC ran during the entire interval.
+\fBCPU%c1, CPU%c3, CPU%c6, CPU%c7\fP show the percentage residency in hardware core idle states.
+\fBCoreTmp\fP Degrees Celsius reported by the per-core Digital Thermal Sensor.
+\fBPkgTmp\fP Degrees Celsius reported by the per-package Package Thermal Monitor.
+\fBPkg%pc2, Pkg%pc3, Pkg%pc6, Pkg%pc7\fP percentage residency in hardware package idle states.
+\fBPkgWatt\fP Watts consumed by the whole package.
+\fBCorWatt\fP Watts consumed by the core part of the package.
+\fBGFXWatt\fP Watts consumed by the Graphics part of the package -- available only on client processors.
+\fBRAMWatt\fP Watts consumed by the DRAM DIMMs -- available only on server processors.
 \fBPKG_%\fP percent of the interval that RAPL throttling was active on the Package.
 \fBRAM_%\fP percent of the interval that RAPL throttling was active on DRAM.
 .fi
@@ -78,29 +79,17 @@ For Watts columns, the summary is a system total.
 Subsequent rows show per-CPU statistics.
 
 .nf
-[root@sandy]# ./turbostat
-cor CPU    %c0  GHz  TSC    %c1    %c3    %c6    %c7 CTMP PTMP   %pc2   %pc3   %pc6   %pc7  Pkg_W  Cor_W GFX_W
-          0.06 0.80 2.29   0.11   0.00   0.00  99.83   47   40   0.26   0.01   0.44  98.78   3.49   0.12  0.14
-  0   0   0.07 0.80 2.29   0.07   0.00   0.00  99.86   40   40   0.26   0.01   0.44  98.78   3.49   0.12  0.14
-  0   4   0.03 0.80 2.29   0.12
-  1   1   0.04 0.80 2.29   0.25   0.01   0.00  99.71   40
-  1   5   0.16 0.80 2.29   0.13
-  2   2   0.05 0.80 2.29   0.06   0.01   0.00  99.88   40
-  2   6   0.03 0.80 2.29   0.08
-  3   3   0.05 0.80 2.29   0.08   0.00   0.00  99.87   47
-  3   7   0.04 0.84 2.29   0.09
-.fi
-.SH SUMMARY EXAMPLE
-The "-s" option prints the column headers just once,
-and then the one line system summary for each sample interval.
-
-.nf
-[root@wsm]# turbostat -S
-   %c0  GHz  TSC    %c1    %c3    %c6 CTMP   %pc3   %pc6
-  1.40 2.81 3.38  10.78  43.47  44.35   42  13.67   2.09
-  1.34 2.90 3.38  11.48  58.96  28.23   41  19.89   0.15
-  1.55 2.72 3.38  26.73  37.66  34.07   42   2.53   2.80
-  1.37 2.83 3.38  16.95  60.05  21.63   42   5.76   0.20
+[root@ivy]# ./turbostat
+    Core     CPU Avg_MHz   %Busy Bzy_MHz TSC_MHz     SMI  CPU%c1  CPU%c3  CPU%c6  CPU%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt 
+       -       -       6    0.36    1596    3492       0    0.59    0.01   99.04    0.00      23      24   23.82    0.01   72.47    0.00    6.40    1.01    0.00
+       0       0       9    0.58    1596    3492       0    0.28    0.01   99.13    0.00      23      24   23.82    0.01   72.47    0.00    6.40    1.01    0.00
+       0       4       1    0.07    1596    3492       0    0.79
+       1       1      10    0.65    1596    3492       0    0.59    0.00   98.76    0.00      23
+       1       5       5    0.28    1596    3492       0    0.95
+       2       2      10    0.66    1596    3492       0    0.41    0.01   98.92    0.00      23
+       2       6       2    0.10    1597    3492       0    0.97
+       3       3       3    0.20    1596    3492       0    0.44    0.00   99.37    0.00      23
+       3       7       5    0.31    1596    3492       0    0.33
 .fi
 .SH VERBOSE EXAMPLE
 The "-v" option adds verbosity to the output:
@@ -154,55 +143,35 @@ eg. Here a cycle soaker is run on 1 CPU (see %c0) for a few seconds
 until ^C while the other CPUs are mostly idle:
 
 .nf
-[root@x980 lenb]# ./turbostat cat /dev/zero > /dev/null
+root@ivy: turbostat cat /dev/zero > /dev/null
 ^C
-cor CPU    %c0  GHz  TSC    %c1    %c3    %c6   %pc3   %pc6
-          8.86 3.61 3.38  15.06  31.19  44.89   0.00   0.00
-  0   0   1.46 3.22 3.38  16.84  29.48  52.22   0.00   0.00
-  0   6   0.21 3.06 3.38  18.09
-  1   2   0.53 3.33 3.38   2.80  46.40  50.27
-  1   8   0.89 3.47 3.38   2.44
-  2   4   1.36 3.43 3.38   9.04  23.71  65.89
-  2  10   0.18 2.86 3.38  10.22
-  8   1   0.04 2.87 3.38  99.96   0.01   0.00
-  8   7  99.72 3.63 3.38   0.27
-  9   3   0.31 3.21 3.38   7.64  56.55  35.50
-  9   9   0.08 2.95 3.38   7.88
- 10   5   1.42 3.43 3.38   2.14  30.99  65.44
- 10  11   0.16 2.88 3.38   3.40
+    Core     CPU Avg_MHz   %Busy Bzy_MHz TSC_MHz     SMI  CPU%c1  CPU%c3  CPU%c6  CPU%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt 
+       -       -     496   12.75    3886    3492       0   13.16    0.04   74.04    0.00      36      36    0.00    0.00    0.00    0.00   23.15   17.65    0.00
+       0       0      22    0.57    3830    3492       0    0.83    0.02   98.59    0.00      27      36    0.00    0.00    0.00    0.00   23.15   17.65    0.00
+       0       4       9    0.24    3829    3492       0    1.15
+       1       1       4    0.09    3783    3492       0   99.91    0.00    0.00    0.00      36
+       1       5    3880   99.82    3888    3492       0    0.18
+       2       2      17    0.44    3813    3492       0    0.77    0.04   98.75    0.00      28
+       2       6      12    0.32    3823    3492       0    0.89
+       3       3      16    0.43    3844    3492       0    0.63    0.11   98.84    0.00      30
+       3       7       4    0.11    3827    3492       0    0.94
+30.372243 sec
+
 .fi
-Above the cycle soaker drives cpu7 up its 3.6 GHz turbo limit
+Above, the cycle soaker drives cpu5 up to its 3.8 GHz turbo limit
 while the other processors are generally in various states of idle.
 
-Note that cpu1 and cpu7 are HT siblings within core8.
-As cpu7 is very busy, it prevents its sibling, cpu1,
+Note that cpu1 and cpu5 are HT siblings within core1.
+As cpu5 is very busy, it prevents its sibling, cpu1,
 from entering a c-state deeper than c1.
 
-Note that turbostat reports average GHz of 3.63, while
-the arithmetic average of the GHz column above is lower.
-This is a weighted average, where the weight is %c0.  ie. it is the total number of
-un-halted cycles elapsed per time divided by the number of CPUs.
-.SH SMI COUNTING EXAMPLE
-On Intel Nehalem and newer processors, MSR 0x34 is a System Management Mode Interrupt (SMI) counter.
-This counter is shown by default under the "SMI" column.
-.nf
-[root@x980 ~]# turbostat
-cor CPU    %c0  GHz  TSC SMI    %c1    %c3    %c6 CTMP   %pc3   %pc6
-          0.11 1.91 3.38   0   1.84   0.26  97.79   29   0.82  83.87
-  0   0   0.40 1.63 3.38   0  10.27   0.12  89.20   20   0.82  83.88
-  0   6   0.06 1.63 3.38   0  10.61
-  1   2   0.37 2.63 3.38   0   0.02   0.10  99.51   22
-  1   8   0.01 1.62 3.38   0   0.39
-  2   4   0.07 1.62 3.38   0   0.04   0.07  99.82   23
-  2  10   0.02 1.62 3.38   0   0.09
-  8   1   0.23 1.64 3.38   0   0.10   1.07  98.60   24
-  8   7   0.02 1.64 3.38   0   0.31
-  9   3   0.03 1.62 3.38   0   0.03   0.05  99.89   29
-  9   9   0.02 1.62 3.38   0   0.05
- 10   5   0.07 1.62 3.38   0   0.08   0.12  99.73   27
- 10  11   0.03 1.62 3.38   0   0.13
-^C
-.fi
+Note that the Avg_MHz column reflects the total number of cycles executed
+divided by the measurement interval.  If the %Busy column is 100%,
+then the processor was running at that speed the entire interval.
+The Avg_MHz divided by the %Busy results in the Bzy_MHz --
+which is the average frequency while the processor was executing --
+not including any non-busy idle time.
+
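As a quick consistency check of this relationship against the sample output above: in the busy example, cpu5 reports Avg_MHz 3880 and %Busy 99.82, and 3880 / 0.9982 is roughly 3887 MHz, matching the reported Bzy_MHz of 3888 up to rounding.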
 .SH NOTES
 
 .B "turbostat "
index 77eb130168da376ad976d8adb9abcea3f04ee53d..7c9d8e71eb9ec85d1213622fd939cdc9f2b95de0 100644 (file)
@@ -56,7 +56,7 @@ unsigned int do_slm_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
-unsigned int units = 1000000000;       /* Ghz etc */
+unsigned int units = 1000000;  /* MHz etc */
 unsigned int genuine_intel;
 unsigned int has_invariant_tsc;
 unsigned int do_nehalem_platform_info;
@@ -264,88 +264,93 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
        return 0;
 }
 
+/*
+ * Example Format w/ field column widths:
+ *
+ * Package    Core     CPU Avg_MHz   %Busy Bzy_MHz TSC_MHz     SMI  CPU%c1  CPU%c3  CPU%c6  CPU%c7 CoreTmp  PkgTmp Pkg%pc2 Pkg%pc3 Pkg%pc6 Pkg%pc7 PkgWatt CorWatt GFXWatt
+ * 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567 1234567
+ */
+
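The convention the comment above documents boils down to fixed 8-character cells: each enabled column appends one 8-wide field (header and value alike), so rows line up no matter which columns are selected. A simplified stand-alone sketch of that layout with invented values (not the turbostat code itself):

#include <stdio.h>

int main(void)
{
	char buf[128];
	char *outp = buf;

	/* every column is an 8-character-wide cell; enabled columns are
	 * simply appended, so header and data rows stay aligned */
	outp += sprintf(outp, "%8s%8s%8s\n", "CPU", "Avg_MHz", "%Busy");
	outp += sprintf(outp, "%8d%8.0f%8.2f\n", 5, 3880.0, 99.82);
	fputs(buf, stdout);
	return 0;
}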
 void print_header(void)
 {
        if (show_pkg)
-               outp += sprintf(outp, "pk");
-       if (show_pkg)
-               outp += sprintf(outp, " ");
+               outp += sprintf(outp, "Package ");
        if (show_core)
-               outp += sprintf(outp, "cor");
+               outp += sprintf(outp, "    Core ");
        if (show_cpu)
-               outp += sprintf(outp, " CPU");
-       if (show_pkg || show_core || show_cpu)
-               outp += sprintf(outp, " ");
+               outp += sprintf(outp, "    CPU ");
+       if (has_aperf)
+               outp += sprintf(outp, "Avg_MHz ");
        if (do_nhm_cstates)
-               outp += sprintf(outp, "   %%c0");
+               outp += sprintf(outp, "  %%Busy ");
        if (has_aperf)
-               outp += sprintf(outp, "  GHz");
-       outp += sprintf(outp, "  TSC");
+               outp += sprintf(outp, "Bzy_MHz ");
+       outp += sprintf(outp, "TSC_MHz ");
        if (do_smi)
-               outp += sprintf(outp, " SMI");
+               outp += sprintf(outp, "    SMI ");
        if (extra_delta_offset32)
-               outp += sprintf(outp, "  count 0x%03X", extra_delta_offset32);
+               outp += sprintf(outp, " count 0x%03X ", extra_delta_offset32);
        if (extra_delta_offset64)
-               outp += sprintf(outp, "  COUNT 0x%03X", extra_delta_offset64);
+               outp += sprintf(outp, " COUNT 0x%03X ", extra_delta_offset64);
        if (extra_msr_offset32)
-               outp += sprintf(outp, "   MSR 0x%03X", extra_msr_offset32);
+               outp += sprintf(outp, "  MSR 0x%03X ", extra_msr_offset32);
        if (extra_msr_offset64)
-               outp += sprintf(outp, "           MSR 0x%03X", extra_msr_offset64);
+               outp += sprintf(outp, "          MSR 0x%03X ", extra_msr_offset64);
        if (do_nhm_cstates)
-               outp += sprintf(outp, "    %%c1");
+               outp += sprintf(outp, " CPU%%c1 ");
        if (do_nhm_cstates && !do_slm_cstates)
-               outp += sprintf(outp, "    %%c3");
+               outp += sprintf(outp, " CPU%%c3 ");
        if (do_nhm_cstates)
-               outp += sprintf(outp, "    %%c6");
+               outp += sprintf(outp, " CPU%%c6 ");
        if (do_snb_cstates)
-               outp += sprintf(outp, "    %%c7");
+               outp += sprintf(outp, " CPU%%c7 ");
 
        if (do_dts)
-               outp += sprintf(outp, " CTMP");
+               outp += sprintf(outp, "CoreTmp ");
        if (do_ptm)
-               outp += sprintf(outp, " PTMP");
+               outp += sprintf(outp, " PkgTmp ");
 
        if (do_snb_cstates)
-               outp += sprintf(outp, "   %%pc2");
+               outp += sprintf(outp, "Pkg%%pc2 ");
        if (do_nhm_cstates && !do_slm_cstates)
-               outp += sprintf(outp, "   %%pc3");
+               outp += sprintf(outp, "Pkg%%pc3 ");
        if (do_nhm_cstates && !do_slm_cstates)
-               outp += sprintf(outp, "   %%pc6");
+               outp += sprintf(outp, "Pkg%%pc6 ");
        if (do_snb_cstates)
-               outp += sprintf(outp, "   %%pc7");
+               outp += sprintf(outp, "Pkg%%pc7 ");
        if (do_c8_c9_c10) {
-               outp += sprintf(outp, "   %%pc8");
-               outp += sprintf(outp, "   %%pc9");
-               outp += sprintf(outp, "  %%pc10");
+               outp += sprintf(outp, "Pkg%%pc8 ");
+               outp += sprintf(outp, "Pkg%%pc9 ");
+               outp += sprintf(outp, "Pk%%pc10 ");
        }
 
        if (do_rapl && !rapl_joules) {
                if (do_rapl & RAPL_PKG)
-                       outp += sprintf(outp, "  Pkg_W");
+                       outp += sprintf(outp, "PkgWatt ");
                if (do_rapl & RAPL_CORES)
-                       outp += sprintf(outp, "  Cor_W");
+                       outp += sprintf(outp, "CorWatt ");
                if (do_rapl & RAPL_GFX)
-                       outp += sprintf(outp, " GFX_W");
+                       outp += sprintf(outp, "GFXWatt ");
                if (do_rapl & RAPL_DRAM)
-                       outp += sprintf(outp, " RAM_W");
+                       outp += sprintf(outp, "RAMWatt ");
                if (do_rapl & RAPL_PKG_PERF_STATUS)
-                       outp += sprintf(outp, " PKG_%%");
+                       outp += sprintf(outp, "  PKG_%% ");
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
-                       outp += sprintf(outp, " RAM_%%");
+                       outp += sprintf(outp, "  RAM_%% ");
        } else {
                if (do_rapl & RAPL_PKG)
-                       outp += sprintf(outp, "  Pkg_J");
+                       outp += sprintf(outp, "  Pkg_J ");
                if (do_rapl & RAPL_CORES)
-                       outp += sprintf(outp, "  Cor_J");
+                       outp += sprintf(outp, "  Cor_J ");
                if (do_rapl & RAPL_GFX)
-                       outp += sprintf(outp, " GFX_J");
+                       outp += sprintf(outp, "  GFX_J ");
                if (do_rapl & RAPL_DRAM)
-                       outp += sprintf(outp, " RAM_W");
+                       outp += sprintf(outp, "  RAM_J ");
                if (do_rapl & RAPL_PKG_PERF_STATUS)
-                       outp += sprintf(outp, " PKG_%%");
+                       outp += sprintf(outp, "  PKG_%% ");
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
-                       outp += sprintf(outp, " RAM_%%");
-               outp += sprintf(outp, " time");
+                       outp += sprintf(outp, "  RAM_%% ");
+               outp += sprintf(outp, "  time ");
 
        }
        outp += sprintf(outp, "\n");
@@ -410,25 +415,12 @@ int dump_counters(struct thread_data *t, struct core_data *c,
 
 /*
  * column formatting convention & formats
- * package: "pk" 2 columns %2d
- * core: "cor" 3 columns %3d
- * CPU: "CPU" 3 columns %3d
- * Pkg_W: %6.2
- * Cor_W: %6.2
- * GFX_W: %5.2
- * RAM_W: %5.2
- * GHz: "GHz" 3 columns %3.2
- * TSC: "TSC" 3 columns %3.2
- * SMI: "SMI" 4 columns %4d
- * percentage " %pc3" %6.2
- * Perf Status percentage: %5.2
- * "CTMP" 4 columns %4d
  */
 int format_counters(struct thread_data *t, struct core_data *c,
        struct pkg_data *p)
 {
        double interval_float;
-       char *fmt5, *fmt6;
+       char *fmt8;
 
         /* if showing only 1st thread in core and this isn't one, bail out */
        if (show_core_only && !(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
@@ -443,65 +435,52 @@ int format_counters(struct thread_data *t, struct core_data *c,
        /* topo columns, print blanks on 1st (average) line */
        if (t == &average.threads) {
                if (show_pkg)
-                       outp += sprintf(outp, "  ");
-               if (show_pkg && show_core)
-                       outp += sprintf(outp, " ");
+                       outp += sprintf(outp, "       -");
                if (show_core)
-                       outp += sprintf(outp, "   ");
+                       outp += sprintf(outp, "       -");
                if (show_cpu)
-                       outp += sprintf(outp, " " "   ");
+                       outp += sprintf(outp, "       -");
        } else {
                if (show_pkg) {
                        if (p)
-                               outp += sprintf(outp, "%2d", p->package_id);
+                               outp += sprintf(outp, "%8d", p->package_id);
                        else
-                               outp += sprintf(outp, "  ");
+                               outp += sprintf(outp, "       -");
                }
-               if (show_pkg && show_core)
-                       outp += sprintf(outp, " ");
                if (show_core) {
                        if (c)
-                               outp += sprintf(outp, "%3d", c->core_id);
+                               outp += sprintf(outp, "%8d", c->core_id);
                        else
-                               outp += sprintf(outp, "   ");
+                               outp += sprintf(outp, "       -");
                }
                if (show_cpu)
-                       outp += sprintf(outp, " %3d", t->cpu_id);
+                       outp += sprintf(outp, "%8d", t->cpu_id);
        }
+
+       /* AvgMHz */
+       if (has_aperf)
+               outp += sprintf(outp, "%8.0f",
+                       1.0 / units * t->aperf / interval_float);
+
        /* %c0 */
        if (do_nhm_cstates) {
-               if (show_pkg || show_core || show_cpu)
-                       outp += sprintf(outp, " ");
                if (!skip_c0)
-                       outp += sprintf(outp, "%6.2f", 100.0 * t->mperf/t->tsc);
+                       outp += sprintf(outp, "%8.2f", 100.0 * t->mperf/t->tsc);
                else
-                       outp += sprintf(outp, "  ****");
+                       outp += sprintf(outp, "********");
        }
 
-       /* GHz */
-       if (has_aperf) {
-               if (!aperf_mperf_unstable) {
-                       outp += sprintf(outp, " %3.2f",
-                               1.0 * t->tsc / units * t->aperf /
-                               t->mperf / interval_float);
-               } else {
-                       if (t->aperf > t->tsc || t->mperf > t->tsc) {
-                               outp += sprintf(outp, " ***");
-                       } else {
-                               outp += sprintf(outp, "%3.1f*",
-                                       1.0 * t->tsc /
-                                       units * t->aperf /
-                                       t->mperf / interval_float);
-                       }
-               }
-       }
+       /* BzyMHz */
+       if (has_aperf)
+               outp += sprintf(outp, "%8.0f",
+                       1.0 * t->tsc / units * t->aperf / t->mperf / interval_float);
 
        /* TSC */
-       outp += sprintf(outp, "%5.2f", 1.0 * t->tsc/units/interval_float);
+       outp += sprintf(outp, "%8.0f", 1.0 * t->tsc/units/interval_float);
 
        /* SMI */
        if (do_smi)
-               outp += sprintf(outp, "%4d", t->smi_count);
+               outp += sprintf(outp, "%8d", t->smi_count);
 
        /* delta */
        if (extra_delta_offset32)
@@ -520,9 +499,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
 
        if (do_nhm_cstates) {
                if (!skip_c1)
-                       outp += sprintf(outp, " %6.2f", 100.0 * t->c1/t->tsc);
+                       outp += sprintf(outp, "%8.2f", 100.0 * t->c1/t->tsc);
                else
-                       outp += sprintf(outp, "  ****");
+                       outp += sprintf(outp, "********");
        }
 
        /* print per-core data only for 1st thread in core */
@@ -530,79 +509,76 @@ int format_counters(struct thread_data *t, struct core_data *c,
                goto done;
 
        if (do_nhm_cstates && !do_slm_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * c->c3/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * c->c6/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
        if (do_snb_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * c->c7/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * c->c7/t->tsc);
 
        if (do_dts)
-               outp += sprintf(outp, " %4d", c->core_temp_c);
+               outp += sprintf(outp, "%8d", c->core_temp_c);
 
        /* print per-package data only for 1st core in package */
        if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
                goto done;
 
        if (do_ptm)
-               outp += sprintf(outp, " %4d", p->pkg_temp_c);
+               outp += sprintf(outp, "%8d", p->pkg_temp_c);
 
        if (do_snb_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc2/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc2/t->tsc);
        if (do_nhm_cstates && !do_slm_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc3/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc3/t->tsc);
        if (do_nhm_cstates && !do_slm_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc6/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc6/t->tsc);
        if (do_snb_cstates)
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc7/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc7/t->tsc);
        if (do_c8_c9_c10) {
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc8/t->tsc);
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc9/t->tsc);
-               outp += sprintf(outp, " %6.2f", 100.0 * p->pc10/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc8/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc9/t->tsc);
+               outp += sprintf(outp, "%8.2f", 100.0 * p->pc10/t->tsc);
        }
 
        /*
         * If measurement interval exceeds minimum RAPL Joule Counter range,
         * indicate that results are suspect by printing "**" in fraction place.
         */
-       if (interval_float < rapl_joule_counter_range) {
-               fmt5 = " %5.2f";
-               fmt6 = " %6.2f";
-       } else {
-               fmt5 = " %3.0f**";
-               fmt6 = " %4.0f**";
-       }
+       if (interval_float < rapl_joule_counter_range)
+               fmt8 = "%8.2f";
+       else
+               fmt8 = " %6.0f**";
 
        if (do_rapl && !rapl_joules) {
                if (do_rapl & RAPL_PKG)
-                       outp += sprintf(outp, fmt6, p->energy_pkg * rapl_energy_units / interval_float);
+                       outp += sprintf(outp, fmt8, p->energy_pkg * rapl_energy_units / interval_float);
                if (do_rapl & RAPL_CORES)
-                       outp += sprintf(outp, fmt6, p->energy_cores * rapl_energy_units / interval_float);
+                       outp += sprintf(outp, fmt8, p->energy_cores * rapl_energy_units / interval_float);
                if (do_rapl & RAPL_GFX)
-                       outp += sprintf(outp, fmt5, p->energy_gfx * rapl_energy_units / interval_float);
+                       outp += sprintf(outp, fmt8, p->energy_gfx * rapl_energy_units / interval_float);
                if (do_rapl & RAPL_DRAM)
-                       outp += sprintf(outp, fmt5, p->energy_dram * rapl_energy_units / interval_float);
+                       outp += sprintf(outp, fmt8, p->energy_dram * rapl_energy_units / interval_float);
                if (do_rapl & RAPL_PKG_PERF_STATUS)
-                       outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+                       outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
-                       outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+                       outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
        } else {
                if (do_rapl & RAPL_PKG)
-                       outp += sprintf(outp, fmt6,
+                       outp += sprintf(outp, fmt8,
                                        p->energy_pkg * rapl_energy_units);
                if (do_rapl & RAPL_CORES)
-                       outp += sprintf(outp, fmt6,
+                       outp += sprintf(outp, fmt8,
                                        p->energy_cores * rapl_energy_units);
                if (do_rapl & RAPL_GFX)
-                       outp += sprintf(outp, fmt5,
+                       outp += sprintf(outp, fmt8,
                                        p->energy_gfx * rapl_energy_units);
                if (do_rapl & RAPL_DRAM)
-                       outp += sprintf(outp, fmt5,
+                       outp += sprintf(outp, fmt8,
                                        p->energy_dram * rapl_energy_units);
                if (do_rapl & RAPL_PKG_PERF_STATUS)
-                       outp += sprintf(outp, fmt5, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
+                       outp += sprintf(outp, fmt8, 100.0 * p->rapl_pkg_perf_status * rapl_time_units / interval_float);
                if (do_rapl & RAPL_DRAM_PERF_STATUS)
-                       outp += sprintf(outp, fmt5, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
-       outp += sprintf(outp, fmt5, interval_float);
+                       outp += sprintf(outp, fmt8, 100.0 * p->rapl_dram_perf_status * rapl_time_units / interval_float);
+       outp += sprintf(outp, fmt8, interval_float);
 
        }
 done:
@@ -1516,6 +1492,9 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
        case 0x46:      /* HSW */
        case 0x37:      /* BYT */
        case 0x4D:      /* AVN */
+       case 0x3D:      /* BDW */
+       case 0x4F:      /* BDX */
+       case 0x56:      /* BDX-DE */
                return 1;
        case 0x2E:      /* Nehalem-EX Xeon - Beckton */
        case 0x2F:      /* Westmere-EX Xeon - Eagleton */
@@ -1629,9 +1608,12 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3C:      /* HSW */
        case 0x45:      /* HSW */
        case 0x46:      /* HSW */
+       case 0x3D:      /* BDW */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO;
                break;
        case 0x3F:      /* HSX */
+       case 0x4F:      /* BDX */
+       case 0x56:      /* BDX-DE */
                do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
                break;
        case 0x2D:
@@ -1875,6 +1857,9 @@ int is_snb(unsigned int family, unsigned int model)
        case 0x3F:      /* HSW */
        case 0x45:      /* HSW */
        case 0x46:      /* HSW */
+       case 0x3D:      /* BDW */
+       case 0x4F:      /* BDX */
+       case 0x56:      /* BDX-DE */
                return 1;
        }
        return 0;
@@ -1886,7 +1871,8 @@ int has_c8_c9_c10(unsigned int family, unsigned int model)
                return 0;
 
        switch (model) {
-       case 0x45:
+       case 0x45:      /* HSW */
+       case 0x3D:      /* BDW */
                return 1;
        }
        return 0;
@@ -2455,7 +2441,7 @@ int main(int argc, char **argv)
        cmdline(argc, argv);
 
        if (verbose)
-               fprintf(stderr, "turbostat v3.6 Dec 2, 2013"
+               fprintf(stderr, "turbostat v3.7 Feb 6, 2014"
                        " - Len Brown <lenb@kernel.org>\n");
 
        turbostat_init();
index 5081e809821fad168b4a13932fb621787a1f533a..22fa819a9b6a7ba06b9c788bfc7b84ba0b07a0ea 100644 (file)
@@ -277,7 +277,7 @@ int kvm_timer_hyp_init(void)
 
        host_vtimer_irq = ppi;
 
-       err = register_cpu_notifier(&kvm_timer_cpu_nb);
+       err = __register_cpu_notifier(&kvm_timer_cpu_nb);
        if (err) {
                kvm_err("Cannot register timer CPU notifier\n");
                goto out_free;
index 8ca405cd7c1afce8fbbf38a51bf5cd75b2f32168..47b29834a6b61def09f6340013cc9b2927c03cd9 100644 (file)
@@ -1496,7 +1496,7 @@ int kvm_vgic_hyp_init(void)
                goto out;
        }
 
-       ret = register_cpu_notifier(&vgic_cpu_nb);
+       ret = __register_cpu_notifier(&vgic_cpu_nb);
        if (ret) {
                kvm_err("Cannot register vgic CPU notifier\n");
                goto out_free_irq;