-rw-r--r--Documentation/ABI/stable/firewire-cdev103
-rw-r--r--Documentation/ABI/stable/sysfs-bus-firewire122
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus8
-rw-r--r--Documentation/ABI/testing/sysfs-driver-hid-wiimote10
-rw-r--r--Documentation/devicetree/bindings/arm/primecell.txt21
-rw-r--r--Documentation/devicetree/bindings/crypto/fsl-sec2.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/sec.txt)2
-rw-r--r--Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt22
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt46
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio_nvidia.txt8
-rw-r--r--Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt22
-rw-r--r--Documentation/devicetree/bindings/spi/spi_nvidia.txt5
-rw-r--r--Documentation/devicetree/bindings/tty/serial/of-serial.txt36
-rw-r--r--Documentation/feature-removal-schedule.txt10
-rw-r--r--Documentation/filesystems/ubifs.txt28
-rw-r--r--Documentation/mmc/00-INDEX2
-rw-r--r--Documentation/mmc/mmc-async-req.txt87
-rw-r--r--Documentation/power/devices.txt14
-rw-r--r--Documentation/power/opp.txt2
-rw-r--r--Documentation/power/runtime_pm.txt229
-rw-r--r--Documentation/spi/ep93xx_spi10
-rw-r--r--Documentation/spi/pxa2xx5
-rw-r--r--Documentation/virtual/lguest/lguest.c47
-rw-r--r--Documentation/x86/boot.txt2
-rw-r--r--MAINTAINERS24
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/configs/mmp2_defconfig9
-rw-r--r--arch/arm/mach-ep93xx/Makefile4
-rw-r--r--arch/arm/mach-ep93xx/core.c33
-rw-r--r--arch/arm/mach-ep93xx/dma-m2p.c411
-rw-r--r--arch/arm/mach-ep93xx/dma.c108
-rw-r--r--arch/arm/mach-ep93xx/include/mach/dma.h190
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h2
-rw-r--r--arch/arm/mach-imx/clock-imx25.c7
-rw-r--r--arch/arm/mach-imx/mach-apf9328.c2
-rw-r--r--arch/arm/mach-imx/mach-armadillo5x0.c2
-rw-r--r--arch/arm/mach-imx/mach-bug.c2
-rw-r--r--arch/arm/mach-imx/mach-cpuimx27.c2
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c2
-rw-r--r--arch/arm/mach-imx/mach-eukrea_cpuimx25.c2
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c2
-rw-r--r--arch/arm/mach-imx/mach-imx27ipcam.c2
-rw-r--r--arch/arm/mach-imx/mach-imx27lite.c2
-rw-r--r--arch/arm/mach-imx/mach-kzm_arm11_01.c2
-rw-r--r--arch/arm/mach-imx/mach-mx1ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx25_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mx27ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31ads.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31lilly.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31lite.c2
-rw-r--r--arch/arm/mach-imx/mach-mx31moboard.c2
-rw-r--r--arch/arm/mach-imx/mach-mx35_3ds.c2
-rw-r--r--arch/arm/mach-imx/mach-mxt_td60.c2
-rw-r--r--arch/arm/mach-imx/mach-pca100.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm038.c2
-rw-r--r--arch/arm/mach-imx/mach-pcm043.c2
-rw-r--r--arch/arm/mach-imx/mach-qong.c2
-rw-r--r--arch/arm/mach-imx/mach-scb9328.c2
-rw-r--r--arch/arm/mach-imx/mach-vpr200.c2
-rw-r--r--arch/arm/mach-imx/mm-imx1.c21
-rw-r--r--arch/arm/mach-imx/mm-imx21.c21
-rw-r--r--arch/arm/mach-imx/mm-imx25.c17
-rw-r--r--arch/arm/mach-imx/mm-imx27.c22
-rw-r--r--arch/arm/mach-imx/mm-imx31.c15
-rw-r--r--arch/arm/mach-imx/mm-imx35.c16
-rw-r--r--arch/arm/mach-mmp/brownstone.c10
-rw-r--r--arch/arm/mach-mmp/include/mach/mmp2.h2
-rw-r--r--arch/arm/mach-mmp/jasper.c2
-rw-r--r--arch/arm/mach-mmp/mmp2.c16
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51.c2
-rw-r--r--arch/arm/mach-mx5/board-cpuimx51sd.c2
-rw-r--r--arch/arm/mach-mx5/board-mx50_rdp.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_3ds.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_babbage.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikamx.c2
-rw-r--r--arch/arm/mach-mx5/board-mx51_efikasb.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_evk.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_loco.c2
-rw-r--r--arch/arm/mach-mx5/board-mx53_smd.c2
-rw-r--r--arch/arm/mach-mx5/clock-mx51-mx53.c11
-rw-r--r--arch/arm/mach-mx5/devices.c64
-rw-r--r--arch/arm/mach-mx5/mm-mx50.c22
-rw-r--r--arch/arm/mach-mx5/mm.c27
-rw-r--r--arch/arm/mach-mxs/Makefile2
-rw-r--r--arch/arm/mach-mxs/devices.c11
-rw-r--r--arch/arm/mach-mxs/devices/Makefile1
-rw-r--r--arch/arm/mach-mxs/devices/platform-gpio-mxs.c53
-rw-r--r--arch/arm/mach-mxs/gpio.c331
-rw-r--r--arch/arm/mach-mxs/gpio.h34
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h2
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c1
-rw-r--r--arch/arm/mach-mxs/mm-mx23.c1
-rw-r--r--arch/arm/mach-mxs/mm-mx28.c1
-rw-r--r--arch/arm/mach-omap1/gpio15xx.c22
-rw-r--r--arch/arm/mach-omap1/gpio16xx.c28
-rw-r--r--arch/arm/mach-omap1/gpio7xx.c27
-rw-r--r--arch/arm/mach-omap1/pm_bus.c14
-rw-r--r--arch/arm/mach-omap2/gpio.c34
-rw-r--r--arch/arm/mach-omap2/serial.c1
-rw-r--r--arch/arm/mach-s3c2410/include/mach/spi-gpio.h28
-rw-r--r--arch/arm/mach-s3c2410/mach-qt2410.c34
-rw-r--r--arch/arm/mach-s3c2412/mach-jive.c41
-rw-r--r--arch/arm/mach-s3c2440/mach-gta02.c1
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c5
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c5
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c1
-rw-r--r--arch/arm/mach-shmobile/include/mach/sh7372.h29
-rw-r--r--arch/arm/mach-shmobile/pm-sh7372.c160
-rw-r--r--arch/arm/mach-shmobile/pm_runtime.c22
-rw-r--r--arch/arm/mach-shmobile/setup-sh7372.c11
-rw-r--r--arch/arm/mach-tegra/Makefile1
-rw-r--r--arch/arm/plat-mxc/Makefile2
-rw-r--r--arch/arm/plat-mxc/devices.c11
-rw-r--r--arch/arm/plat-mxc/devices/Makefile1
-rw-r--r--arch/arm/plat-mxc/devices/platform-gpio-mxc.c32
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c12
-rw-r--r--arch/arm/plat-mxc/gpio.c361
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h12
-rw-r--r--arch/arm/plat-mxc/include/mach/devices-common.h2
-rw-r--r--arch/arm/plat-mxc/include/mach/gpio.h27
-rw-r--r--arch/arm/plat-mxc/include/mach/irqs.h21
-rw-r--r--arch/arm/plat-omap/include/plat/gpio.h20
-rw-r--r--arch/arm/plat-omap/include/plat/omap_device.h9
-rw-r--r--arch/arm/plat-omap/omap_device.c53
-rw-r--r--arch/arm/plat-pxa/include/plat/sdhci.h35
-rw-r--r--arch/microblaze/include/asm/pci-bridge.h13
-rw-r--r--arch/microblaze/include/asm/pci.h3
-rw-r--r--arch/microblaze/include/asm/prom.h15
-rw-r--r--arch/microblaze/pci/Makefile2
-rw-r--r--arch/microblaze/pci/pci-common.c112
-rw-r--r--arch/microblaze/pci/pci_32.c432
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h29
-rw-r--r--arch/powerpc/include/asm/pci.h3
-rw-r--r--arch/powerpc/include/asm/prom.h14
-rw-r--r--arch/powerpc/kernel/pci-common.c11
-rw-r--r--arch/powerpc/kernel/pci_32.c150
-rw-r--r--arch/powerpc/kernel/pci_dn.c47
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c9
-rw-r--r--arch/powerpc/platforms/52xx/Kconfig8
-rw-r--r--arch/powerpc/platforms/52xx/Makefile1
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/sh/kernel/cpu/shmobile/pm_runtime.c6
-rw-r--r--arch/sparc/include/asm/pci_32.h3
-rw-r--r--arch/sparc/include/asm/pci_64.h3
-rw-r--r--arch/sparc/include/asm/ptrace.h1
-rw-r--r--arch/sparc/kernel/pci.c8
-rw-r--r--arch/sparc/kernel/pcic.c8
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/include/asm/lguest_hcall.h1
-rw-r--r--arch/x86/include/asm/percpu.h11
-rw-r--r--arch/x86/include/asm/prom.h11
-rw-r--r--arch/x86/include/asm/xen/pci.h5
-rw-r--r--arch/x86/kernel/asm-offsets_32.c1
-rw-r--r--arch/x86/kernel/devicetree.c60
-rw-r--r--arch/x86/kernel/reboot.c24
-rw-r--r--arch/x86/lguest/boot.c36
-rw-r--r--arch/x86/lguest/i386_head.S35
-rw-r--r--arch/x86/pci/xen.c371
-rw-r--r--arch/x86/xen/Makefile2
-rw-r--r--arch/x86/xen/enlighten.c8
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/vga.c67
-rw-r--r--arch/x86/xen/xen-ops.h11
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/clock_ops.c234
-rw-r--r--drivers/base/power/domain.c1273
-rw-r--r--drivers/base/power/generic_ops.c98
-rw-r--r--drivers/base/power/main.c65
-rw-r--r--drivers/base/power/opp.c17
-rw-r--r--drivers/base/power/runtime.c89
-rw-r--r--drivers/base/power/sysfs.c6
-rw-r--r--drivers/base/power/trace.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xsysace.c98
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/connector/cn_proc.c35
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/ep93xx_dma.c1355
-rw-r--r--drivers/firewire/core-cdev.c20
-rw-r--r--drivers/firewire/net.c5
-rw-r--r--drivers/firewire/ohci.c42
-rw-r--r--drivers/gpio/Kconfig66
-rw-r--r--drivers/gpio/Makefile87
-rw-r--r--drivers/gpio/gpio-74x164.c (renamed from drivers/gpio/74x164.c)33
-rw-r--r--drivers/gpio/gpio-ab8500.c (renamed from drivers/gpio/ab8500-gpio.c)0
-rw-r--r--drivers/gpio/gpio-adp5520.c (renamed from drivers/gpio/adp5520-gpio.c)0
-rw-r--r--drivers/gpio/gpio-adp5588.c (renamed from drivers/gpio/adp5588-gpio.c)0
-rw-r--r--drivers/gpio/gpio-bt8xx.c (renamed from drivers/gpio/bt8xxgpio.c)0
-rw-r--r--drivers/gpio/gpio-cs5535.c (renamed from drivers/gpio/cs5535-gpio.c)0
-rw-r--r--drivers/gpio/gpio-da9052.c277
-rw-r--r--drivers/gpio/gpio-ep93xx.c (renamed from arch/arm/mach-ep93xx/gpio.c)243
-rw-r--r--drivers/gpio/gpio-exynos4.c5
-rw-r--r--drivers/gpio/gpio-generic.c (renamed from drivers/gpio/basic_mmio_gpio.c)6
-rw-r--r--drivers/gpio/gpio-it8761e.c (renamed from drivers/gpio/it8761e_gpio.c)2
-rw-r--r--drivers/gpio/gpio-janz-ttl.c (renamed from drivers/gpio/janz-ttl.c)0
-rw-r--r--drivers/gpio/gpio-langwell.c (renamed from drivers/gpio/langwell_gpio.c)4
-rw-r--r--drivers/gpio/gpio-max7300.c (renamed from drivers/gpio/max7300.c)2
-rw-r--r--drivers/gpio/gpio-max7301.c (renamed from drivers/gpio/max7301.c)2
-rw-r--r--drivers/gpio/gpio-max730x.c (renamed from drivers/gpio/max730x.c)2
-rw-r--r--drivers/gpio/gpio-max732x.c (renamed from drivers/gpio/max732x.c)2
-rw-r--r--drivers/gpio/gpio-mc33880.c (renamed from drivers/gpio/mc33880.c)2
-rw-r--r--drivers/gpio/gpio-mcp23s08.c (renamed from drivers/gpio/mcp23s08.c)291
-rw-r--r--drivers/gpio/gpio-ml-ioh.c (renamed from drivers/gpio/ml_ioh_gpio.c)2
-rw-r--r--drivers/gpio/gpio-mpc5200.c (renamed from arch/powerpc/platforms/52xx/mpc52xx_gpio.c)12
-rw-r--r--drivers/gpio/gpio-mxc.c460
-rw-r--r--drivers/gpio/gpio-mxs.c289
-rw-r--r--drivers/gpio/gpio-omap.c723
-rw-r--r--drivers/gpio/gpio-pca953x.c (renamed from drivers/gpio/pca953x.c)105
-rw-r--r--drivers/gpio/gpio-pcf857x.c (renamed from drivers/gpio/pcf857x.c)2
-rw-r--r--drivers/gpio/gpio-pch.c (renamed from drivers/gpio/pch_gpio.c)0
-rw-r--r--drivers/gpio/gpio-pl061.c (renamed from drivers/gpio/pl061.c)4
-rw-r--r--drivers/gpio/gpio-plat-samsung.c3
-rw-r--r--drivers/gpio/gpio-rdc321x.c (renamed from drivers/gpio/rdc321x-gpio.c)0
-rw-r--r--drivers/gpio/gpio-s5pc100.c5
-rw-r--r--drivers/gpio/gpio-s5pv210.c5
-rw-r--r--drivers/gpio/gpio-sch.c (renamed from drivers/gpio/sch_gpio.c)2
-rw-r--r--drivers/gpio/gpio-stmpe.c (renamed from drivers/gpio/stmpe-gpio.c)0
-rw-r--r--drivers/gpio/gpio-sx150x.c (renamed from drivers/gpio/sx150x.c)0
-rw-r--r--drivers/gpio/gpio-tc3589x.c (renamed from drivers/gpio/tc3589x-gpio.c)0
-rw-r--r--drivers/gpio/gpio-tegra.c (renamed from arch/arm/mach-tegra/gpio.c)10
-rw-r--r--drivers/gpio/gpio-timberdale.c (renamed from drivers/gpio/timbgpio.c)2
-rw-r--r--drivers/gpio/gpio-tps65910.c (renamed from drivers/gpio/tps65910-gpio.c)2
-rw-r--r--drivers/gpio/gpio-twl4030.c (renamed from drivers/gpio/twl4030-gpio.c)2
-rw-r--r--drivers/gpio/gpio-u300.c5
-rw-r--r--drivers/gpio/gpio-ucb1400.c (renamed from drivers/gpio/ucb1400_gpio.c)0
-rw-r--r--drivers/gpio/gpio-vr41xx.c (renamed from drivers/gpio/vr41xx_giu.c)2
-rw-r--r--drivers/gpio/gpio-vx855.c (renamed from drivers/gpio/vx855_gpio.c)0
-rw-r--r--drivers/gpio/gpio-wm831x.c (renamed from drivers/gpio/wm831x-gpio.c)2
-rw-r--r--drivers/gpio/gpio-wm8350.c (renamed from drivers/gpio/wm8350-gpiolib.c)2
-rw-r--r--drivers/gpio/gpio-wm8994.c (renamed from drivers/gpio/wm8994-gpio.c)2
-rw-r--r--drivers/gpio/gpio-xilinx.c (renamed from drivers/gpio/xilinx_gpio.c)0
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c71
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c15
-rw-r--r--drivers/hid/Kconfig44
-rw-r--r--drivers/hid/Makefile3
-rw-r--r--drivers/hid/hid-axff.c1
-rw-r--r--drivers/hid/hid-core.c13
-rw-r--r--drivers/hid/hid-emsff.c7
-rw-r--r--drivers/hid/hid-holtekff.c240
-rw-r--r--drivers/hid/hid-ids.h15
-rw-r--r--drivers/hid/hid-lg.c74
-rw-r--r--drivers/hid/hid-microsoft.c28
-rw-r--r--drivers/hid/hid-multitouch.c4
-rw-r--r--drivers/hid/hid-prodikeys.c17
-rw-r--r--drivers/hid/hid-roccat-arvo.c21
-rw-r--r--drivers/hid/hid-roccat-arvo.h13
-rw-r--r--drivers/hid/hid-roccat-common.c20
-rw-r--r--drivers/hid/hid-roccat-common.h4
-rw-r--r--drivers/hid/hid-roccat-kone.c56
-rw-r--r--drivers/hid/hid-roccat-kone.h2
-rw-r--r--drivers/hid/hid-roccat-koneplus.c49
-rw-r--r--drivers/hid/hid-roccat-koneplus.h23
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c25
-rw-r--r--drivers/hid/hid-roccat-kovaplus.h9
-rw-r--r--drivers/hid/hid-roccat-pyra.c25
-rw-r--r--drivers/hid/hid-roccat-pyra.h9
-rw-r--r--drivers/hid/hid-sony.c35
-rw-r--r--drivers/hid/hid-speedlink.c89
-rw-r--r--drivers/hid/hid-uclogic.c195
-rw-r--r--drivers/hid/hid-wiimote.c489
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/infiniband/core/cache.c3
-rw-r--r--drivers/infiniband/core/cma.c84
-rw-r--r--drivers/infiniband/core/device.c6
-rw-r--r--drivers/infiniband/hw/amso1100/c2_provider.c9
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/resource.c9
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c11
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mad.c198
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c68
-rw-r--r--drivers/infiniband/hw/mlx4/main.c21
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c276
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.h93
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_eq.c43
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c175
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mcg.c101
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c43
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c35
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c77
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c49
-rw-r--r--drivers/infiniband/hw/mthca/mthca_reset.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c33
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c11
-rw-r--r--drivers/infiniband/hw/qib/qib.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c16
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c72
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c78
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h143
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c14
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c2
-rw-r--r--drivers/leds/Kconfig19
-rw-r--r--drivers/leds/leds-gpio.c6
-rw-r--r--drivers/lguest/core.c2
-rw-r--r--drivers/lguest/interrupts_and_traps.c10
-rw-r--r--drivers/lguest/lg.h2
-rw-r--r--drivers/lguest/lguest_device.c37
-rw-r--r--drivers/lguest/lguest_user.c17
-rw-r--r--drivers/lguest/page_tables.c282
-rw-r--r--drivers/lguest/x86/core.c107
-rw-r--r--drivers/mmc/card/block.c681
-rw-r--r--drivers/mmc/card/mmc_test.c498
-rw-r--r--drivers/mmc/card/queue.c217
-rw-r--r--drivers/mmc/card/queue.h33
-rw-r--r--drivers/mmc/core/core.c197
-rw-r--r--drivers/mmc/core/sd.c68
-rw-r--r--drivers/mmc/core/sdio_bus.c8
-rw-r--r--drivers/mmc/host/Kconfig84
-rw-r--r--drivers/mmc/host/Makefile25
-rw-r--r--drivers/mmc/host/at91_mci.c3
-rw-r--r--drivers/mmc/host/at91_mci.h (renamed from arch/arm/mach-at91/include/mach/at91_mci.h)2
-rw-r--r--drivers/mmc/host/atmel-mci.c63
-rw-r--r--drivers/mmc/host/dw_mmc.c446
-rw-r--r--drivers/mmc/host/dw_mmc.h17
-rw-r--r--drivers/mmc/host/mmci.c147
-rw-r--r--drivers/mmc/host/mmci.h8
-rw-r--r--drivers/mmc/host/mxs-mmc.c30
-rw-r--r--drivers/mmc/host/omap_hsmmc.c671
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c44
-rw-r--r--drivers/mmc/host/sdhci-dove.c43
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c125
-rw-r--r--drivers/mmc/host/sdhci-of-core.c253
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c86
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c67
-rw-r--r--drivers/mmc/host/sdhci-of.h42
-rw-r--r--drivers/mmc/host/sdhci-pci.c54
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c216
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h90
-rw-r--r--drivers/mmc/host/sdhci-pxa.c303
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c244
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c289
-rw-r--r--drivers/mmc/host/sdhci-s3c.c6
-rw-r--r--drivers/mmc/host/sdhci-tegra.c117
-rw-r--r--drivers/mmc/host/sdhci.c34
-rw-r--r--drivers/mmc/host/sh_mmcif.c27
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c36
-rw-r--r--drivers/mmc/host/tmio_mmc.h53
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c7
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c205
-rw-r--r--drivers/mtd/ubi/build.c28
-rw-r--r--drivers/mtd/ubi/debug.c269
-rw-r--r--drivers/mtd/ubi/debug.h113
-rw-r--r--drivers/mtd/ubi/io.c20
-rw-r--r--drivers/mtd/ubi/scan.c2
-rw-r--r--drivers/mtd/ubi/ubi.h8
-rw-r--r--drivers/mtd/ubi/vmt.c2
-rw-r--r--drivers/mtd/ubi/vtbl.c18
-rw-r--r--drivers/mtd/ubi/wl.c42
-rw-r--r--drivers/net/bna/bfa_cee.c65
-rw-r--r--drivers/net/bna/bfa_cee.h3
-rw-r--r--drivers/net/bna/bfa_cs.h (renamed from drivers/net/bna/bfa_sm.h)78
-rw-r--r--drivers/net/bna/bfa_defs.h5
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h20
-rw-r--r--drivers/net/bna/bfa_defs_status.h134
-rw-r--r--drivers/net/bna/bfa_ioc.c157
-rw-r--r--drivers/net/bna/bfa_ioc.h51
-rw-r--r--drivers/net/bna/bfa_wc.h69
-rw-r--r--drivers/net/bna/bfi.h20
-rw-r--r--drivers/net/bna/bna.h18
-rw-r--r--drivers/net/bna/bna_ctrl.c45
-rw-r--r--drivers/net/bna/bna_hw.h92
-rw-r--r--drivers/net/bna/bna_txrx.c44
-rw-r--r--drivers/net/bna/bna_types.h58
-rw-r--r--drivers/net/bna/bnad.c64
-rw-r--r--drivers/net/bna/bnad.h27
-rw-r--r--drivers/net/bna/bnad_ethtool.c2
-rw-r--r--drivers/net/bna/cna.h2
-rw-r--r--drivers/net/mlx4/en_ethtool.c9
-rw-r--r--drivers/net/mlx4/en_main.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c5
-rw-r--r--drivers/net/mlx4/en_port.c6
-rw-r--r--drivers/net/mlx4/en_selftest.c3
-rw-r--r--drivers/net/mlx4/fw.c39
-rw-r--r--drivers/net/mlx4/fw.h8
-rw-r--r--drivers/net/mlx4/main.c58
-rw-r--r--drivers/net/mlx4/mcg.c17
-rw-r--r--drivers/net/mlx4/mlx4.h5
-rw-r--r--drivers/net/mlx4/port.c8
-rw-r--r--drivers/net/r8169.c7
-rw-r--r--drivers/net/wan/sbni.c5
-rw-r--r--drivers/of/Kconfig8
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c18
-rw-r--r--drivers/of/base.c65
-rw-r--r--drivers/of/gpio.c11
-rw-r--r--drivers/of/of_pci.c112
-rw-r--r--drivers/of/of_pci_irq.c92
-rw-r--r--drivers/of/platform.c196
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c2
-rw-r--r--drivers/pci/of.c61
-rw-r--r--drivers/pci/pci-driver.c18
-rw-r--r--drivers/pci/probe.c7
-rw-r--r--drivers/pci/quirks.c23
-rw-r--r--drivers/s390/char/vmwatchdog.c4
-rw-r--r--drivers/s390/cio/css.c8
-rw-r--r--drivers/scsi/scsi_pm.c8
-rw-r--r--drivers/spi/Kconfig48
-rw-r--r--drivers/spi/Makefile111
-rw-r--r--drivers/spi/atmel_spi.h167
-rw-r--r--drivers/spi/spi-altera.c (renamed from drivers/spi/spi_altera.c)0
-rw-r--r--drivers/spi/spi-ath79.c (renamed from drivers/spi/ath79_spi.c)2
-rw-r--r--drivers/spi/spi-atmel.c (renamed from drivers/spi/atmel_spi.c)155
-rw-r--r--drivers/spi/spi-au1550.c (renamed from drivers/spi/au1550_spi.c)2
-rw-r--r--drivers/spi/spi-bfin-sport.c (renamed from drivers/spi/spi_bfin_sport.c)0
-rw-r--r--drivers/spi/spi-bfin5xx.c (renamed from drivers/spi/spi_bfin5xx.c)218
-rw-r--r--drivers/spi/spi-bitbang-txrx.h (renamed from drivers/spi/spi_bitbang_txrx.h)0
-rw-r--r--drivers/spi/spi-bitbang.c (renamed from drivers/spi/spi_bitbang.c)8
-rw-r--r--drivers/spi/spi-butterfly.c (renamed from drivers/spi/spi_butterfly.c)4
-rw-r--r--drivers/spi/spi-coldfire-qspi.c (renamed from drivers/spi/coldfire_qspi.c)0
-rw-r--r--drivers/spi/spi-davinci.c (renamed from drivers/spi/davinci_spi.c)0
-rw-r--r--drivers/spi/spi-dw-mid.c (renamed from drivers/spi/dw_spi_mid.c)4
-rw-r--r--drivers/spi/spi-dw-mmio.c (renamed from drivers/spi/dw_spi_mmio.c)4
-rw-r--r--drivers/spi/spi-dw-pci.c (renamed from drivers/spi/dw_spi_pci.c)4
-rw-r--r--drivers/spi/spi-dw.c (renamed from drivers/spi/dw_spi.c)8
-rw-r--r--drivers/spi/spi-dw.h (renamed from drivers/spi/dw_spi.h)1
-rw-r--r--drivers/spi/spi-ep93xx.c (renamed from drivers/spi/ep93xx_spi.c)303
-rw-r--r--drivers/spi/spi-fsl-espi.c (renamed from drivers/spi/spi_fsl_espi.c)2
-rw-r--r--drivers/spi/spi-fsl-lib.c (renamed from drivers/spi/spi_fsl_lib.c)2
-rw-r--r--drivers/spi/spi-fsl-lib.h (renamed from drivers/spi/spi_fsl_lib.h)0
-rw-r--r--drivers/spi/spi-fsl-spi.c (renamed from drivers/spi/spi_fsl_spi.c)30
-rw-r--r--drivers/spi/spi-gpio.c (renamed from drivers/spi/spi_gpio.c)6
-rw-r--r--drivers/spi/spi-imx.c (renamed from drivers/spi/spi_imx.c)466
-rw-r--r--drivers/spi/spi-lm70llp.c (renamed from drivers/spi/spi_lm70llp.c)4
-rw-r--r--drivers/spi/spi-mpc512x-psc.c (renamed from drivers/spi/mpc512x_psc_spi.c)0
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c (renamed from drivers/spi/mpc52xx_psc_spi.c)0
-rw-r--r--drivers/spi/spi-mpc52xx.c (renamed from drivers/spi/mpc52xx_spi.c)0
-rw-r--r--drivers/spi/spi-nuc900.c (renamed from drivers/spi/spi_nuc900.c)5
-rw-r--r--drivers/spi/spi-oc-tiny.c (renamed from drivers/spi/spi_oc_tiny.c)0
-rw-r--r--drivers/spi/spi-omap-100k.c (renamed from drivers/spi/omap_spi_100k.c)0
-rw-r--r--drivers/spi/spi-omap-uwire.c (renamed from drivers/spi/omap_uwire.c)2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c (renamed from drivers/spi/omap2_mcspi.c)10
-rw-r--r--drivers/spi/spi-orion.c (renamed from drivers/spi/orion_spi.c)8
-rw-r--r--drivers/spi/spi-pl022.c (renamed from drivers/spi/amba-pl022.c)111
-rw-r--r--drivers/spi/spi-ppc4xx.c (renamed from drivers/spi/spi_ppc4xx.c)2
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c (renamed from drivers/spi/pxa2xx_spi_pci.c)0
-rw-r--r--drivers/spi/spi-pxa2xx.c (renamed from drivers/spi/pxa2xx_spi.c)0
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.S (renamed from drivers/spi/spi_s3c24xx_fiq.S)2
-rw-r--r--drivers/spi/spi-s3c24xx-fiq.h (renamed from drivers/spi/spi_s3c24xx_fiq.h)0
-rw-r--r--drivers/spi/spi-s3c24xx.c (renamed from drivers/spi/spi_s3c24xx.c)5
-rw-r--r--drivers/spi/spi-s3c64xx.c (renamed from drivers/spi/spi_s3c64xx.c)3
-rw-r--r--drivers/spi/spi-sh-msiof.c (renamed from drivers/spi/spi_sh_msiof.c)0
-rw-r--r--drivers/spi/spi-sh-sci.c (renamed from drivers/spi/spi_sh_sci.c)2
-rw-r--r--drivers/spi/spi-sh.c (renamed from drivers/spi/spi_sh.c)0
-rw-r--r--drivers/spi/spi-stmp.c (renamed from drivers/spi/spi_stmp.c)0
-rw-r--r--drivers/spi/spi-tegra.c (renamed from drivers/spi/spi_tegra.c)20
-rw-r--r--drivers/spi/spi-ti-ssp.c (renamed from drivers/spi/ti-ssp-spi.c)0
-rw-r--r--drivers/spi/spi-tle62x0.c (renamed from drivers/spi/tle62x0.c)2
-rw-r--r--drivers/spi/spi-topcliff-pch.c (renamed from drivers/spi/spi_topcliff_pch.c)1158
-rw-r--r--drivers/spi/spi-txx9.c (renamed from drivers/spi/spi_txx9.c)2
-rw-r--r--drivers/spi/spi-xilinx.c (renamed from drivers/spi/xilinx_spi.c)0
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/spi/spi_s3c24xx_gpio.c201
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/target/loopback/Kconfig6
-rw-r--r--drivers/target/loopback/tcm_loop.c207
-rw-r--r--drivers/target/loopback/tcm_loop.h6
-rw-r--r--drivers/target/target_core_alua.c426
-rw-r--r--drivers/target/target_core_cdb.c457
-rw-r--r--drivers/target/target_core_configfs.c667
-rw-r--r--drivers/target/target_core_device.c819
-rw-r--r--drivers/target/target_core_fabric_configfs.c122
-rw-r--r--drivers/target/target_core_fabric_lib.c27
-rw-r--r--drivers/target/target_core_file.c149
-rw-r--r--drivers/target/target_core_file.h4
-rw-r--r--drivers/target/target_core_hba.c37
-rw-r--r--drivers/target/target_core_iblock.c199
-rw-r--r--drivers/target/target_core_iblock.h9
-rw-r--r--drivers/target/target_core_pr.c862
-rw-r--r--drivers/target/target_core_pr.h2
-rw-r--r--drivers/target/target_core_pscsi.c316
-rw-r--r--drivers/target/target_core_pscsi.h4
-rw-r--r--drivers/target/target_core_rd.c483
-rw-r--r--drivers/target/target_core_rd.h4
-rw-r--r--drivers/target/target_core_scdb.c20
-rw-r--r--drivers/target/target_core_scdb.h10
-rw-r--r--drivers/target/target_core_stat.c112
-rw-r--r--drivers/target/target_core_tmr.c185
-rw-r--r--drivers/target/target_core_tpg.c206
-rw-r--r--drivers/target/target_core_transport.c3626
-rw-r--r--drivers/target/target_core_ua.c62
-rw-r--r--drivers/target/tcm_fc/Makefile17
-rw-r--r--drivers/target/tcm_fc/tcm_fc.h25
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c114
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c47
-rw-r--r--drivers/target/tcm_fc/tfc_io.c130
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c18
-rw-r--r--drivers/tty/serial/of_serial.c43
-rw-r--r--drivers/xen/Kconfig46
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/events.c7
-rw-r--r--drivers/xen/tmem.c170
-rw-r--r--drivers/xen/xen-balloon.c2
-rw-r--r--drivers/xen/xen-pciback/Makefile7
-rw-r--r--drivers/xen/xen-pciback/conf_space.c438
-rw-r--r--drivers/xen/xen-pciback/conf_space.h126
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c207
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c386
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.c140
-rw-r--r--drivers/xen/xen-pciback/conf_space_quirks.h33
-rw-r--r--drivers/xen/xen-pciback/passthrough.c194
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c1376
-rw-r--r--drivers/xen/xen-pciback/pciback.h183
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c384
-rw-r--r--drivers/xen/xen-pciback/vpci.c259
-rw-r--r--drivers/xen/xen-pciback/xenbus.c749
-rw-r--r--drivers/xen/xen-selfballoon.c485
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c44
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h2
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c6
-rw-r--r--fs/cifs/file.c2
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/dlm/ast.c265
-rw-r--r--fs/dlm/ast.h15
-rw-r--r--fs/dlm/config.c75
-rw-r--r--fs/dlm/config.h2
-rw-r--r--fs/dlm/dlm_internal.h29
-rw-r--r--fs/dlm/lock.c225
-rw-r--r--fs/dlm/lockspace.c177
-rw-r--r--fs/dlm/lowcomms.c9
-rw-r--r--fs/dlm/memory.c22
-rw-r--r--fs/dlm/memory.h2
-rw-r--r--fs/dlm/recoverd.c12
-rw-r--r--fs/dlm/user.c12
-rw-r--r--fs/exec.c27
-rw-r--r--fs/fscache/page.c14
-rw-r--r--fs/gfs2/bmap.c12
-rw-r--r--fs/gfs2/dir.c221
-rw-r--r--fs/gfs2/dir.h1
-rw-r--r--fs/gfs2/file.c4
-rw-r--r--fs/gfs2/glock.c39
-rw-r--r--fs/gfs2/glock.h6
-rw-r--r--fs/gfs2/glops.c7
-rw-r--r--fs/gfs2/incore.h3
-rw-r--r--fs/gfs2/main.c1
-rw-r--r--fs/gfs2/ops_fstype.c1
-rw-r--r--fs/gfs2/rgrp.c52
-rw-r--r--fs/gfs2/rgrp.h4
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/hfsplus/brec.c4
-rw-r--r--fs/hfsplus/catalog.c14
-rw-r--r--fs/hfsplus/dir.c8
-rw-r--r--fs/hfsplus/extents.c50
-rw-r--r--fs/hfsplus/hfsplus_fs.h18
-rw-r--r--fs/hfsplus/inode.c12
-rw-r--r--fs/hfsplus/part_tbl.c32
-rw-r--r--fs/hfsplus/super.c43
-rw-r--r--fs/hfsplus/unicode.c35
-rw-r--r--fs/hfsplus/wrapper.c92
-rw-r--r--fs/proc/array.c2
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/ubifs/commit.c8
-rw-r--r--fs/ubifs/debug.c762
-rw-r--r--fs/ubifs/debug.h241
-rw-r--r--fs/ubifs/dir.c16
-rw-r--r--fs/ubifs/file.c2
-rw-r--r--fs/ubifs/io.c168
-rw-r--r--fs/ubifs/log.c6
-rw-r--r--fs/ubifs/lprops.c8
-rw-r--r--fs/ubifs/lpt.c37
-rw-r--r--fs/ubifs/lpt_commit.c40
-rw-r--r--fs/ubifs/misc.h103
-rw-r--r--fs/ubifs/orphan.c2
-rw-r--r--fs/ubifs/recovery.c43
-rw-r--r--fs/ubifs/replay.c3
-rw-r--r--fs/ubifs/sb.c6
-rw-r--r--fs/ubifs/scan.c4
-rw-r--r--fs/ubifs/super.c6
-rw-r--r--fs/ubifs/tnc.c26
-rw-r--r--fs/ubifs/tnc_commit.c145
-rw-r--r--fs/ubifs/ubifs.h21
-rw-r--r--fs/xfs/Makefile2
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c20
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c79
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.h64
-rw-r--r--fs/xfs/linux-2.6/xfs_export.c4
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c433
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h7
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c36
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.c10
-rw-r--r--fs/xfs/linux-2.6/xfs_sync.h8
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h60
-rw-r--r--fs/xfs/quota/xfs_dquot.c48
-rw-r--r--fs/xfs/quota/xfs_dquot.h6
-rw-r--r--fs/xfs/quota/xfs_qm.c49
-rw-r--r--fs/xfs/quota/xfs_qm.h6
-rw-r--r--fs/xfs/quota/xfs_qm_syscalls.c355
-rw-r--r--fs/xfs/quota/xfs_trans_dquot.c15
-rw-r--r--fs/xfs/xfs.h1
-rw-r--r--fs/xfs/xfs_alloc.c14
-rw-r--r--fs/xfs/xfs_alloc_btree.c84
-rw-r--r--fs/xfs/xfs_arch.h136
-rw-r--r--fs/xfs/xfs_attr.c41
-rw-r--r--fs/xfs/xfs_attr_leaf.c60
-rw-r--r--fs/xfs/xfs_bmap.c41
-rw-r--r--fs/xfs/xfs_bmap_btree.c106
-rw-r--r--fs/xfs/xfs_btree.c29
-rw-r--r--fs/xfs/xfs_btree.h38
-rw-r--r--fs/xfs/xfs_btree_trace.c249
-rw-r--r--fs/xfs/xfs_btree_trace.h99
-rw-r--r--fs/xfs/xfs_buf_item.c75
-rw-r--r--fs/xfs/xfs_da_btree.c272
-rw-r--r--fs/xfs/xfs_da_btree.h13
-rw-r--r--fs/xfs/xfs_dir2.c140
-rw-r--r--fs/xfs/xfs_dir2.h54
-rw-r--r--fs/xfs/xfs_dir2_block.c253
-rw-r--r--fs/xfs/xfs_dir2_block.h92
-rw-r--r--fs/xfs/xfs_dir2_data.c327
-rw-r--r--fs/xfs/xfs_dir2_data.h184
-rw-r--r--fs/xfs/xfs_dir2_format.h597
-rw-r--r--fs/xfs/xfs_dir2_leaf.c417
-rw-r--r--fs/xfs/xfs_dir2_leaf.h253
-rw-r--r--fs/xfs/xfs_dir2_node.c201
-rw-r--r--fs/xfs/xfs_dir2_node.h100
-rw-r--r--fs/xfs/xfs_dir2_priv.h135
-rw-r--r--fs/xfs/xfs_dir2_sf.c338
-rw-r--r--fs/xfs/xfs_dir2_sf.h171
-rw-r--r--fs/xfs/xfs_fs.h5
-rw-r--r--fs/xfs/xfs_ialloc.c14
-rw-r--r--fs/xfs/xfs_ialloc_btree.c75
-rw-r--r--fs/xfs/xfs_iget.c1
-rw-r--r--fs/xfs/xfs_inode.c537
-rw-r--r--fs/xfs/xfs_inode.h25
-rw-r--r--fs/xfs/xfs_inode_item.c17
-rw-r--r--fs/xfs/xfs_inum.h11
-rw-r--r--fs/xfs/xfs_log.c64
-rw-r--r--fs/xfs/xfs_log_recover.c38
-rw-r--r--fs/xfs/xfs_mount.c71
-rw-r--r--fs/xfs/xfs_mount.h2
-rw-r--r--fs/xfs/xfs_trans.c27
-rw-r--r--fs/xfs/xfs_trans_ail.c214
-rw-r--r--fs/xfs/xfs_trans_buf.c118
-rw-r--r--fs/xfs/xfs_trans_inode.c9
-rw-r--r--fs/xfs/xfs_trans_priv.h14
-rw-r--r--fs/xfs/xfs_vnodeops.c479
-rw-r--r--fs/xfs/xfs_vnodeops.h3
-rw-r--r--include/linux/cn_proc.h13
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/firewire-cdev.h78
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/lguest.h2
-rw-r--r--include/linux/mfd/tmio.h8
-rw-r--r--include/linux/mlx4/cmd.h3
-rw-r--r--include/linux/mlx4/device.h60
-rw-r--r--include/linux/mlx4/qp.h8
-rw-r--r--include/linux/mmc/boot.h6
-rw-r--r--include/linux/mmc/card.h2
-rw-r--r--include/linux/mmc/core.h9
-rw-r--r--include/linux/mmc/dw_mmc.h28
-rw-r--r--include/linux/mmc/host.h26
-rw-r--r--include/linux/mmc/ioctl.h2
-rw-r--r--include/linux/mmc/mmc.h17
-rw-r--r--include/linux/mmc/pm.h2
-rw-r--r--include/linux/mmc/sd.h7
-rw-r--r--include/linux/mmc/sdhci-pltfm.h35
-rw-r--r--include/linux/mmc/sdhci-spear.h6
-rw-r--r--include/linux/mmc/sdhci.h6
-rw-r--r--include/linux/mmc/sdio.h7
-rw-r--r--include/linux/mmc/sdio_func.h7
-rw-r--r--include/linux/mmc/sdio_ids.h6
-rw-r--r--include/linux/mmc/sh_mmcif.h6
-rw-r--r--include/linux/mmc/sh_mobile_sdhi.h6
-rw-r--r--include/linux/mmc/tmio.h8
-rw-r--r--include/linux/mtd/ubi.h14
-rw-r--r--include/linux/of.h29
-rw-r--r--include/linux/of_address.h5
-rw-r--r--include/linux/of_gpio.h42
-rw-r--r--include/linux/of_pci.h5
-rw-r--r--include/linux/of_platform.h40
-rw-r--r--include/linux/opp.h8
-rw-r--r--include/linux/pci.h28
-rw-r--r--include/linux/platform_data/pxa_sdhci.h60
-rw-r--r--include/linux/pm.h10
-rw-r--r--include/linux/pm_domain.h108
-rw-r--r--include/linux/pm_runtime.h38
-rw-r--r--include/linux/ptrace.h104
-rw-r--r--include/linux/sched.h69
-rw-r--r--include/linux/slab.h20
-rw-r--r--include/linux/slab_def.h52
-rw-r--r--include/linux/slob_def.h10
-rw-r--r--include/linux/slub_def.h23
-rw-r--r--include/linux/spi/74x164.h2
-rw-r--r--include/linux/spi/mcp23s08.h9
-rw-r--r--include/linux/suspend.h8
-rw-r--r--include/linux/tracehook.h385
-rw-r--r--include/linux/workqueue.h18
-rw-r--r--include/rdma/ib_pma.h156
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--include/target/target_core_base.h228
-rw-r--r--include/target/target_core_device.h6
-rw-r--r--include/target/target_core_fabric_ops.h9
-rw-r--r--include/target/target_core_transport.h43
-rw-r--r--include/xen/balloon.h10
-rw-r--r--include/xen/events.h2
-rw-r--r--include/xen/hvc-console.h4
-rw-r--r--include/xen/interface/xen.h39
-rw-r--r--include/xen/tmem.h5
-rw-r--r--include/xen/xenbus.h2
-rw-r--r--kernel/exit.c91
-rw-r--r--kernel/fork.c33
-rw-r--r--kernel/power/Kconfig8
-rw-r--r--kernel/power/main.c5
-rw-r--r--kernel/power/suspend.c20
-rw-r--r--kernel/ptrace.c197
-rw-r--r--kernel/rcutree_plugin.h53
-rw-r--r--kernel/sched.c233
-rw-r--r--kernel/sched_fair.c46
-rw-r--r--kernel/sched_features.h2
-rw-r--r--kernel/signal.c444
-rw-r--r--kernel/softirq.c12
-rw-r--r--kernel/workqueue.c81
-rw-r--r--mm/nommu.c3
-rw-r--r--mm/oom_kill.c3
-rw-r--r--mm/slab.c17
-rw-r--r--mm/slob.c6
-rw-r--r--mm/slub.c105
-rw-r--r--net/bridge/br_if.c2
-rw-r--r--net/bridge/br_netlink.c2
-rw-r--r--net/bridge/br_private.h1
-rw-r--r--net/bridge/br_private_stp.h3
-rw-r--r--net/bridge/br_stp.c31
-rw-r--r--net/bridge/br_stp_bpdu.c15
-rw-r--r--net/bridge/br_stp_if.c3
-rw-r--r--net/bridge/br_stp_timer.c1
-rw-r--r--net/core/link_watch.c2
-rw-r--r--security/apparmor/domain.c2
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--sound/soc/ep93xx/ep93xx-ac97.c4
-rw-r--r--sound/soc/ep93xx/ep93xx-i2s.c4
-rw-r--r--sound/soc/ep93xx/ep93xx-pcm.c137
748 files changed, 32145 insertions, 22484 deletions
diff --git a/Documentation/ABI/stable/firewire-cdev b/Documentation/ABI/stable/firewire-cdev
new file mode 100644
index 00000000000..16d03082736
--- /dev/null
+++ b/Documentation/ABI/stable/firewire-cdev
@@ -0,0 +1,103 @@
+What: /dev/fw[0-9]+
+Date: May 2007
+KernelVersion: 2.6.22
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ The character device files /dev/fw* are the interface between
+ firewire-core and IEEE 1394 device drivers implemented in
+ userspace. The ioctl(2)- and read(2)-based ABI is defined and
+ documented in <linux/firewire-cdev.h>.
+
+ This ABI offers most of the features which firewire-core also
+ exposes to kernelspace IEEE 1394 drivers.
+
+ Each /dev/fw* is associated with one IEEE 1394 node, which can
+ be a remote or local node. Operations on a /dev/fw* file have
+ different scope:
+ - The 1394 node which is associated with the file:
+ - Asynchronous request transmission
+ - Get the Configuration ROM
+ - Query node ID
+ - Query maximum speed of the path between this node
+ and local node
+ - The 1394 bus (i.e. "card") to which the node is attached:
+ - Isochronous stream transmission and reception
+ - Asynchronous stream transmission and reception
+ - Asynchronous broadcast request transmission
+ - PHY packet transmission and reception
+ - Allocate, reallocate, deallocate isochronous
+ resources (channels, bandwidth) at the bus's IRM
+ - Query node IDs of local node, root node, IRM, bus
+ manager
+ - Query cycle time
+ - Bus reset initiation, bus reset event reception
+ - All 1394 buses:
+ - Allocation of IEEE 1212 address ranges on the local
+ link layers, reception of inbound requests to such
+ an address range, asynchronous response transmission
+ to inbound requests
+ - Addition of descriptors or directories to the local
+ nodes' Configuration ROM
+
+ Due to the different scope of operations and in order to let
+ userland implement different access permission models, some
+ operations are restricted to /dev/fw* files that are associated
+ with a local node:
+ - Addition of descriptors or directories to the local
+ nodes' Configuration ROM
+ - PHY packet transmission and reception
+
+ A /dev/fw* file remains associated with one particular node
+ during its entire lifetime. Bus topology changes, and hence
+ node ID changes, are tracked by firewire-core. ABI users do not
+ need to be aware of topology.
+
+ The following file operations are supported:
+
+ open(2)
+ Currently the only useful flag is O_RDWR.
+
+ ioctl(2)
+ Initiate various actions. Some take immediate effect, others
+ are performed asynchronously while or after the ioctl returns.
+ See the inline documentation in <linux/firewire-cdev.h> for
+ descriptions of all ioctls.
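+
+ A hedged sketch of the "query cycle time" operation mentioned
+ above (error handling omitted; the ioctl and struct come from
+ <linux/firewire-cdev.h>):
+
+     struct fw_cdev_get_cycle_timer ctr;
+
+     /* fd refers to an open /dev/fw* file */
+     if (ioctl(fd, FW_CDEV_IOC_GET_CYCLE_TIMER, &ctr) == 0)
+         printf("cycle timer %u at local time %llu us\n",
+                ctr.cycle_timer,
+                (unsigned long long)ctr.local_time);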
+
+ poll(2), select(2), epoll_wait(2) etc.
+ Watch for events to become available to be read.
+
+ read(2)
+ Receive various events. There are solicited events like
+ outbound asynchronous transaction completion or isochronous
+ buffer completion, and unsolicited events such as bus resets,
+ request reception, or PHY packet reception. Always use a read
+ buffer which is large enough to receive the largest event that
+ could ever arrive. See <linux/firewire-cdev.h> for descriptions
+ of all event types and for which ioctls affect reception of
+ events.
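+
+ A hedged sketch of an event read (the 16 kB buffer size is an
+ assumption chosen to exceed any event; the struct and constant
+ come from <linux/firewire-cdev.h>, handle_bus_reset() is a
+ hypothetical handler):
+
+     char buffer[16 * 1024];
+     struct fw_cdev_event_common *event = (void *)buffer;
+     ssize_t len = read(fd, buffer, sizeof(buffer));
+
+     if (len >= (ssize_t)sizeof(*event) &&
+         event->type == FW_CDEV_EVENT_BUS_RESET)
+         /* node IDs may have changed after a bus reset */
+         handle_bus_reset(buffer);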
+
+ mmap(2)
+ Allocate a DMA buffer for isochronous reception or transmission
+ and map it into the process address space. The arguments should
+ be used as follows: addr = NULL, length = the desired buffer
+ size, i.e. number of packets times size of largest packet,
+ prot = at least PROT_READ for reception and at least PROT_WRITE
+ for transmission, flags = MAP_SHARED, fd = the handle to the
+ /dev/fw*, offset = 0.
+
+ Isochronous reception works in packet-per-buffer fashion except
+ for multichannel reception, which works in buffer-fill mode.
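+
+ A hedged sketch of the mapping call with the arguments described
+ above (the buffer geometry is an illustrative assumption):
+
+     size_t length = 64 * 4096; /* packets * largest packet size */
+     void *buf = mmap(NULL, length, PROT_READ | PROT_WRITE,
+                      MAP_SHARED, fd, 0);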
+
+ munmap(2)
+ Unmap the isochronous I/O buffer from the process address space.
+
+ close(2)
+ Besides stopping and freeing I/O contexts that were associated
+ with the file descriptor, back out any changes to the local
+ nodes' Configuration ROM. Deallocate isochronous channels and
+ bandwidth at the IRM that were marked for kernel-assisted
+ re- and deallocation.
+
+Users: libraw1394
+ libdc1394
+ tools like jujuutils, fwhack, ...
diff --git a/Documentation/ABI/stable/sysfs-bus-firewire b/Documentation/ABI/stable/sysfs-bus-firewire
new file mode 100644
index 00000000000..3d484e5dc84
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-firewire
@@ -0,0 +1,122 @@
+What: /sys/bus/firewire/devices/fw[0-9]+/
+Date: May 2007
+KernelVersion: 2.6.22
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ IEEE 1394 node device attributes.
+ Read-only. Mutable during the node device's lifetime.
+ See IEEE 1212 for semantic definitions.
+
+ config_rom
+ Contents of the Configuration ROM register.
+ Binary attribute; an array of host-endian u32.
+
+ guid
+ The node's EUI-64 in the bus information block of
+ Configuration ROM.
+ Hexadecimal string representation of an u64.
+
+
+What: /sys/bus/firewire/devices/fw[0-9]+/units
+Date: June 2009
+KernelVersion: 2.6.31
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ IEEE 1394 node device attribute.
+ Read-only. Mutable during the node device's lifetime.
+ See IEEE 1212 for semantic definitions.
+
+ units
+ Summary of all units present in an IEEE 1394 node.
+ Contains space-separated tuples of specifier_id and
+ version of each unit present in the node. Specifier_id
+ and version are hexadecimal string representations of
+ u24 of the respective unit directory entries.
+ Specifier_id and version within each tuple are separated
+ by a colon.
+
+Users: udev rules to set ownership and access permissions or ACLs of
+ /dev/fw[0-9]+ character device files
+
+
+What: /sys/bus/firewire/devices/fw[0-9]+[.][0-9]+/
+Date: May 2007
+KernelVersion: 2.6.22
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ IEEE 1394 unit device attributes.
+ Read-only. Immutable during the unit device's lifetime.
+ See IEEE 1212 for semantic definitions.
+
+ modalias
+ Same as MODALIAS in the uevent at device creation.
+
+ rom_index
+ Offset of the unit directory within the parent device's
+ (node device's) Configuration ROM, in quadlets.
+ Decimal string representation.
+
+
+What: /sys/bus/firewire/devices/*/
+Date: May 2007
+KernelVersion: 2.6.22
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ Attributes common to IEEE 1394 node devices and unit devices.
+ Read-only. Mutable during the node device's lifetime.
+ Immutable during the unit device's lifetime.
+ See IEEE 1212 for semantic definitions.
+
+ These attributes are only created if the root directory of an
+ IEEE 1394 node or the unit directory of an IEEE 1394 unit
+ actually contains corresponding entries.
+
+ hardware_version
+ Hexadecimal string representation of an u24.
+
+ hardware_version_name
+ Contents of a respective textual descriptor leaf.
+
+ model
+ Hexadecimal string representation of an u24.
+
+ model_name
+ Contents of a respective textual descriptor leaf.
+
+ specifier_id
+ Hexadecimal string representation of an u24.
+ Mandatory in unit directories according to IEEE 1212.
+
+ vendor
+ Hexadecimal string representation of an u24.
+ Mandatory in the root directory according to IEEE 1212.
+
+ vendor_name
+ Contents of a respective textual descriptor leaf.
+
+ version
+ Hexadecimal string representation of an u24.
+ Mandatory in unit directories according to IEEE 1212.
+
+
+What: /sys/bus/firewire/drivers/sbp2/fw*/host*/target*/*:*:*:*/ieee1394_id
+ formerly
+ /sys/bus/ieee1394/drivers/sbp2/fw*/host*/target*/*:*:*:*/ieee1394_id
+Date: Feb 2004
+KernelVersion: 2.6.4
+Contact: linux1394-devel@lists.sourceforge.net
+Description:
+ SCSI target port identifier and logical unit identifier of a
+ logical unit of an SBP-2 target. The identifiers are specified
+ in SAM-2...SAM-4 annex A. They are persistent and world-wide
+ unique properties of the SBP-2 attached target.
+
+ Read-only attribute, immutable during the target's lifetime.
+ Format, as exposed by firewire-sbp2 since 2.6.22, May 2007:
+ Colon-separated hexadecimal string representations of
+ u64 EUI-64 : u24 directory_ID : u16 LUN
+ without 0x prefixes, without whitespace. The former sbp2 driver
+ (removed in 2.6.37 after being superseded by firewire-sbp2) used
+ a somewhat shorter format which was not as close to SAM.
+
+Users: udev rules to create /dev/disk/by-id/ symlinks
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
index c1b53b8bc2a..65e6e5dd67e 100644
--- a/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
+++ b/Documentation/ABI/testing/sysfs-driver-hid-roccat-koneplus
@@ -92,6 +92,14 @@ Description: The mouse has a tracking- and a distance-control-unit. These
This file is writeonly.
Users: http://roccat.sourceforge.net
+What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/talk
+Date: May 2011
+Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
+Description: Used to activate some easy* functions of the mouse from outside.
+ The data has to be 16 bytes long.
+ This file is writeonly.
+Users: http://roccat.sourceforge.net
+
What: /sys/bus/usb/devices/<busnum>-<devnum>:<config num>.<interface num>/<hid-bus>:<vendor-id>:<product-id>.<num>/koneplus/roccatkoneplus<minor>/tcu
Date: October 2010
Contact: Stefan Achatz <erazor_de@users.sourceforge.net>
diff --git a/Documentation/ABI/testing/sysfs-driver-hid-wiimote b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
new file mode 100644
index 00000000000..5d5a16ea57c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-hid-wiimote
@@ -0,0 +1,10 @@
+What: /sys/bus/hid/drivers/wiimote/<dev>/led1
+What: /sys/bus/hid/drivers/wiimote/<dev>/led2
+What: /sys/bus/hid/drivers/wiimote/<dev>/led3
+What: /sys/bus/hid/drivers/wiimote/<dev>/led4
+Date: July 2011
+KernelVersion: 3.1
+Contact: David Herrmann <dh.herrmann@googlemail.com>
+Description: Make it possible to set/get the current led state. Reading from
+ it returns 0 if the led is off and 1 if it is on. Writing 0 to it
+ disables the led, writing 1 enables it.
diff --git a/Documentation/devicetree/bindings/arm/primecell.txt b/Documentation/devicetree/bindings/arm/primecell.txt
new file mode 100644
index 00000000000..1d5d7a870ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/primecell.txt
@@ -0,0 +1,21 @@
+* ARM Primecell Peripherals
+
+ARM, Ltd. Primecell peripherals have a standard id register that can be used to
+identify the peripheral type, vendor, and revision. This value can be used for
+driver matching.
+
+Required properties:
+
+- compatible : should be a specific value for peripheral and "arm,primecell"
+
+Optional properties:
+
+- arm,primecell-periphid : Value to override the h/w value with
+
+Example:
+
+serial@fff36000 {
+ compatible = "arm,pl011", "arm,primecell";
+ arm,primecell-periphid = <0x00341011>;
+};
+
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/sec.txt b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
index 2b6f2d45c45..38988ef1336 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/sec.txt
+++ b/Documentation/devicetree/bindings/crypto/fsl-sec2.txt
@@ -1,4 +1,4 @@
-Freescale SoC SEC Security Engines
+Freescale SoC SEC Security Engines versions 2.x-3.x
Required properties:
diff --git a/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
new file mode 100644
index 00000000000..4363ae4b3c1
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/fsl-imx-gpio.txt
@@ -0,0 +1,22 @@
+* Freescale i.MX/MXC GPIO controller
+
+Required properties:
+- compatible : Should be "fsl,<soc>-gpio"
+- reg : Address and length of the register set for the device
+- interrupts : Should be the port interrupt shared by all 32 pins if
+ there is one number. If there are two numbers, the first one is the
+ interrupt shared by the low 16 pins and the second one is for the high
+ 16 pins.
+- gpio-controller : Marks the device node as a gpio controller.
+- #gpio-cells : Should be two. The first cell is the pin number and
+ the second cell is used to specify optional parameters (currently
+ unused).
+
+Example:
+
+gpio0: gpio@73f84000 {
+ compatible = "fsl,imx51-gpio", "fsl,imx31-gpio";
+ reg = <0x73f84000 0x4000>;
+ interrupts = <50 51>;
+ gpio-controller;
+ #gpio-cells = <2>;
+};
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index edaa84d288a..4e16ba4feab 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -4,17 +4,45 @@ Specifying GPIO information for devices
1) gpios property
-----------------
-Nodes that makes use of GPIOs should define them using `gpios' property,
-format of which is: <&gpio-controller1-phandle gpio1-specifier
- &gpio-controller2-phandle gpio2-specifier
- 0 /* holes are permitted, means no GPIO 3 */
- &gpio-controller4-phandle gpio4-specifier
- ...>;
+Nodes that make use of GPIOs should specify them using one or more
+properties, each containing a 'gpio-list':
-Note that gpio-specifier length is controller dependent.
+ gpio-list ::= <single-gpio> [gpio-list]
+ single-gpio ::= <gpio-phandle> <gpio-specifier>
+ gpio-phandle : phandle to gpio controller node
+ gpio-specifier : Array of #gpio-cells specifying specific gpio
+ (controller specific)
+
+GPIO properties should be named "[<name>-]gpios". Exact
+meaning of each gpios property must be documented in the device tree
+binding for each device.
+
+For example, the following could be used to describe GPIO pins to use
+as chip select lines, with chip selects 0, 1 and 3 populated, and chip
+select 2 left empty:
+
+ gpio1: gpio1 {
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+ gpio2: gpio2 {
+ gpio-controller;
+ #gpio-cells = <1>;
+ };
+ [...]
+ chipsel-gpios = <&gpio1 12 0>,
+ <&gpio1 13 0>,
+ <0>, /* holes are permitted, means no GPIO 2 */
+ <&gpio2 2>;
+
+Note that gpio-specifier length is controller dependent. In the
+above example, &gpio1 uses 2 cells to specify a gpio, while &gpio2
+only uses one.
gpio-specifier may encode: bank, pin position inside the bank,
whether pin is open-drain and whether pin is logically inverted.
+Exact meaning of each specifier cell is controller specific, and must
+be documented in the device tree binding for the device.
Example of the node using GPIOs:
@@ -28,8 +56,8 @@ and empty GPIO flags as accepted by the "qe_pio_e" gpio-controller.
2) gpio-controller nodes
------------------------
-Every GPIO controller node must have #gpio-cells property defined,
-this information will be used to translate gpio-specifiers.
+Every GPIO controller node must have both an empty "gpio-controller"
+property, and a #gpio-cells property that contains the size of the
+gpio-specifier.
Example of two SOC GPIO banks defined as gpio-controller nodes:
diff --git a/Documentation/devicetree/bindings/gpio/gpio_nvidia.txt b/Documentation/devicetree/bindings/gpio/gpio_nvidia.txt
new file mode 100644
index 00000000000..eb4b530d64e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio_nvidia.txt
@@ -0,0 +1,8 @@
+NVIDIA Tegra 2 GPIO controller
+
+Required properties:
+- compatible : "nvidia,tegra20-gpio"
+- #gpio-cells : Should be two. The first cell is the pin number and the
+ second cell is used to specify optional parameters:
+ - bit 0 specifies polarity (0 for normal, 1 for inverted)
+- gpio-controller : Marks the device node as a GPIO controller.
diff --git a/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
new file mode 100644
index 00000000000..9841057d112
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/fsl-imx-cspi.txt
@@ -0,0 +1,22 @@
+* Freescale (Enhanced) Configurable Serial Peripheral Interface
+ (CSPI/eCSPI) for i.MX
+
+Required properties:
+- compatible : Should be "fsl,<soc>-cspi" or "fsl,<soc>-ecspi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain CSPI/eCSPI interrupt
+- fsl,spi-num-chipselects : Contains the number of chipselects
+- cs-gpios : Specifies the gpio pins to be used for chipselects.
+
+Example:
+
+ecspi@70010000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,imx51-ecspi";
+ reg = <0x70010000 0x4000>;
+ interrupts = <36>;
+ fsl,spi-num-chipselects = <2>;
+ cs-gpios = <&gpio3 24 0>, /* GPIO4_24 */
+ <&gpio3 25 0>; /* GPIO4_25 */
+};
diff --git a/Documentation/devicetree/bindings/spi/spi_nvidia.txt b/Documentation/devicetree/bindings/spi/spi_nvidia.txt
new file mode 100644
index 00000000000..6b9e5189669
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi_nvidia.txt
@@ -0,0 +1,5 @@
+NVIDIA Tegra 2 SPI device
+
+Required properties:
+- compatible : should be "nvidia,tegra20-spi".
+- gpios : should specify GPIOs used for chipselect.
diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
new file mode 100644
index 00000000000..b8b27b0aca1
--- /dev/null
+++ b/Documentation/devicetree/bindings/tty/serial/of-serial.txt
@@ -0,0 +1,36 @@
+* UART (Universal Asynchronous Receiver/Transmitter)
+
+Required properties:
+- compatible : one of:
+ - "ns8250"
+ - "ns16450"
+ - "ns16550a"
+ - "ns16550"
+ - "ns16750"
+ - "ns16850"
+ - "nvidia,tegra20-uart"
+ - "ibm,qpace-nwp-serial"
+ - "serial" if the port type is unknown.
+- reg : offset and length of the register set for the device.
+- interrupts : should contain uart interrupt.
+- clock-frequency : the input clock frequency for the UART.
+
+Optional properties:
+- current-speed : the current active speed of the UART.
+- reg-offset : offset to apply to the mapbase from the start of the registers.
+- reg-shift : quantity to shift the register offsets by.
+- reg-io-width : the size (in bytes) of the IO accesses that should be
+ performed on the device. There are some systems that require 32-bit
+ accesses to the UART (e.g. TI davinci).
+- used-by-rtas : set to indicate that the port is in use by the OpenFirmware
+ RTAS and should not be registered.
+
+Example:
+
+ uart@80230000 {
+ compatible = "ns8250";
+ reg = <0x80230000 0x100>;
+ clock-frequency = <3686400>;
+ interrupts = <10>;
+ reg-shift = <2>;
+ };
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index b1c921c2751..d59e71df5c5 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -501,16 +501,6 @@ Who: NeilBrown <neilb@suse.de>
----------------------------
-What: cancel_rearming_delayed_work[queue]()
-When: 2.6.39
-
-Why: The functions have been superceded by cancel_delayed_work_sync()
- quite some time ago. The conversion is trivial and there is no
- in-kernel user left.
-Who: Tejun Heo <tj@kernel.org>
-
-----------------------------
-
What: Legacy, non-standard chassis intrusion detection interface.
When: June 2011
Why: The adm9240, w83792d and w83793 hardware monitoring drivers have
diff --git a/Documentation/filesystems/ubifs.txt b/Documentation/filesystems/ubifs.txt
index 8e4fab639d9..a0a61d2f389 100644
--- a/Documentation/filesystems/ubifs.txt
+++ b/Documentation/filesystems/ubifs.txt
@@ -111,34 +111,6 @@ The following is an example of the kernel boot arguments to attach mtd0
to UBI and mount volume "rootfs":
ubi.mtd=0 root=ubi0:rootfs rootfstype=ubifs
-
-Module Parameters for Debugging
-===============================
-
-When UBIFS has been compiled with debugging enabled, there are 2 module
-parameters that are available to control aspects of testing and debugging.
-
-debug_chks Selects extra checks that UBIFS can do while running:
-
- Check Flag value
-
- General checks 1
- Check Tree Node Cache (TNC) 2
- Check indexing tree size 4
- Check orphan area 8
- Check old indexing tree 16
- Check LEB properties (lprops) 32
- Check leaf nodes and inodes 64
-
-debug_tsts Selects a mode of testing, as follows:
-
- Test mode Flag value
-
- Failure mode for recovery testing 4
-
-For example, set debug_chks to 3 to enable general and TNC checks.
-
-
References
==========
diff --git a/Documentation/mmc/00-INDEX b/Documentation/mmc/00-INDEX
index 93dd7a71407..a9ba6720ffd 100644
--- a/Documentation/mmc/00-INDEX
+++ b/Documentation/mmc/00-INDEX
@@ -4,3 +4,5 @@ mmc-dev-attrs.txt
- info on SD and MMC device attributes
mmc-dev-parts.txt
- info on SD and MMC device partitions
+mmc-async-req.txt
+ - info on mmc asynchronous requests
diff --git a/Documentation/mmc/mmc-async-req.txt b/Documentation/mmc/mmc-async-req.txt
new file mode 100644
index 00000000000..ae1907b10e4
--- /dev/null
+++ b/Documentation/mmc/mmc-async-req.txt
@@ -0,0 +1,87 @@
+Rationale
+=========
+
+How significant is the cache maintenance overhead?
+It depends. Fast eMMC and multiple cache levels with speculative cache
+pre-fetch make the cache overhead relatively significant. If the DMA
+preparations for the next request are done in parallel with the current
+transfer, the DMA preparation overhead would not affect the MMC performance.
+The intention of non-blocking (asynchronous) MMC requests is to minimize the
+time between when an MMC request ends and another MMC request begins.
+Using mmc_wait_for_req(), the MMC controller is idle while dma_map_sg and
+dma_unmap_sg run. Using non-blocking MMC requests makes it
+possible to prepare the caches for the next job in parallel with an active
+MMC request.
+
+MMC block driver
+================
+
+The mmc_blk_issue_rw_rq() in the MMC block driver is made non-blocking.
+The increase in throughput is proportional to the time it takes to
+prepare a request (the major part of the preparation is dma_map_sg() and
+dma_unmap_sg()) and how fast the memory is. The faster the MMC/SD is, the
+more significant the prepare request time becomes. Roughly, the expected
+performance gain is 5% for large writes and 10% for large reads on an L2-cache
+platform. In power save mode, when clocks run at a lower frequency, the DMA
+preparation may cost even more. As long as these slower preparations are run
+in parallel with the transfer, performance won't be affected.
+
+Details on measurements from IOZone and mmc_test
+================================================
+
+https://wiki.linaro.org/WorkingGroups/Kernel/Specs/StoragePerfMMC-async-req
+
+MMC core API extension
+======================
+
+There is one new public function, mmc_start_req().
+It starts a new MMC command request for a host. The function isn't
+truly non-blocking. If there is an ongoing async request, it waits
+for that request to complete, then starts the new one and returns. It
+doesn't wait for the new request to complete. If there is no ongoing
+request it starts the new request and returns immediately.
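+
+A hedged sketch of the resulting calling pattern in an issuing loop (the
+surrounding variable and helper names are illustrative; mmc_start_req()
+itself is the new API):
+
+	struct mmc_async_req *prev_req;
+	int status;
+
+	/* starts cur_req; waits for and returns the previous request, if any */
+	prev_req = mmc_start_req(host, cur_req, &status);
+	if (prev_req)
+		finish_request(prev_req, status);	/* hypothetical */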
+
+MMC host extensions
+===================
+
+There are two optional members in the mmc_host_ops -- pre_req() and
+post_req() -- that the host driver may implement in order to move work
+to before and after the actual mmc_host_ops.request() function is called.
+In the DMA case, pre_req() may do dma_map_sg() and prepare the DMA
+descriptor, and post_req() runs dma_unmap_sg().
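+
+A minimal sketch of the two hooks for a hypothetical "foo" host driver
+(a real driver would also cache the mapping so that .request() does not
+map the scatterlist twice):
+
+	static void foo_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+				bool is_first_req)
+	{
+		struct mmc_data *data = mrq->data;
+
+		if (data)
+			dma_map_sg(mmc_dev(host), data->sg, data->sg_len,
+				   data->flags & MMC_DATA_WRITE ?
+				   DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	}
+
+	static void foo_post_req(struct mmc_host *host, struct mmc_request *mrq,
+				 int err)
+	{
+		struct mmc_data *data = mrq->data;
+
+		if (data)
+			dma_unmap_sg(mmc_dev(host), data->sg, data->sg_len,
+				     data->flags & MMC_DATA_WRITE ?
+				     DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	}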
+
+Optimize for the first request
+==============================
+
+The first request in a series of requests can't be prepared in parallel
+with the previous transfer, since there is no previous request.
+The is_first_req argument to pre_req() indicates this case. The host
+driver may optimize for this scenario to minimize the performance loss.
+One way to do so is to split the current request into two chunks,
+prepare the first chunk and start the request,
+and finally prepare the second chunk and start the transfer.
+
+Pseudocode to handle the is_first_req scenario with minimal prepare overhead:
+
+if (is_first_req && req->size > threshold) {
+	/* start MMC transfer for the complete transfer size */
+	mmc_start_command(MMC_CMD_TRANSFER_FULL_SIZE);
+
+	/*
+	 * Begin to prepare DMA while cmd is being processed by MMC.
+	 * The first chunk of the request should take the same time
+	 * to prepare as the "MMC process command time".
+	 * If the prepare time exceeds the MMC cmd time
+	 * the transfer is delayed; guesstimate max 4k as first chunk size.
+	 */
+	prepare_1st_chunk_for_dma(req);
+	/* flush pending descriptors to the DMAC (dmaengine.h) */
+	dma_issue_pending(req->dma_desc);
+
+	prepare_2nd_chunk_for_dma(req);
+	/*
+	 * The second issue_pending should be called before MMC runs out
+	 * of the first chunk. If the MMC runs out of the first data chunk
+	 * before this call, the transfer is delayed.
+	 */
+	dma_issue_pending(req->dma_desc);
+}
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 64565aac6e4..3384d5996be 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -506,8 +506,8 @@ routines. Nevertheless, different callback pointers are used in case there is a
situation where it actually matters.
-Device Power Domains
---------------------
+Device Power Management Domains
+-------------------------------
Sometimes devices share reference clocks or other power resources. In those
cases it generally is not possible to put devices into low-power states
individually. Instead, a set of devices sharing a power resource can be put
@@ -516,8 +516,8 @@ power resource. Of course, they also need to be put into the full-power state
together, by turning the shared power resource on. A set of devices with this
property is often referred to as a power domain.
-Support for power domains is provided through the pwr_domain field of struct
-device. This field is a pointer to an object of type struct dev_power_domain,
+Support for power domains is provided through the pm_domain field of struct
+device. This field is a pointer to an object of type struct dev_pm_domain,
defined in include/linux/pm.h, providing a set of power management callbacks
analogous to the subsystem-level and device driver callbacks that are executed
for the given device during all power transitions, instead of the respective
@@ -604,7 +604,7 @@ state temporarily, for example so that its system wakeup capability can be
disabled. This all depends on the hardware and the design of the subsystem and
device driver in question.
-During system-wide resume from a sleep state it's best to put devices into the
-full-power state, as explained in Documentation/power/runtime_pm.txt. Refer to
-that document for more information regarding this particular issue as well as
+During system-wide resume from a sleep state it's easiest to put devices into
+the full-power state, as explained in Documentation/power/runtime_pm.txt. Refer
+to that document for more information regarding this particular issue as well as
for information on the device runtime power management framework in general.
diff --git a/Documentation/power/opp.txt b/Documentation/power/opp.txt
index 5ae70a12c1e..3035d00757a 100644
--- a/Documentation/power/opp.txt
+++ b/Documentation/power/opp.txt
@@ -321,6 +321,8 @@ opp_init_cpufreq_table - cpufreq framework typically is initialized with
addition to CONFIG_PM as power management feature is required to
dynamically scale voltage and frequency in a system.
+opp_free_cpufreq_table - Free up the table allocated by opp_init_cpufreq_table
+
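+A sketch of the intended pairing, assuming a driver that builds its cpufreq
+table from the OPP library:
+
+	struct cpufreq_frequency_table *freq_table;
+	int ret;
+
+	ret = opp_init_cpufreq_table(dev, &freq_table);
+	if (ret)
+		return ret;
+	/* ... hand freq_table over to cpufreq ... */
+	opp_free_cpufreq_table(dev, &freq_table);
+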
7. Data Structures
==================
Typically an SoC contains multiple voltage domains which are variable. Each
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index b24875b1ced..14dd3c6ad97 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -1,39 +1,39 @@
-Run-time Power Management Framework for I/O Devices
+Runtime Power Management Framework for I/O Devices
(C) 2009-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
(C) 2010 Alan Stern <stern@rowland.harvard.edu>
1. Introduction
-Support for run-time power management (run-time PM) of I/O devices is provided
+Support for runtime power management (runtime PM) of I/O devices is provided
at the power management core (PM core) level by means of:
* The power management workqueue pm_wq in which bus types and device drivers can
put their PM-related work items. It is strongly recommended that pm_wq be
- used for queuing all work items related to run-time PM, because this allows
+ used for queuing all work items related to runtime PM, because this allows
them to be synchronized with system-wide power transitions (suspend to RAM,
hibernation and resume from system sleep states). pm_wq is declared in
include/linux/pm_runtime.h and defined in kernel/power/main.c.
-* A number of run-time PM fields in the 'power' member of 'struct device' (which
+* A number of runtime PM fields in the 'power' member of 'struct device' (which
is of the type 'struct dev_pm_info', defined in include/linux/pm.h) that can
- be used for synchronizing run-time PM operations with one another.
+ be used for synchronizing runtime PM operations with one another.
-* Three device run-time PM callbacks in 'struct dev_pm_ops' (defined in
+* Three device runtime PM callbacks in 'struct dev_pm_ops' (defined in
include/linux/pm.h).
* A set of helper functions defined in drivers/base/power/runtime.c that can be
- used for carrying out run-time PM operations in such a way that the
+ used for carrying out runtime PM operations in such a way that the
synchronization between them is taken care of by the PM core. Bus types and
device drivers are encouraged to use these functions.
-The run-time PM callbacks present in 'struct dev_pm_ops', the device run-time PM
+The runtime PM callbacks present in 'struct dev_pm_ops', the device runtime PM
fields of 'struct dev_pm_info' and the core helper functions provided for
-run-time PM are described below.
+runtime PM are described below.
-2. Device Run-time PM Callbacks
+2. Device Runtime PM Callbacks
-There are three device run-time PM callbacks defined in 'struct dev_pm_ops':
+There are three device runtime PM callbacks defined in 'struct dev_pm_ops':
struct dev_pm_ops {
...
@@ -72,11 +72,11 @@ knows what to do to handle the device).
not mean that the device has been put into a low power state. It is
supposed to mean, however, that the device will not process data and will
not communicate with the CPU(s) and RAM until the subsystem-level resume
- callback is executed for it. The run-time PM status of a device after
+ callback is executed for it. The runtime PM status of a device after
successful execution of the subsystem-level suspend callback is 'suspended'.
* If the subsystem-level suspend callback returns -EBUSY or -EAGAIN,
- the device's run-time PM status is 'active', which means that the device
+ the device's runtime PM status is 'active', which means that the device
_must_ be fully operational afterwards.
* If the subsystem-level suspend callback returns an error code different
@@ -104,7 +104,7 @@ the device).
* Once the subsystem-level resume callback has completed successfully, the PM
core regards the device as fully operational, which means that the device
- _must_ be able to complete I/O operations as needed. The run-time PM status
+ _must_ be able to complete I/O operations as needed. The runtime PM status
of the device is then 'active'.
* If the subsystem-level resume callback returns an error code, the PM core
@@ -130,7 +130,7 @@ device in that case. The value returned by this callback is ignored by the PM
core.
The helper functions provided by the PM core, described in Section 4, guarantee
-that the following constraints are met with respect to the bus type's run-time
+that the following constraints are met with respect to the bus type's runtime
PM callbacks:
(1) The callbacks are mutually exclusive (e.g. it is forbidden to execute
@@ -142,7 +142,7 @@ PM callbacks:
(2) ->runtime_idle() and ->runtime_suspend() can only be executed for 'active'
devices (i.e. the PM core will only execute ->runtime_idle() or
- ->runtime_suspend() for the devices the run-time PM status of which is
+ ->runtime_suspend() for the devices the runtime PM status of which is
'active').
(3) ->runtime_idle() and ->runtime_suspend() can only be executed for a device
@@ -151,7 +151,7 @@ PM callbacks:
flag of which is set.
(4) ->runtime_resume() can only be executed for 'suspended' devices (i.e. the
- PM core will only execute ->runtime_resume() for the devices the run-time
+ PM core will only execute ->runtime_resume() for the devices the runtime
PM status of which is 'suspended').
Additionally, the helper functions provided by the PM core obey the following
@@ -171,9 +171,9 @@ rules:
scheduled requests to execute the other callbacks for the same device,
except for scheduled autosuspends.
-3. Run-time PM Device Fields
+3. Runtime PM Device Fields
-The following device run-time PM fields are present in 'struct dev_pm_info', as
+The following device runtime PM fields are present in 'struct dev_pm_info', as
defined in include/linux/pm.h:
struct timer_list suspend_timer;
@@ -205,7 +205,7 @@ defined in include/linux/pm.h:
unsigned int disable_depth;
 - used for disabling the helper functions (they work normally if this is
- equal to zero); the initial value of it is 1 (i.e. run-time PM is
+ equal to zero); the initial value of it is 1 (i.e. runtime PM is
initially disabled for all devices)
unsigned int runtime_error;
@@ -229,10 +229,10 @@ defined in include/linux/pm.h:
suspend to complete; means "start a resume as soon as you've suspended"
unsigned int run_wake;
- - set if the device is capable of generating run-time wake-up events
+ - set if the device is capable of generating runtime wake-up events
enum rpm_status runtime_status;
- - the run-time PM status of the device; this field's initial value is
+ - the runtime PM status of the device; this field's initial value is
RPM_SUSPENDED, which means that each device is initially regarded by the
PM core as 'suspended', regardless of its real hardware status
@@ -243,7 +243,7 @@ defined in include/linux/pm.h:
and pm_runtime_forbid() helper functions
unsigned int no_callbacks;
- - indicates that the device does not use the run-time PM callbacks (see
+ - indicates that the device does not use the runtime PM callbacks (see
Section 8); it may be modified only by the pm_runtime_no_callbacks()
helper function
@@ -270,16 +270,16 @@ defined in include/linux/pm.h:
All of the above fields are members of the 'power' member of 'struct device'.
-4. Run-time PM Device Helper Functions
+4. Runtime PM Device Helper Functions
-The following run-time PM helper functions are defined in
+The following runtime PM helper functions are defined in
drivers/base/power/runtime.c and include/linux/pm_runtime.h:
void pm_runtime_init(struct device *dev);
- - initialize the device run-time PM fields in 'struct dev_pm_info'
+ - initialize the device runtime PM fields in 'struct dev_pm_info'
void pm_runtime_remove(struct device *dev);
- - make sure that the run-time PM of the device will be disabled after
+ - make sure that the runtime PM of the device will be disabled after
removing the device from device hierarchy
int pm_runtime_idle(struct device *dev);
@@ -289,9 +289,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
int pm_runtime_suspend(struct device *dev);
- execute the subsystem-level suspend callback for the device; returns 0 on
- success, 1 if the device's run-time PM status was already 'suspended', or
+ success, 1 if the device's runtime PM status was already 'suspended', or
error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt
- to suspend the device again in future
+ to suspend the device again in future and -EACCES means that
+ 'power.disable_depth' is different from 0
int pm_runtime_autosuspend(struct device *dev);
- same as pm_runtime_suspend() except that the autosuspend delay is taken
@@ -301,10 +302,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
int pm_runtime_resume(struct device *dev);
- execute the subsystem-level resume callback for the device; returns 0 on
- success, 1 if the device's run-time PM status was already 'active' or
+ success, 1 if the device's runtime PM status was already 'active' or
error code on failure, where -EAGAIN means it may be safe to attempt to
resume the device again in future, but 'power.runtime_error' should be
- checked additionally
+ checked additionally, and -EACCES means that 'power.disable_depth' is
+ different from 0
int pm_request_idle(struct device *dev);
- submit a request to execute the subsystem-level idle callback for the
@@ -321,7 +323,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
device in future, where 'delay' is the time to wait before queuing up a
suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work
item is queued up immediately); returns 0 on success, 1 if the device's PM
- run-time status was already 'suspended', or error code if the request
+ runtime status was already 'suspended', or error code if the request
hasn't been scheduled (or queued up if 'delay' is 0); if the execution of
->runtime_suspend() is already scheduled and not yet expired, the new
value of 'delay' will be used as the time to wait
@@ -329,7 +331,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
int pm_request_resume(struct device *dev);
- submit a request to execute the subsystem-level resume callback for the
device (the request is represented by a work item in pm_wq); returns 0 on
- success, 1 if the device's run-time PM status was already 'active', or
+ success, 1 if the device's runtime PM status was already 'active', or
error code if the request hasn't been queued up
void pm_runtime_get_noresume(struct device *dev);
@@ -367,22 +369,32 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
pm_runtime_autosuspend(dev) and return its result
void pm_runtime_enable(struct device *dev);
- - enable the run-time PM helper functions to run the device bus type's
- run-time PM callbacks described in Section 2
+ - decrement the device's 'power.disable_depth' field; if that field is equal
+ to zero, the runtime PM helper functions can execute subsystem-level
+ callbacks described in Section 2 for the device
int pm_runtime_disable(struct device *dev);
- - prevent the run-time PM helper functions from running subsystem-level
- run-time PM callbacks for the device, make sure that all of the pending
- run-time PM operations on the device are either completed or canceled;
+ - increment the device's 'power.disable_depth' field (if the value of that
+ field was previously zero, this prevents subsystem-level runtime PM
+ callbacks from being run for the device), make sure that all of the pending
+ runtime PM operations on the device are either completed or canceled;
returns 1 if there was a resume request pending and it was necessary to
execute the subsystem-level resume callback for the device to satisfy that
request, otherwise 0 is returned
+ int pm_runtime_barrier(struct device *dev);
+ - check if there's a resume request pending for the device and resume it
+ (synchronously) in that case, cancel any other pending runtime PM requests
+ regarding it and wait for all runtime PM operations on it in progress to
+ complete; returns 1 if there was a resume request pending and it was
+ necessary to execute the subsystem-level resume callback for the device to
+ satisfy that request, otherwise 0 is returned
+
void pm_suspend_ignore_children(struct device *dev, bool enable);
- set/unset the power.ignore_children flag of the device
int pm_runtime_set_active(struct device *dev);
- - clear the device's 'power.runtime_error' flag, set the device's run-time
+ - clear the device's 'power.runtime_error' flag, set the device's runtime
PM status to 'active' and update its parent's counter of 'active'
children as appropriate (it is only valid to use this function if
'power.runtime_error' is set or 'power.disable_depth' is greater than
@@ -390,7 +402,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
which is not active and the 'power.ignore_children' flag of which is unset
void pm_runtime_set_suspended(struct device *dev);
- - clear the device's 'power.runtime_error' flag, set the device's run-time
+ - clear the device's 'power.runtime_error' flag, set the device's runtime
PM status to 'suspended' and update its parent's counter of 'active'
children as appropriate (it is only valid to use this function if
'power.runtime_error' is set or 'power.disable_depth' is greater than
@@ -400,6 +412,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
- return true if the device's runtime PM status is 'suspended' and its
'power.disable_depth' field is equal to zero, or false otherwise
+ bool pm_runtime_status_suspended(struct device *dev);
+ - return true if the device's runtime PM status is 'suspended'
+
void pm_runtime_allow(struct device *dev);
- set the power.runtime_auto flag for the device and decrease its usage
counter (used by the /sys/devices/.../power/control interface to
@@ -411,7 +426,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
effectively prevent the device from being power managed at run time)
void pm_runtime_no_callbacks(struct device *dev);
- - set the power.no_callbacks flag for the device and remove the run-time
+ - set the power.no_callbacks flag for the device and remove the runtime
PM attributes from /sys/devices/.../power (or prevent them from being
added when the device is registered)
@@ -431,7 +446,7 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
- set the power.autosuspend_delay value to 'delay' (expressed in
- milliseconds); if 'delay' is negative then run-time suspends are
+ milliseconds); if 'delay' is negative then runtime suspends are
prevented
unsigned long pm_runtime_autosuspend_expiration(struct device *dev);
@@ -470,35 +485,35 @@ pm_runtime_resume()
pm_runtime_get_sync()
pm_runtime_put_sync_suspend()
-5. Run-time PM Initialization, Device Probing and Removal
+5. Runtime PM Initialization, Device Probing and Removal
-Initially, the run-time PM is disabled for all devices, which means that the
-majority of the run-time PM helper funtions described in Section 4 will return
+Initially, the runtime PM is disabled for all devices, which means that the
+majority of the runtime PM helper functions described in Section 4 will return
-EAGAIN until pm_runtime_enable() is called for the device.
-In addition to that, the initial run-time PM status of all devices is
+In addition to that, the initial runtime PM status of all devices is
'suspended', but it need not reflect the actual physical state of the device.
Thus, if the device is initially active (i.e. it is able to process I/O), its
-run-time PM status must be changed to 'active', with the help of
+runtime PM status must be changed to 'active', with the help of
pm_runtime_set_active(), before pm_runtime_enable() is called for the device.
-However, if the device has a parent and the parent's run-time PM is enabled,
+However, if the device has a parent and the parent's runtime PM is enabled,
calling pm_runtime_set_active() for the device will affect the parent, unless
the parent's 'power.ignore_children' flag is set. Namely, in that case the
parent won't be able to suspend at run time, using the PM core's helper
functions, as long as the child's status is 'active', even if the child's
-run-time PM is still disabled (i.e. pm_runtime_enable() hasn't been called for
+runtime PM is still disabled (i.e. pm_runtime_enable() hasn't been called for
the child yet or pm_runtime_disable() has been called for it). For this reason,
once pm_runtime_set_active() has been called for the device, pm_runtime_enable()
-should be called for it too as soon as reasonably possible or its run-time PM
+should be called for it too as soon as reasonably possible or its runtime PM
status should be changed back to 'suspended' with the help of
pm_runtime_set_suspended().
-If the default initial run-time PM status of the device (i.e. 'suspended')
+If the default initial runtime PM status of the device (i.e. 'suspended')
reflects the actual state of the device, its bus type's or its driver's
->probe() callback will likely need to wake it up using one of the PM core's
helper functions described in Section 4. In that case, pm_runtime_resume()
-should be used. Of course, for this purpose the device's run-time PM has to be
+should be used. Of course, for this purpose the device's runtime PM has to be
enabled earlier by calling pm_runtime_enable().
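+
+For instance, a driver whose device is already powered when ->probe() runs
+might do (a sketch; error handling omitted):
+
+	pm_runtime_set_active(&pdev->dev);	/* hardware is already on */
+	pm_runtime_enable(&pdev->dev);
+
+while a driver whose device really is suspended at probe time would instead
+enable runtime PM and then wake the device up:
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_resume(&pdev->dev);
+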
If the device bus type's or driver's ->probe() callback runs
@@ -529,33 +544,33 @@ The user space can effectively disallow the driver of the device to power manage
it at run time by changing the value of its /sys/devices/.../power/control
attribute to "on", which causes pm_runtime_forbid() to be called. In principle,
this mechanism may also be used by the driver to effectively turn off the
-run-time power management of the device until the user space turns it on.
-Namely, during the initialization the driver can make sure that the run-time PM
+runtime power management of the device until the user space turns it on.
+Namely, during the initialization the driver can make sure that the runtime PM
status of the device is 'active' and call pm_runtime_forbid(). It should be
noted, however, that if the user space has already intentionally changed the
value of /sys/devices/.../power/control to "auto" to allow the driver to power
manage the device at run time, the driver may confuse it by using
pm_runtime_forbid() this way.
-6. Run-time PM and System Sleep
+6. Runtime PM and System Sleep
-Run-time PM and system sleep (i.e., system suspend and hibernation, also known
+Runtime PM and system sleep (i.e., system suspend and hibernation, also known
as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of
ways. If a device is active when a system sleep starts, everything is
straightforward. But what should happen if the device is already suspended?
-The device may have different wake-up settings for run-time PM and system sleep.
-For example, remote wake-up may be enabled for run-time suspend but disallowed
+The device may have different wake-up settings for runtime PM and system sleep.
+For example, remote wake-up may be enabled for runtime suspend but disallowed
for system sleep (device_may_wakeup(dev) returns 'false'). When this happens,
the subsystem-level system suspend callback is responsible for changing the
device's wake-up setting (it may leave that to the device driver's system
suspend routine). It may be necessary to resume the device and suspend it again
in order to do so. The same is true if the driver uses different power levels
-or other settings for run-time suspend and system sleep.
+or other settings for runtime suspend and system sleep.
-During system resume, devices generally should be brought back to full power,
-even if they were suspended before the system sleep began. There are several
-reasons for this, including:
+During system resume, the simplest approach is to bring all devices back to full
+power, even if they had been suspended before the system suspend began. There
+are several reasons for this, including:
* The device might need to switch power levels, wake-up settings, etc.
@@ -570,18 +585,50 @@ reasons for this, including:
* The device might need to be reset.
* Even though the device was suspended, if its usage counter was > 0 then most
- likely it would need a run-time resume in the near future anyway.
-
- * Always going back to full power is simplest.
+ likely it would need a runtime resume in the near future anyway.
-If the device was suspended before the sleep began, then its run-time PM status
-will have to be updated to reflect the actual post-system sleep status. The way
-to do this is:
+If the device had been suspended before the system suspend began and it's
+brought back to full power during resume, then its runtime PM status will have
+to be updated to reflect the actual post-system sleep status. The way to do
+this is:
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+The PM core always increments the runtime usage counter before calling the
+->suspend() callback and decrements it after calling the ->resume() callback.
+Hence disabling runtime PM temporarily like this will not cause any runtime
+suspend attempts to be permanently lost. If the usage count goes to zero
+following the return of the ->resume() callback, the ->runtime_idle() callback
+will be invoked as usual.
+
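+For example, a driver's system resume callback could apply this sequence as
+follows (a sketch; the "foo" name is hypothetical):
+
+	static int foo_resume(struct device *dev)
+	{
+		/* bring the hardware back to full power first, then: */
+		pm_runtime_disable(dev);
+		pm_runtime_set_active(dev);
+		pm_runtime_enable(dev);
+		return 0;
+	}
+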
+On some systems, however, system sleep is not entered through a global firmware
+or hardware operation. Instead, all hardware components are put into low-power
+states directly by the kernel in a coordinated way. Then, the system sleep
+state effectively follows from the states the hardware components end up in
+and the system is woken up from that state by a hardware interrupt or a similar
+mechanism entirely under the kernel's control. As a result, the kernel never
+gives control away and the states of all devices during resume are precisely
+known to it. If that is the case and none of the situations listed above takes
+place (in particular, if the system is not waking up from hibernation), it may
+be more efficient to leave the devices that had been suspended before the system
+suspend began in the suspended state.
+
+The PM core does its best to reduce the probability of race conditions between
+the runtime PM and system suspend/resume (and hibernation) callbacks by carrying
+out the following operations:
+
+ * During system suspend it calls pm_runtime_get_noresume() and
+ pm_runtime_barrier() for every device right before executing the
+ subsystem-level .suspend() callback for it. In addition to that it calls
+ pm_runtime_disable() for every device right after executing the
+ subsystem-level .suspend() callback for it.
+
+ * During system resume it calls pm_runtime_enable() and pm_runtime_put_sync()
+ for every device right before and right after executing the subsystem-level
+ .resume() callback for it, respectively.
+
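+In pseudo-code, that ordering amounts to the following (a simplified sketch
+of the PM core's behavior, not literal kernel code):
+
+	/* system suspend path */
+	pm_runtime_get_noresume(dev);
+	pm_runtime_barrier(dev);
+	ops->suspend(dev);		/* subsystem-level callback */
+	pm_runtime_disable(dev);
+
+	/* system resume path */
+	pm_runtime_enable(dev);
+	ops->resume(dev);		/* subsystem-level callback */
+	pm_runtime_put_sync(dev);
+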
7. Generic subsystem callbacks
Subsystems may wish to conserve code space by using the set of generic power
@@ -606,40 +653,68 @@ driver/base/power/generic_ops.c:
callback provided by its driver and return its result, or return 0 if not
defined
+ int pm_generic_suspend_noirq(struct device *dev);
+ - if pm_runtime_suspended(dev) returns "false", invoke the ->suspend_noirq()
+ callback provided by the device's driver and return its result, or return
+ 0 if not defined
+
int pm_generic_resume(struct device *dev);
- invoke the ->resume() callback provided by the driver of this device and,
if successful, change the device's runtime PM status to 'active'
+ int pm_generic_resume_noirq(struct device *dev);
+ - invoke the ->resume_noirq() callback provided by the driver of this device
+
int pm_generic_freeze(struct device *dev);
- if the device has not been suspended at run time, invoke the ->freeze()
callback provided by its driver and return its result, or return 0 if not
defined
+ int pm_generic_freeze_noirq(struct device *dev);
+ - if pm_runtime_suspended(dev) returns "false", invoke the ->freeze_noirq()
+ callback provided by the device's driver and return its result, or return
+ 0 if not defined
+
int pm_generic_thaw(struct device *dev);
- if the device has not been suspended at run time, invoke the ->thaw()
callback provided by its driver and return its result, or return 0 if not
defined
+ int pm_generic_thaw_noirq(struct device *dev);
+ - if pm_runtime_suspended(dev) returns "false", invoke the ->thaw_noirq()
+ callback provided by the device's driver and return its result, or return
+ 0 if not defined
+
int pm_generic_poweroff(struct device *dev);
- if the device has not been suspended at run time, invoke the ->poweroff()
callback provided by its driver and return its result, or return 0 if not
defined
+ int pm_generic_poweroff_noirq(struct device *dev);
+ - if pm_runtime_suspended(dev) returns "false", run the ->poweroff_noirq()
+ callback provided by the device's driver and return its result, or return
+ 0 if not defined
+
int pm_generic_restore(struct device *dev);
- invoke the ->restore() callback provided by the driver of this device and,
if successful, change the device's runtime PM status to 'active'
+ int pm_generic_restore_noirq(struct device *dev);
+ - invoke the ->restore_noirq() callback provided by the device's driver
+
These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(),
-->runtime_resume(), ->suspend(), ->resume(), ->freeze(), ->thaw(), ->poweroff(),
-or ->restore() callback pointers in the subsystem-level dev_pm_ops structures.
+->runtime_resume(), ->suspend(), ->suspend_noirq(), ->resume(),
+->resume_noirq(), ->freeze(), ->freeze_noirq(), ->thaw(), ->thaw_noirq(),
+->poweroff(), ->poweroff_noirq(), ->restore(), ->restore_noirq() callback
+pointers in the subsystem-level dev_pm_ops structures.
If a subsystem wishes to use all of them at the same time, it can simply assign
the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its
dev_pm_ops structure pointer.
Device drivers that wish to use the same function as a system suspend, freeze,
-poweroff and run-time suspend callback, and similarly for system resume, thaw,
-restore, and run-time resume, can achieve this with the help of the
+poweroff and runtime suspend callback, and similarly for system resume, thaw,
+restore, and runtime resume, can achieve this with the help of the
UNIVERSAL_DEV_PM_OPS macro defined in include/linux/pm.h (possibly setting its
last argument to NULL).
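+
+For illustration, a driver using one function for all suspend-type callbacks
+and one for all resume-type callbacks could do (a sketch; the "foo" names
+are hypothetical):
+
+	static int foo_suspend(struct device *dev)
+	{
+		/* used for system suspend, freeze, poweroff and runtime suspend */
+		return 0;
+	}
+
+	static int foo_resume(struct device *dev)
+	{
+		/* used for system resume, thaw, restore and runtime resume */
+		return 0;
+	}
+
+	static UNIVERSAL_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume, NULL);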
@@ -649,7 +724,7 @@ Some "devices" are only logical sub-devices of their parent and cannot be
power-managed on their own. (The prototype example is a USB interface. Entire
USB devices can go into low-power mode or send wake-up requests, but neither is
possible for individual interfaces.) The drivers for these devices have no
-need of run-time PM callbacks; if the callbacks did exist, ->runtime_suspend()
+need of runtime PM callbacks; if the callbacks did exist, ->runtime_suspend()
and ->runtime_resume() would always return 0 without doing anything else and
->runtime_idle() would always call pm_runtime_suspend().
@@ -657,7 +732,7 @@ Subsystems can tell the PM core about these devices by calling
pm_runtime_no_callbacks(). This should be done after the device structure is
initialized and before it is registered (although after device registration is
also okay). The routine will set the device's power.no_callbacks flag and
-prevent the non-debugging run-time PM sysfs attributes from being created.
+prevent the non-debugging runtime PM sysfs attributes from being created.
When power.no_callbacks is set, the PM core will not invoke the
->runtime_idle(), ->runtime_suspend(), or ->runtime_resume() callbacks.
@@ -665,7 +740,7 @@ Instead it will assume that suspends and resumes always succeed and that idle
devices should be suspended.
As a consequence, the PM core will never directly inform the device's subsystem
-or driver about run-time power changes. Instead, the driver for the device's
+or driver about runtime power changes. Instead, the driver for the device's
parent must take responsibility for telling the device's driver when the
parent's power state changes.
@@ -676,13 +751,13 @@ A device should be put in a low-power state only when there's some reason to
think it will remain in that state for a substantial time. A common heuristic
says that a device which hasn't been used for a while is liable to remain
unused; following this advice, drivers should not allow devices to be suspended
-at run-time until they have been inactive for some minimum period. Even when
+at runtime until they have been inactive for some minimum period. Even when
the heuristic ends up being non-optimal, it will still prevent devices from
"bouncing" too rapidly between low-power and full-power states.
The term "autosuspend" is an historical remnant. It doesn't mean that the
device is automatically suspended (the subsystem or driver still has to call
-the appropriate PM routines); rather it means that run-time suspends will
+the appropriate PM routines); rather it means that runtime suspends will
automatically be delayed until the desired period of inactivity has elapsed.
Inactivity is determined based on the power.last_busy field. Drivers should
diff --git a/Documentation/spi/ep93xx_spi b/Documentation/spi/ep93xx_spi
index 6325f5b4863..d8eb01c15db 100644
--- a/Documentation/spi/ep93xx_spi
+++ b/Documentation/spi/ep93xx_spi
@@ -88,6 +88,16 @@ static void __init ts72xx_init_machine(void)
ARRAY_SIZE(ts72xx_spi_devices));
}
+The driver can also use DMA for the transfers. In that case ts72xx_spi_info
+becomes:
+
+static struct ep93xx_spi_info ts72xx_spi_info = {
+ .num_chipselect = ARRAY_SIZE(ts72xx_spi_devices),
+	.use_dma	= true,
+};
+
+Note that CONFIG_EP93XX_DMA should be enabled as well.
+
Thanks to
=========
Martin Guy, H. Hartley Sweeten and others who helped me during development of
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx
index 493dada5737..00511e08db7 100644
--- a/Documentation/spi/pxa2xx
+++ b/Documentation/spi/pxa2xx
@@ -22,15 +22,11 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
found in include/linux/spi/pxa2xx_spi.h:
struct pxa2xx_spi_master {
- enum pxa_ssp_type ssp_type;
u32 clock_enable;
u16 num_chipselect;
u8 enable_dma;
};
-The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and
-informs the driver which features a particular SSP supports.
-
The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the
corresponding SSP peripheral block in the "Clock Enable Register" (CKEN). See
the "PXA2xx Developer Manual" section "Clocks and Power Management".
@@ -61,7 +57,6 @@ static struct resource pxa_spi_nssp_resources[] = {
};
static struct pxa2xx_spi_master pxa_nssp_master_info = {
- .ssp_type = PXA25x_NSSP, /* Type of SSP */
.clock_enable = CKEN_NSSP, /* NSSP Peripheral clock */
.num_chipselect = 1, /* Matches the number of chips attached to NSSP */
.enable_dma = 1, /* Enables NSSP DMA */
diff --git a/Documentation/virtual/lguest/lguest.c b/Documentation/virtual/lguest/lguest.c
index cd9d6af61d0..043bd7df313 100644
--- a/Documentation/virtual/lguest/lguest.c
+++ b/Documentation/virtual/lguest/lguest.c
@@ -51,7 +51,7 @@
#include <asm/bootparam.h>
#include "../../../include/linux/lguest_launcher.h"
/*L:110
- * We can ignore the 42 include files we need for this program, but I do want
+ * We can ignore the 43 include files we need for this program, but I do want
* to draw attention to the use of kernel-style types.
*
* As Linus said, "C is a Spartan language, and so should your naming be." I
@@ -65,7 +65,6 @@ typedef uint16_t u16;
typedef uint8_t u8;
/*:*/
-#define PAGE_PRESENT 0x7 /* Present, RW, Execute */
#define BRIDGE_PFX "bridge:"
#ifndef SIOCBRADDIF
#define SIOCBRADDIF 0x89a2 /* add interface to bridge */
@@ -861,8 +860,10 @@ static void console_output(struct virtqueue *vq)
/* writev can return a partial write, so we loop here. */
while (!iov_empty(iov, out)) {
int len = writev(STDOUT_FILENO, iov, out);
- if (len <= 0)
- err(1, "Write to stdout gave %i", len);
+ if (len <= 0) {
+ warn("Write to stdout gave %i (%d)", len, errno);
+ break;
+ }
iov_consume(iov, out, len);
}
@@ -898,7 +899,7 @@ static void net_output(struct virtqueue *vq)
* same format: what a coincidence!
*/
if (writev(net_info->tunfd, iov, out) < 0)
- errx(1, "Write to tun failed?");
+ warnx("Write to tun failed (%d)?", errno);
/*
* Done with that one; wait_for_vq_desc() will send the interrupt if
@@ -955,7 +956,7 @@ static void net_input(struct virtqueue *vq)
*/
len = readv(net_info->tunfd, iov, in);
if (len <= 0)
- err(1, "Failed to read from tun.");
+ warn("Failed to read from tun (%d).", errno);
/*
* Mark that packet buffer as used, but don't interrupt here. We want
@@ -1093,9 +1094,10 @@ static void update_device_status(struct device *dev)
warnx("Device %s configuration FAILED", dev->name);
if (dev->running)
reset_device(dev);
- } else if (dev->desc->status & VIRTIO_CONFIG_S_DRIVER_OK) {
- if (!dev->running)
- start_device(dev);
+ } else {
+ if (dev->running)
+ err(1, "Device %s features finalized twice", dev->name);
+ start_device(dev);
}
}
@@ -1120,25 +1122,11 @@ static void handle_output(unsigned long addr)
return;
}
- /*
- * Devices *can* be used before status is set to DRIVER_OK.
- * The original plan was that they would never do this: they
- * would always finish setting up their status bits before
- * actually touching the virtqueues. In practice, we allowed
- * them to, and they do (eg. the disk probes for partition
- * tables as part of initialization).
- *
- * If we see this, we start the device: once it's running, we
- * expect the device to catch all the notifications.
- */
+ /* Devices should not be used before features are finalized. */
for (vq = i->vq; vq; vq = vq->next) {
if (addr != vq->config.pfn*getpagesize())
continue;
- if (i->running)
- errx(1, "Notification on running %s", i->name);
- /* This just calls create_thread() for each virtqueue */
- start_device(i);
- return;
+ errx(1, "Notification on %s before setup!", i->name);
}
}
@@ -1370,7 +1358,7 @@ static void setup_console(void)
* --sharenet=<name> option which opens or creates a named pipe. This can be
* used to send packets to another guest in a 1:1 manner.
*
- * More sopisticated is to use one of the tools developed for project like UML
+ * More sophisticated is to use one of the tools developed for projects like UML
* to do networking.
*
* Faster is to do virtio bonding in kernel. Doing this 1:1 would be
@@ -1380,7 +1368,7 @@ static void setup_console(void)
* multiple inter-guest channels behind one interface, although it would
* require some manner of hotplugging new virtio channels.
*
- * Finally, we could implement a virtio network switch in the kernel.
+ * Finally, we could use a virtio network switch in the kernel, ie. vhost.
:*/
static u32 str2ip(const char *ipaddr)
@@ -2017,10 +2005,7 @@ int main(int argc, char *argv[])
/* Tell the entry path not to try to reload segment registers. */
boot->hdr.loadflags |= KEEP_SEGMENTS;
- /*
- * We tell the kernel to initialize the Guest: this returns the open
- * /dev/lguest file descriptor.
- */
+ /* We tell the kernel to initialize the Guest. */
tell_kernel(start);
/* Ensure that we terminate if a device-servicing child dies. */
diff --git a/Documentation/x86/boot.txt b/Documentation/x86/boot.txt
index 9b7221a86df..7c3a8801b7c 100644
--- a/Documentation/x86/boot.txt
+++ b/Documentation/x86/boot.txt
@@ -674,7 +674,7 @@ Protocol: 2.10+
Field name: init_size
Type: read
-Offset/size: 0x25c/4
+Offset/size: 0x260/4
This field indicates the amount of linear contiguous memory starting
at the kernel runtime start address that the kernel needs before it
diff --git a/MAINTAINERS b/MAINTAINERS
index 380d2d5f08c..a116b35c9a1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,4 +1,5 @@
+
List of maintainers and how to submit kernel changes
Please try to follow the guidelines below. This will make things
@@ -3425,10 +3426,9 @@ S: Maintained
F: drivers/net/ipg.*
IPATH DRIVER
-M: Ralph Campbell <infinipath@qlogic.com>
+M: Mike Marciniszyn <infinipath@qlogic.com>
L: linux-rdma@vger.kernel.org
-T: git git://git.qlogic.com/ipath-linux-2.6
-S: Supported
+S: Maintained
F: drivers/infiniband/hw/ipath/
IPMI SUBSYSTEM
@@ -4590,9 +4590,8 @@ S: Maintained
F: drivers/mmc/host/omap.c
OMAP HS MMC SUPPORT
-M: Madhusudhan Chikkature <madhu.cr@ti.com>
L: linux-omap@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/mmc/host/omap_hsmmc.c
OMAP RANDOM NUMBER GENERATOR SUPPORT
@@ -5158,6 +5157,12 @@ M: Robert Jarzmik <robert.jarzmik@free.fr>
L: rtc-linux@googlegroups.com
S: Maintained
+QIB DRIVER
+M: Mike Marciniszyn <infinipath@qlogic.com>
+L: linux-rdma@vger.kernel.org
+S: Supported
+F: drivers/infiniband/hw/qib/
+
QLOGIC QLA1280 SCSI DRIVER
M: Michael Reed <mdr@sgi.com>
L: linux-scsi@vger.kernel.org
@@ -6248,9 +6253,14 @@ F: drivers/char/toshiba.c
F: include/linux/toshiba.h
TMIO MMC DRIVER
+M: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
M: Ian Molton <ian@mnementh.co.uk>
+L: linux-mmc@vger.kernel.org
S: Maintained
-F: drivers/mmc/host/tmio_mmc.*
+F: drivers/mmc/host/tmio_mmc*
+F: drivers/mmc/host/sh_mobile_sdhi.c
+F: include/linux/mmc/tmio.h
+F: include/linux/mmc/sh_mobile_sdhi.h
TMPFS (SHMEM FILESYSTEM)
M: Hugh Dickins <hughd@google.com>
@@ -6327,7 +6337,7 @@ F: drivers/scsi/u14-34f.c
UBI FILE SYSTEM (UBIFS)
M: Artem Bityutskiy <dedekind1@gmail.com>
-M: Adrian Hunter <adrian.hunter@nokia.com>
+M: Adrian Hunter <adrian.hunter@intel.com>
L: linux-mtd@lists.infradead.org
T: git git://git.infradead.org/ubifs-2.6.git
W: http://www.linux-mtd.infradead.org/doc/ubifs.html
diff --git a/Makefile b/Makefile
index 60d91f76c2f..6a5bdad524a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Sneaky Weasel
# *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9adc278a22a..e04fa9d7637 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -642,6 +642,7 @@ config ARCH_SHMOBILE
select NO_IOPORT
select SPARSE_IRQ
select MULTI_IRQ_HANDLER
+ select PM_GENERIC_DOMAINS if PM
help
Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
diff --git a/arch/arm/configs/mmp2_defconfig b/arch/arm/configs/mmp2_defconfig
index 47ad3b1a4fe..5a584520db2 100644
--- a/arch/arm/configs/mmp2_defconfig
+++ b/arch/arm/configs/mmp2_defconfig
@@ -8,6 +8,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_ARCH_MMP=y
+CONFIG_MACH_BROWNSTONE=y
CONFIG_MACH_FLINT=y
CONFIG_MACH_MARVELL_JASPER=y
CONFIG_HIGH_RES_TIMERS=y
@@ -63,10 +64,16 @@ CONFIG_BACKLIGHT_MAX8925=y
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX8925=y
+CONFIG_MMC=y
# CONFIG_DNOTIFY is not set
CONFIG_INOTIFY=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
CONFIG_JFFS2_FS=y
CONFIG_CRAMFS=y
CONFIG_NFS_FS=y
@@ -81,7 +88,7 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_DEBUG_INFO=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
-CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_DYNAMIC_DEBUG is not set
CONFIG_DEBUG_USER=y
CONFIG_DEBUG_ERRORS=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/arm/mach-ep93xx/Makefile b/arch/arm/mach-ep93xx/Makefile
index 33ee2c863d1..3cedcf2d39e 100644
--- a/arch/arm/mach-ep93xx/Makefile
+++ b/arch/arm/mach-ep93xx/Makefile
@@ -1,11 +1,13 @@
#
# Makefile for the linux kernel.
#
-obj-y := core.o clock.o dma-m2p.o gpio.o
+obj-y := core.o clock.o
obj-m :=
obj-n :=
obj- :=
+obj-$(CONFIG_EP93XX_DMA) += dma.o
+
obj-$(CONFIG_MACH_ADSSPHERE) += adssphere.o
obj-$(CONFIG_MACH_EDB93XX) += edb93xx.o
obj-$(CONFIG_MACH_GESBC9312) += gesbc9312.o
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 6659a0d137a..c60f081e930 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -174,14 +174,10 @@ struct sys_timer ep93xx_timer = {
/*************************************************************************
* EP93xx IRQ handling
*************************************************************************/
-extern void ep93xx_gpio_init_irq(void);
-
void __init ep93xx_init_irq(void)
{
vic_init(EP93XX_VIC1_BASE, 0, EP93XX_VIC1_VALID_IRQ_MASK, 0);
vic_init(EP93XX_VIC2_BASE, 32, EP93XX_VIC2_VALID_IRQ_MASK, 0);
-
- ep93xx_gpio_init_irq();
}
@@ -241,6 +237,24 @@ unsigned int ep93xx_chip_revision(void)
}
/*************************************************************************
+ * EP93xx GPIO
+ *************************************************************************/
+static struct resource ep93xx_gpio_resource[] = {
+ {
+ .start = EP93XX_GPIO_PHYS_BASE,
+ .end = EP93XX_GPIO_PHYS_BASE + 0xcc - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ep93xx_gpio_device = {
+ .name = "gpio-ep93xx",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ep93xx_gpio_resource),
+ .resource = ep93xx_gpio_resource,
+};
+
+/*************************************************************************
* EP93xx peripheral handling
*************************************************************************/
#define EP93XX_UART_MCR_OFFSET (0x0100)
@@ -492,11 +506,15 @@ static struct resource ep93xx_spi_resources[] = {
},
};
+static u64 ep93xx_spi_dma_mask = DMA_BIT_MASK(32);
+
static struct platform_device ep93xx_spi_device = {
.name = "ep93xx-spi",
.id = 0,
.dev = {
- .platform_data = &ep93xx_spi_master_data,
+ .platform_data = &ep93xx_spi_master_data,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
+ .dma_mask = &ep93xx_spi_dma_mask,
},
.num_resources = ARRAY_SIZE(ep93xx_spi_resources),
.resource = ep93xx_spi_resources,
@@ -870,14 +888,13 @@ void __init ep93xx_register_ac97(void)
platform_device_register(&ep93xx_pcm_device);
}
-extern void ep93xx_gpio_init(void);
-
void __init ep93xx_init_devices(void)
{
/* Disallow access to MaverickCrunch initially */
ep93xx_devcfg_clear_bits(EP93XX_SYSCON_DEVCFG_CPENA);
- ep93xx_gpio_init();
+ /* Get the GPIO working early, other devices need it */
+ platform_device_register(&ep93xx_gpio_device);
amba_device_register(&uart1_device, &iomem_resource);
amba_device_register(&uart2_device, &iomem_resource);
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
deleted file mode 100644
index a696d354b1f..00000000000
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ /dev/null
@@ -1,411 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/dma-m2p.c
- * M2P DMA handling for Cirrus EP93xx chips.
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- * Copyright (C) 2006 Applied Data Systems
- *
- * Copyright (C) 2009 Ryan Mallon <ryan@bluewatersys.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * On the EP93xx chip the following peripherals my be allocated to the 10
- * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
- *
- * I2S contains 3 Tx and 3 Rx DMA Channels
- * AAC contains 3 Tx and 3 Rx DMA Channels
- * UART1 contains 1 Tx and 1 Rx DMA Channels
- * UART2 contains 1 Tx and 1 Rx DMA Channels
- * UART3 contains 1 Tx and 1 Rx DMA Channels
- * IrDA contains 1 Tx and 1 Rx DMA Channels
- *
- * SSP and IDE use the Memory to Memory (M2M) channels and are not covered
- * with this implementation.
- */
-
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <mach/dma.h>
-#include <mach/hardware.h>
-
-#define M2P_CONTROL 0x00
-#define M2P_CONTROL_STALL_IRQ_EN (1 << 0)
-#define M2P_CONTROL_NFB_IRQ_EN (1 << 1)
-#define M2P_CONTROL_ERROR_IRQ_EN (1 << 3)
-#define M2P_CONTROL_ENABLE (1 << 4)
-#define M2P_INTERRUPT 0x04
-#define M2P_INTERRUPT_STALL (1 << 0)
-#define M2P_INTERRUPT_NFB (1 << 1)
-#define M2P_INTERRUPT_ERROR (1 << 3)
-#define M2P_PPALLOC 0x08
-#define M2P_STATUS 0x0c
-#define M2P_REMAIN 0x14
-#define M2P_MAXCNT0 0x20
-#define M2P_BASE0 0x24
-#define M2P_MAXCNT1 0x30
-#define M2P_BASE1 0x34
-
-#define STATE_IDLE 0 /* Channel is inactive. */
-#define STATE_STALL 1 /* Channel is active, no buffers pending. */
-#define STATE_ON 2 /* Channel is active, one buffer pending. */
-#define STATE_NEXT 3 /* Channel is active, two buffers pending. */
-
-struct m2p_channel {
- char *name;
- void __iomem *base;
- int irq;
-
- struct clk *clk;
- spinlock_t lock;
-
- void *client;
- unsigned next_slot:1;
- struct ep93xx_dma_buffer *buffer_xfer;
- struct ep93xx_dma_buffer *buffer_next;
- struct list_head buffers_pending;
-};
-
-static struct m2p_channel m2p_rx[] = {
- {"m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1},
- {"m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3},
- {"m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5},
- {"m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7},
- {"m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9},
- {NULL},
-};
-
-static struct m2p_channel m2p_tx[] = {
- {"m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0},
- {"m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2},
- {"m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4},
- {"m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6},
- {"m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8},
- {NULL},
-};
-
-static void feed_buf(struct m2p_channel *ch, struct ep93xx_dma_buffer *buf)
-{
- if (ch->next_slot == 0) {
- writel(buf->size, ch->base + M2P_MAXCNT0);
- writel(buf->bus_addr, ch->base + M2P_BASE0);
- } else {
- writel(buf->size, ch->base + M2P_MAXCNT1);
- writel(buf->bus_addr, ch->base + M2P_BASE1);
- }
- ch->next_slot ^= 1;
-}
-
-static void choose_buffer_xfer(struct m2p_channel *ch)
-{
- struct ep93xx_dma_buffer *buf;
-
- ch->buffer_xfer = NULL;
- if (!list_empty(&ch->buffers_pending)) {
- buf = list_entry(ch->buffers_pending.next,
- struct ep93xx_dma_buffer, list);
- list_del(&buf->list);
- feed_buf(ch, buf);
- ch->buffer_xfer = buf;
- }
-}
-
-static void choose_buffer_next(struct m2p_channel *ch)
-{
- struct ep93xx_dma_buffer *buf;
-
- ch->buffer_next = NULL;
- if (!list_empty(&ch->buffers_pending)) {
- buf = list_entry(ch->buffers_pending.next,
- struct ep93xx_dma_buffer, list);
- list_del(&buf->list);
- feed_buf(ch, buf);
- ch->buffer_next = buf;
- }
-}
-
-static inline void m2p_set_control(struct m2p_channel *ch, u32 v)
-{
- /*
- * The control register must be read immediately after being written so
- * that the internal state machine is correctly updated. See the ep93xx
- * users' guide for details.
- */
- writel(v, ch->base + M2P_CONTROL);
- readl(ch->base + M2P_CONTROL);
-}
-
-static inline int m2p_channel_state(struct m2p_channel *ch)
-{
- return (readl(ch->base + M2P_STATUS) >> 4) & 0x3;
-}
-
-static irqreturn_t m2p_irq(int irq, void *dev_id)
-{
- struct m2p_channel *ch = dev_id;
- struct ep93xx_dma_m2p_client *cl;
- u32 irq_status, v;
- int error = 0;
-
- cl = ch->client;
-
- spin_lock(&ch->lock);
- irq_status = readl(ch->base + M2P_INTERRUPT);
-
- if (irq_status & M2P_INTERRUPT_ERROR) {
- writel(M2P_INTERRUPT_ERROR, ch->base + M2P_INTERRUPT);
- error = 1;
- }
-
- if ((irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) == 0) {
- spin_unlock(&ch->lock);
- return IRQ_NONE;
- }
-
- switch (m2p_channel_state(ch)) {
- case STATE_IDLE:
- pr_crit("dma interrupt without a dma buffer\n");
- BUG();
- break;
-
- case STATE_STALL:
- cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
- if (ch->buffer_next != NULL) {
- cl->buffer_finished(cl->cookie, ch->buffer_next,
- 0, error);
- }
- choose_buffer_xfer(ch);
- choose_buffer_next(ch);
- if (ch->buffer_xfer != NULL)
- cl->buffer_started(cl->cookie, ch->buffer_xfer);
- break;
-
- case STATE_ON:
- cl->buffer_finished(cl->cookie, ch->buffer_xfer, 0, error);
- ch->buffer_xfer = ch->buffer_next;
- choose_buffer_next(ch);
- cl->buffer_started(cl->cookie, ch->buffer_xfer);
- break;
-
- case STATE_NEXT:
- pr_crit("dma interrupt while next\n");
- BUG();
- break;
- }
-
- v = readl(ch->base + M2P_CONTROL) & ~(M2P_CONTROL_STALL_IRQ_EN |
- M2P_CONTROL_NFB_IRQ_EN);
- if (ch->buffer_xfer != NULL)
- v |= M2P_CONTROL_STALL_IRQ_EN;
- if (ch->buffer_next != NULL)
- v |= M2P_CONTROL_NFB_IRQ_EN;
- m2p_set_control(ch, v);
-
- spin_unlock(&ch->lock);
- return IRQ_HANDLED;
-}
-
-static struct m2p_channel *find_free_channel(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch;
- int i;
-
- if (cl->flags & EP93XX_DMA_M2P_RX)
- ch = m2p_rx;
- else
- ch = m2p_tx;
-
- for (i = 0; ch[i].base; i++) {
- struct ep93xx_dma_m2p_client *client;
-
- client = ch[i].client;
- if (client != NULL) {
- int port;
-
- port = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
- if (port == (client->flags &
- EP93XX_DMA_M2P_PORT_MASK)) {
- pr_warning("DMA channel already used by %s\n",
- cl->name ? : "unknown client");
- return ERR_PTR(-EBUSY);
- }
- }
- }
-
- for (i = 0; ch[i].base; i++) {
- if (ch[i].client == NULL)
- return ch + i;
- }
-
- pr_warning("No free DMA channel for %s\n",
- cl->name ? : "unknown client");
- return ERR_PTR(-ENODEV);
-}
-
-static void channel_enable(struct m2p_channel *ch)
-{
- struct ep93xx_dma_m2p_client *cl = ch->client;
- u32 v;
-
- clk_enable(ch->clk);
-
- v = cl->flags & EP93XX_DMA_M2P_PORT_MASK;
- writel(v, ch->base + M2P_PPALLOC);
-
- v = cl->flags & EP93XX_DMA_M2P_ERROR_MASK;
- v |= M2P_CONTROL_ENABLE | M2P_CONTROL_ERROR_IRQ_EN;
- m2p_set_control(ch, v);
-}
-
-static void channel_disable(struct m2p_channel *ch)
-{
- u32 v;
-
- v = readl(ch->base + M2P_CONTROL);
- v &= ~(M2P_CONTROL_STALL_IRQ_EN | M2P_CONTROL_NFB_IRQ_EN);
- m2p_set_control(ch, v);
-
- while (m2p_channel_state(ch) >= STATE_ON)
- cpu_relax();
-
- m2p_set_control(ch, 0x0);
-
- while (m2p_channel_state(ch) == STATE_STALL)
- cpu_relax();
-
- clk_disable(ch->clk);
-}
-
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch;
- int err;
-
- ch = find_free_channel(cl);
- if (IS_ERR(ch))
- return PTR_ERR(ch);
-
- err = request_irq(ch->irq, m2p_irq, 0, cl->name ? : "dma-m2p", ch);
- if (err)
- return err;
-
- ch->client = cl;
- ch->next_slot = 0;
- ch->buffer_xfer = NULL;
- ch->buffer_next = NULL;
- INIT_LIST_HEAD(&ch->buffers_pending);
-
- cl->channel = ch;
-
- channel_enable(ch);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_register);
-
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch = cl->channel;
-
- channel_disable(ch);
- free_irq(ch->irq, ch);
- ch->client = NULL;
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_client_unregister);
-
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *cl,
- struct ep93xx_dma_buffer *buf)
-{
- struct m2p_channel *ch = cl->channel;
- unsigned long flags;
- u32 v;
-
- spin_lock_irqsave(&ch->lock, flags);
- v = readl(ch->base + M2P_CONTROL);
- if (ch->buffer_xfer == NULL) {
- ch->buffer_xfer = buf;
- feed_buf(ch, buf);
- cl->buffer_started(cl->cookie, buf);
-
- v |= M2P_CONTROL_STALL_IRQ_EN;
- m2p_set_control(ch, v);
-
- } else if (ch->buffer_next == NULL) {
- ch->buffer_next = buf;
- feed_buf(ch, buf);
-
- v |= M2P_CONTROL_NFB_IRQ_EN;
- m2p_set_control(ch, v);
- } else {
- list_add_tail(&buf->list, &ch->buffers_pending);
- }
- spin_unlock_irqrestore(&ch->lock, flags);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit);
-
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *cl,
- struct ep93xx_dma_buffer *buf)
-{
- struct m2p_channel *ch = cl->channel;
-
- list_add_tail(&buf->list, &ch->buffers_pending);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_submit_recursive);
-
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *cl)
-{
- struct m2p_channel *ch = cl->channel;
-
- channel_disable(ch);
- ch->next_slot = 0;
- ch->buffer_xfer = NULL;
- ch->buffer_next = NULL;
- INIT_LIST_HEAD(&ch->buffers_pending);
- channel_enable(ch);
-}
-EXPORT_SYMBOL_GPL(ep93xx_dma_m2p_flush);
-
-static int init_channel(struct m2p_channel *ch)
-{
- ch->clk = clk_get(NULL, ch->name);
- if (IS_ERR(ch->clk))
- return PTR_ERR(ch->clk);
-
- spin_lock_init(&ch->lock);
- ch->client = NULL;
-
- return 0;
-}
-
-static int __init ep93xx_dma_m2p_init(void)
-{
- int i;
- int ret;
-
- for (i = 0; m2p_rx[i].base; i++) {
- ret = init_channel(m2p_rx + i);
- if (ret)
- return ret;
- }
-
- for (i = 0; m2p_tx[i].base; i++) {
- ret = init_channel(m2p_tx + i);
- if (ret)
- return ret;
- }
-
- pr_info("M2P DMA subsystem initialized\n");
- return 0;
-}
-arch_initcall(ep93xx_dma_m2p_init);
diff --git a/arch/arm/mach-ep93xx/dma.c b/arch/arm/mach-ep93xx/dma.c
new file mode 100644
index 00000000000..5a257088125
--- /dev/null
+++ b/arch/arm/mach-ep93xx/dma.c
@@ -0,0 +1,108 @@
+/*
+ * arch/arm/mach-ep93xx/dma.c
+ *
+ * Platform support code for the EP93xx dmaengine driver.
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * This work is based on the original dma-m2p implementation with
+ * following copyrights:
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or (at
+ * your option) any later version.
+ */
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <mach/dma.h>
+#include <mach/hardware.h>
+
+#define DMA_CHANNEL(_name, _base, _irq) \
+ { .name = (_name), .base = (_base), .irq = (_irq) }
+
+/*
+ * DMA M2P channels.
+ *
+ * On the EP93xx chip the following peripherals may be allocated to the 10
+ * Memory to Internal Peripheral (M2P) channels (5 transmit + 5 receive).
+ *
+ * I2S contains 3 Tx and 3 Rx DMA channels
+ * AAC contains 3 Tx and 3 Rx DMA channels
+ * UART1 contains 1 Tx and 1 Rx DMA channel
+ * UART2 contains 1 Tx and 1 Rx DMA channel
+ * UART3 contains 1 Tx and 1 Rx DMA channel
+ * IrDA contains 1 Tx and 1 Rx DMA channel
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2p_channels[] = {
+ DMA_CHANNEL("m2p0", EP93XX_DMA_BASE + 0x0000, IRQ_EP93XX_DMAM2P0),
+ DMA_CHANNEL("m2p1", EP93XX_DMA_BASE + 0x0040, IRQ_EP93XX_DMAM2P1),
+ DMA_CHANNEL("m2p2", EP93XX_DMA_BASE + 0x0080, IRQ_EP93XX_DMAM2P2),
+ DMA_CHANNEL("m2p3", EP93XX_DMA_BASE + 0x00c0, IRQ_EP93XX_DMAM2P3),
+ DMA_CHANNEL("m2p4", EP93XX_DMA_BASE + 0x0240, IRQ_EP93XX_DMAM2P4),
+ DMA_CHANNEL("m2p5", EP93XX_DMA_BASE + 0x0200, IRQ_EP93XX_DMAM2P5),
+ DMA_CHANNEL("m2p6", EP93XX_DMA_BASE + 0x02c0, IRQ_EP93XX_DMAM2P6),
+ DMA_CHANNEL("m2p7", EP93XX_DMA_BASE + 0x0280, IRQ_EP93XX_DMAM2P7),
+ DMA_CHANNEL("m2p8", EP93XX_DMA_BASE + 0x0340, IRQ_EP93XX_DMAM2P8),
+ DMA_CHANNEL("m2p9", EP93XX_DMA_BASE + 0x0300, IRQ_EP93XX_DMAM2P9),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2p_data = {
+ .channels = ep93xx_dma_m2p_channels,
+ .num_channels = ARRAY_SIZE(ep93xx_dma_m2p_channels),
+};
+
+static struct platform_device ep93xx_dma_m2p_device = {
+ .name = "ep93xx-dma-m2p",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xx_dma_m2p_data,
+ },
+};
+
+/*
+ * DMA M2M channels.
+ *
+ * There are 2 M2M channels which support memcpy/memset and, in addition,
+ * simple hardware requests from/to SSP and IDE. We do not implement
+ * external hardware requests.
+ *
+ * Registers are mapped statically in ep93xx_map_io().
+ */
+static struct ep93xx_dma_chan_data ep93xx_dma_m2m_channels[] = {
+ DMA_CHANNEL("m2m0", EP93XX_DMA_BASE + 0x0100, IRQ_EP93XX_DMAM2M0),
+ DMA_CHANNEL("m2m1", EP93XX_DMA_BASE + 0x0140, IRQ_EP93XX_DMAM2M1),
+};
+
+static struct ep93xx_dma_platform_data ep93xx_dma_m2m_data = {
+ .channels = ep93xx_dma_m2m_channels,
+ .num_channels = ARRAY_SIZE(ep93xx_dma_m2m_channels),
+};
+
+static struct platform_device ep93xx_dma_m2m_device = {
+ .name = "ep93xx-dma-m2m",
+ .id = -1,
+ .dev = {
+ .platform_data = &ep93xx_dma_m2m_data,
+ },
+};
+
+static int __init ep93xx_dma_init(void)
+{
+ platform_device_register(&ep93xx_dma_m2p_device);
+ platform_device_register(&ep93xx_dma_m2m_device);
+ return 0;
+}
+arch_initcall(ep93xx_dma_init);
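
For reference, the "ep93xx-dma-m2p" and "ep93xx-dma-m2m" devices above only
carry the channel tables; the dmaengine driver consumes them via platform
data. A rough sketch of how a probe routine would walk this data (hypothetical
code, not part of this patch):

	static int __init ep93xx_dma_probe_sketch(struct platform_device *pdev)
	{
		struct ep93xx_dma_platform_data *pdata = pdev->dev.platform_data;
		int i;

		/* one dmaengine channel is set up per ep93xx_dma_chan_data entry */
		for (i = 0; i < pdata->num_channels; i++) {
			struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];

			/* cdata->name picks the clock, cdata->base/irq the hardware */
			dev_info(&pdev->dev, "channel %s: irq %d\n",
				 cdata->name, cdata->irq);
		}
		return 0;
	}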
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 5e31b2b25da..46d4d876e6f 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,149 +1,93 @@
-/**
- * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
- *
- * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
- * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
- * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
- * engine.
- *
- * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
- *
- */
-
#ifndef __ASM_ARCH_DMA_H
#define __ASM_ARCH_DMA_H
-#include <linux/list.h>
#include <linux/types.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
-/**
- * struct ep93xx_dma_buffer - Information about a buffer to be transferred
- * using the DMA M2P engine
+/*
+ * M2P channels.
*
- * @list: Entry in DMA buffer list
- * @bus_addr: Physical address of the buffer
- * @size: Size of the buffer in bytes
+ * Note that these values are also directly used for setting the PPALLOC
+ * register.
*/
-struct ep93xx_dma_buffer {
- struct list_head list;
- u32 bus_addr;
- u16 size;
-};
+#define EP93XX_DMA_I2S1 0
+#define EP93XX_DMA_I2S2 1
+#define EP93XX_DMA_AAC1 2
+#define EP93XX_DMA_AAC2 3
+#define EP93XX_DMA_AAC3 4
+#define EP93XX_DMA_I2S3 5
+#define EP93XX_DMA_UART1 6
+#define EP93XX_DMA_UART2 7
+#define EP93XX_DMA_UART3 8
+#define EP93XX_DMA_IRDA 9
+/* M2M channels */
+#define EP93XX_DMA_SSP 10
+#define EP93XX_DMA_IDE 11
/**
- * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
- *
- * @name: Unique name for this client
- * @flags: Client flags
- * @cookie: User data to pass to callback functions
- * @buffer_started: Non NULL function to call when a transfer is started.
- * The arguments are the user data cookie and the DMA
- * buffer which is starting.
- * @buffer_finished: Non NULL function to call when a transfer is completed.
- * The arguments are the user data cookie, the DMA buffer
- * which has completed, and a boolean flag indicating if
- * the transfer had an error.
+ * struct ep93xx_dma_data - configuration data for the EP93xx dmaengine
+ * @port: peripheral which is requesting the channel
+ * @direction: TX/RX channel
+ * @name: optional name for the channel; this is displayed in /proc/interrupts
+ *
+ * This information is passed as the private channel parameter in a filter
+ * function. Note that this is only needed for slave/cyclic channels. For
+ * memcpy channels %NULL data should be passed.
*/
-struct ep93xx_dma_m2p_client {
- char *name;
- u8 flags;
- void *cookie;
- void (*buffer_started)(void *cookie,
- struct ep93xx_dma_buffer *buf);
- void (*buffer_finished)(void *cookie,
- struct ep93xx_dma_buffer *buf,
- int bytes, int error);
-
- /* private: Internal use only */
- void *channel;
+struct ep93xx_dma_data {
+ int port;
+ enum dma_data_direction direction;
+ const char *name;
};
-/* DMA M2P ports */
-#define EP93XX_DMA_M2P_PORT_I2S1 0x00
-#define EP93XX_DMA_M2P_PORT_I2S2 0x01
-#define EP93XX_DMA_M2P_PORT_AAC1 0x02
-#define EP93XX_DMA_M2P_PORT_AAC2 0x03
-#define EP93XX_DMA_M2P_PORT_AAC3 0x04
-#define EP93XX_DMA_M2P_PORT_I2S3 0x05
-#define EP93XX_DMA_M2P_PORT_UART1 0x06
-#define EP93XX_DMA_M2P_PORT_UART2 0x07
-#define EP93XX_DMA_M2P_PORT_UART3 0x08
-#define EP93XX_DMA_M2P_PORT_IRDA 0x09
-#define EP93XX_DMA_M2P_PORT_MASK 0x0f
-
-/* DMA M2P client flags */
-#define EP93XX_DMA_M2P_TX 0x00 /* Memory to peripheral */
-#define EP93XX_DMA_M2P_RX 0x10 /* Peripheral to memory */
-
-/*
- * DMA M2P client error handling flags. See the EP93xx users guide
- * documentation on the DMA M2P CONTROL register for more details
- */
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR 0x20 /* Abort on peripheral error */
-#define EP93XX_DMA_M2P_IGNORE_ERROR 0x40 /* Ignore peripheral errors */
-#define EP93XX_DMA_M2P_ERROR_MASK 0x60 /* Mask of error bits */
-
/**
- * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
- * subsystem
- *
- * @m2p: Client information to register
- * returns 0 on success
- *
- * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
- * client
+ * struct ep93xx_dma_chan_data - platform specific data for a DMA channel
+ * @name: name of the channel, used for getting the right clock
+ * @base: mapped registers
+ * @irq: interrupt number used by this channel
*/
-int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_chan_data {
+ const char *name;
+ void __iomem *base;
+ int irq;
+};
/**
- * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
- * subsystem
- *
- * @m2p: Client to unregister
+ * struct ep93xx_dma_platform_data - platform data for the dmaengine driver
+ * @channels: array of channels which are passed to the driver
+ * @num_channels: number of channels in the array
*
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
+ * This structure is passed to the DMA engine driver via platform data. For
+ * M2P channels, the contract is that even channels are for TX and odd for
+ * RX. There is no such requirement for the M2M channels.
*/
-void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+struct ep93xx_dma_platform_data {
+ struct ep93xx_dma_chan_data *channels;
+ size_t num_channels;
+};
-/**
- * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
- *
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * If the current or next transfer positions are free on the M2P client then
- * the transfer is started immediately. If not, the transfer is added to the
- * list of pending transfers. This function must not be called from the
- * buffer_finished callback for an M2P channel.
- *
- */
-void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
- struct ep93xx_dma_buffer *buf);
+static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
+{
+ return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
+}
/**
- * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
- * for an M2P channel
+ * ep93xx_dma_chan_direction - returns the direction in which the channel can be used
+ * @chan: channel
*
- * @m2p: DMA Client to submit the transfer on
- * @buf: DMA Buffer to submit
- *
- * This function must only be called from the buffer_finished callback for an
- * M2P channel. It is commonly used to add the next transfer in a chained list
- * of DMA transfers.
+ * This function can be used in filter functions to find out whether the
+ * channel supports a given DMA direction. Only M2P channels have such a
+ * limitation; for M2M channels the direction is configurable.
*/
-void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
- struct ep93xx_dma_buffer *buf);
+static inline enum dma_data_direction
+ep93xx_dma_chan_direction(struct dma_chan *chan)
+{
+ if (!ep93xx_dma_chan_is_m2p(chan))
+ return DMA_NONE;
-/**
- * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
- *
- * @m2p: DMA client to flush transfers on
- *
- * Any transfers currently in progress will be completed in hardware, but
- * ignored in software.
- *
- */
-void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
+ /* even channels are for TX, odd for RX */
+ return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
#endif /* __ASM_ARCH_DMA_H */
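
The helpers above are intended for dmaengine filter functions. A minimal
sketch of a slave client requesting the UART1 TX channel with this header
(hypothetical code, not part of this patch; the filter rejects channels whose
fixed M2P direction does not match):

	static bool ep93xx_uart_dma_filter(struct dma_chan *chan, void *filter_param)
	{
		struct ep93xx_dma_data *data = filter_param;

		if (data->direction != ep93xx_dma_chan_direction(chan))
			return false;

		chan->private = data;
		return true;
	}

	static struct dma_chan *ep93xx_uart1_request_tx_chan(void)
	{
		static struct ep93xx_dma_data data = {
			.port		= EP93XX_DMA_UART1,
			.direction	= DMA_TO_DEVICE,
			.name		= "uart1-tx",
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, ep93xx_uart_dma_filter, &data);
	}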
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index 9ac4d105509..c4a7b84ef06 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -98,6 +98,7 @@
#define EP93XX_SECURITY_BASE EP93XX_APB_IOMEM(0x00030000)
+#define EP93XX_GPIO_PHYS_BASE EP93XX_APB_PHYS(0x00040000)
#define EP93XX_GPIO_BASE EP93XX_APB_IOMEM(0x00040000)
#define EP93XX_GPIO_REG(x) (EP93XX_GPIO_BASE + (x))
#define EP93XX_GPIO_F_INT_STATUS EP93XX_GPIO_REG(0x5c)
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
index 0a37961b345..9bb63ac13f0 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx_spi.h
@@ -7,9 +7,11 @@ struct spi_device;
* struct ep93xx_spi_info - EP93xx specific SPI descriptor
* @num_chipselect: number of chip selects on this board, must be
* at least one
+ * @use_dma: use DMA for the transfers
*/
struct ep93xx_spi_info {
int num_chipselect;
+ bool use_dma;
};
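
With the new @use_dma flag, a board opts into DMA-assisted SPI transfers via
its platform data, e.g. (board name hypothetical; the structure is registered
through ep93xx_register_spi() as before):

	static struct ep93xx_spi_info some_board_spi_info __initdata = {
		.num_chipselect	= 1,
		.use_dma	= true,
	};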
/**
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
index a65838fc061..af1c580b06b 100644
--- a/arch/arm/mach-imx/clock-imx25.c
+++ b/arch/arm/mach-imx/clock-imx25.c
@@ -282,9 +282,10 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
- _REGISTER_CLOCK("imx25-cspi.0", NULL, cspi1_clk)
- _REGISTER_CLOCK("imx25-cspi.1", NULL, cspi2_clk)
- _REGISTER_CLOCK("imx25-cspi.2", NULL, cspi3_clk)
+ /* i.mx25 has the i.mx35 type cspi */
+ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
+ _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
+ _REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
_REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
_REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
diff --git a/arch/arm/mach-imx/mach-apf9328.c b/arch/arm/mach-imx/mach-apf9328.c
index 15e45c84e37..59d2a3b137d 100644
--- a/arch/arm/mach-imx/mach-apf9328.c
+++ b/arch/arm/mach-imx/mach-apf9328.c
@@ -115,6 +115,8 @@ static struct platform_device *devices[] __initdata = {
static void __init apf9328_init(void)
{
+ imx1_soc_init();
+
mxc_gpio_setup_multiple_pins(apf9328_pins,
ARRAY_SIZE(apf9328_pins),
"APF9328");
diff --git a/arch/arm/mach-imx/mach-armadillo5x0.c b/arch/arm/mach-imx/mach-armadillo5x0.c
index ffb40ff619b..ede2710f8b7 100644
--- a/arch/arm/mach-imx/mach-armadillo5x0.c
+++ b/arch/arm/mach-imx/mach-armadillo5x0.c
@@ -490,6 +490,8 @@ static struct platform_device *devices[] __initdata = {
*/
static void __init armadillo5x0_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(armadillo5x0_pins,
ARRAY_SIZE(armadillo5x0_pins), "armadillo5x0");
diff --git a/arch/arm/mach-imx/mach-bug.c b/arch/arm/mach-imx/mach-bug.c
index 42e4f078a19..f49470553bd 100644
--- a/arch/arm/mach-imx/mach-bug.c
+++ b/arch/arm/mach-imx/mach-bug.c
@@ -42,6 +42,8 @@ static const unsigned int bug_pins[] __initconst = {
static void __init bug_board_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(bug_pins,
ARRAY_SIZE(bug_pins), "uart-4");
imx31_add_imx_uart4(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-cpuimx27.c b/arch/arm/mach-imx/mach-cpuimx27.c
index 46a2e41d43d..87887ac5806 100644
--- a/arch/arm/mach-imx/mach-cpuimx27.c
+++ b/arch/arm/mach-imx/mach-cpuimx27.c
@@ -250,6 +250,8 @@ __setup("otg_mode=", eukrea_cpuimx27_otg_mode);
static void __init eukrea_cpuimx27_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(eukrea_cpuimx27_pins,
ARRAY_SIZE(eukrea_cpuimx27_pins), "CPUIMX27");
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index 3f8ef825fa6..f39a478ba1a 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -156,6 +156,8 @@ __setup("otg_mode=", eukrea_cpuimx35_otg_mode);
*/
static void __init eukrea_cpuimx35_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx35_pads,
ARRAY_SIZE(eukrea_cpuimx35_pads));
diff --git a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
index 148cff2819b..da36da52969 100644
--- a/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
+++ b/arch/arm/mach-imx/mach-eukrea_cpuimx25.c
@@ -125,6 +125,8 @@ __setup("otg_mode=", eukrea_cpuimx25_otg_mode);
static void __init eukrea_cpuimx25_init(void)
{
+ imx25_soc_init();
+
if (mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx25_pads,
ARRAY_SIZE(eukrea_cpuimx25_pads)))
printk(KERN_ERR "error setting cpuimx25 pads !\n");
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index 7ae43b1ec51..c6269d60ddb 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -231,6 +231,8 @@ static void __init visstrim_m10_board_init(void)
{
int ret;
+ imx27_soc_init();
+
ret = mxc_gpio_setup_multiple_pins(visstrim_m10_pins,
ARRAY_SIZE(visstrim_m10_pins), "VISSTRIM_M10");
if (ret)
diff --git a/arch/arm/mach-imx/mach-imx27ipcam.c b/arch/arm/mach-imx/mach-imx27ipcam.c
index 9be6cd6fbf8..272f793e924 100644
--- a/arch/arm/mach-imx/mach-imx27ipcam.c
+++ b/arch/arm/mach-imx/mach-imx27ipcam.c
@@ -50,6 +50,8 @@ static const int mx27ipcam_pins[] __initconst = {
static void __init mx27ipcam_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27ipcam_pins, ARRAY_SIZE(mx27ipcam_pins),
"mx27ipcam");
diff --git a/arch/arm/mach-imx/mach-imx27lite.c b/arch/arm/mach-imx/mach-imx27lite.c
index 841140516ed..d81a769fe89 100644
--- a/arch/arm/mach-imx/mach-imx27lite.c
+++ b/arch/arm/mach-imx/mach-imx27lite.c
@@ -59,6 +59,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
static void __init mx27lite_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins),
"imx27lite");
imx27_add_imx_uart0(&uart_pdata);
diff --git a/arch/arm/mach-imx/mach-kzm_arm11_01.c b/arch/arm/mach-imx/mach-kzm_arm11_01.c
index 1ecae20cf4e..e472a1d8805 100644
--- a/arch/arm/mach-imx/mach-kzm_arm11_01.c
+++ b/arch/arm/mach-imx/mach-kzm_arm11_01.c
@@ -223,6 +223,8 @@ static int kzm_pins[] __initdata = {
*/
static void __init kzm_board_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(kzm_pins,
ARRAY_SIZE(kzm_pins), "kzm");
kzm_init_ext_uart();
diff --git a/arch/arm/mach-imx/mach-mx1ads.c b/arch/arm/mach-imx/mach-mx1ads.c
index 38ec5cbbda9..5cd8bee4696 100644
--- a/arch/arm/mach-imx/mach-mx1ads.c
+++ b/arch/arm/mach-imx/mach-mx1ads.c
@@ -115,6 +115,8 @@ static struct i2c_board_info mx1ads_i2c_devices[] = {
*/
static void __init mx1ads_init(void)
{
+ imx1_soc_init();
+
mxc_gpio_setup_multiple_pins(mx1ads_pins,
ARRAY_SIZE(mx1ads_pins), "mx1ads");
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index 74ac88978dd..d389ecf9b5a 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -279,6 +279,8 @@ static struct platform_device *platform_devices[] __initdata = {
static void __init mx21ads_board_init(void)
{
+ imx21_soc_init();
+
mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins),
"mx21ads");
diff --git a/arch/arm/mach-imx/mach-mx25_3ds.c b/arch/arm/mach-imx/mach-mx25_3ds.c
index 58ea3fdf091..01534bb6130 100644
--- a/arch/arm/mach-imx/mach-mx25_3ds.c
+++ b/arch/arm/mach-imx/mach-mx25_3ds.c
@@ -219,6 +219,8 @@ static const struct esdhc_platform_data mx25pdk_esdhc_pdata __initconst = {
static void __init mx25pdk_init(void)
{
+ imx25_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
ARRAY_SIZE(mx25pdk_pads));
diff --git a/arch/arm/mach-imx/mach-mx27_3ds.c b/arch/arm/mach-imx/mach-mx27_3ds.c
index 6e1accf93f8..117ce0a50f4 100644
--- a/arch/arm/mach-imx/mach-mx27_3ds.c
+++ b/arch/arm/mach-imx/mach-mx27_3ds.c
@@ -267,6 +267,8 @@ static const struct imxi2c_platform_data mx27_3ds_i2c0_data __initconst = {
static void __init mx27pdk_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27pdk_pins, ARRAY_SIZE(mx27pdk_pins),
"mx27pdk");
mx27_3ds_sdhc1_enable_level_translator();
diff --git a/arch/arm/mach-imx/mach-mx27ads.c b/arch/arm/mach-imx/mach-mx27ads.c
index 1db79506f5e..fc26ed71b9e 100644
--- a/arch/arm/mach-imx/mach-mx27ads.c
+++ b/arch/arm/mach-imx/mach-mx27ads.c
@@ -288,6 +288,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
static void __init mx27ads_board_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mx27ads_pins, ARRAY_SIZE(mx27ads_pins),
"mx27ads");
diff --git a/arch/arm/mach-imx/mach-mx31_3ds.c b/arch/arm/mach-imx/mach-mx31_3ds.c
index b5ecc26d08a..441fbb83f39 100644
--- a/arch/arm/mach-imx/mach-mx31_3ds.c
+++ b/arch/arm/mach-imx/mach-mx31_3ds.c
@@ -690,6 +690,8 @@ static void __init mx31_3ds_init(void)
{
int ret;
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(mx31_3ds_pins, ARRAY_SIZE(mx31_3ds_pins),
"mx31_3ds");
diff --git a/arch/arm/mach-imx/mach-mx31ads.c b/arch/arm/mach-imx/mach-mx31ads.c
index f4dee025463..0ce49478a47 100644
--- a/arch/arm/mach-imx/mach-mx31ads.c
+++ b/arch/arm/mach-imx/mach-mx31ads.c
@@ -516,6 +516,8 @@ static void __init mx31ads_init_irq(void)
static void __init mx31ads_init(void)
{
+ imx31_soc_init();
+
mxc_init_extuart();
mxc_init_imx_uart();
mxc_init_i2c();
diff --git a/arch/arm/mach-imx/mach-mx31lilly.c b/arch/arm/mach-imx/mach-mx31lilly.c
index 410e676ae08..750368ddf0f 100644
--- a/arch/arm/mach-imx/mach-mx31lilly.c
+++ b/arch/arm/mach-imx/mach-mx31lilly.c
@@ -243,6 +243,8 @@ core_param(mx31lilly_baseboard, mx31lilly_baseboard, int, 0444);
static void __init mx31lilly_board_init(void)
{
+ imx31_soc_init();
+
switch (mx31lilly_baseboard) {
case MX31LILLY_NOBOARD:
break;
diff --git a/arch/arm/mach-imx/mach-mx31lite.c b/arch/arm/mach-imx/mach-mx31lite.c
index ac9b4cad320..4b47fd9fdd8 100644
--- a/arch/arm/mach-imx/mach-mx31lite.c
+++ b/arch/arm/mach-imx/mach-mx31lite.c
@@ -230,6 +230,8 @@ static void __init mx31lite_init(void)
{
int ret;
+ imx31_soc_init();
+
switch (mx31lite_baseboard) {
case MX31LITE_NOBOARD:
break;
diff --git a/arch/arm/mach-imx/mach-mx31moboard.c b/arch/arm/mach-imx/mach-mx31moboard.c
index eaa51e49ca9..a52fd36e2b5 100644
--- a/arch/arm/mach-imx/mach-mx31moboard.c
+++ b/arch/arm/mach-imx/mach-mx31moboard.c
@@ -507,6 +507,8 @@ core_param(mx31moboard_baseboard, mx31moboard_baseboard, int, 0444);
*/
static void __init mx31moboard_init(void)
{
+ imx31_soc_init();
+
mxc_iomux_setup_multiple_pins(moboard_pins, ARRAY_SIZE(moboard_pins),
"moboard");
diff --git a/arch/arm/mach-imx/mach-mx35_3ds.c b/arch/arm/mach-imx/mach-mx35_3ds.c
index 882880ac1bb..48b3c6fd5cf 100644
--- a/arch/arm/mach-imx/mach-mx35_3ds.c
+++ b/arch/arm/mach-imx/mach-mx35_3ds.c
@@ -179,6 +179,8 @@ static const struct imxi2c_platform_data mx35_3ds_i2c0_data __initconst = {
*/
static void __init mx35_3ds_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx35pdk_pads, ARRAY_SIZE(mx35pdk_pads));
imx35_add_fec(NULL);
diff --git a/arch/arm/mach-imx/mach-mxt_td60.c b/arch/arm/mach-imx/mach-mxt_td60.c
index 2774541511e..c85876fed66 100644
--- a/arch/arm/mach-imx/mach-mxt_td60.c
+++ b/arch/arm/mach-imx/mach-mxt_td60.c
@@ -233,6 +233,8 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
static void __init mxt_td60_board_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(mxt_td60_pins, ARRAY_SIZE(mxt_td60_pins),
"MXT_TD60");
diff --git a/arch/arm/mach-imx/mach-pca100.c b/arch/arm/mach-imx/mach-pca100.c
index bbddc5a11c4..71083aa1603 100644
--- a/arch/arm/mach-imx/mach-pca100.c
+++ b/arch/arm/mach-imx/mach-pca100.c
@@ -357,6 +357,8 @@ static void __init pca100_init(void)
{
int ret;
+ imx27_soc_init();
+
/* SSI unit */
mxc_audmux_v1_configure_port(MX27_AUDMUX_HPCR1_SSI0,
MXC_AUDMUX_V1_PCR_SYN | /* 4wire mode */
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 89c213b8129..f45b7cd72c8 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -576,6 +576,8 @@ static void __init pcm037_init(void)
{
int ret;
+ imx31_soc_init();
+
mxc_iomux_set_gpr(MUX_PGP_UH2, 1);
mxc_iomux_setup_multiple_pins(pcm037_pins, ARRAY_SIZE(pcm037_pins),
diff --git a/arch/arm/mach-imx/mach-pcm038.c b/arch/arm/mach-imx/mach-pcm038.c
index 853bb871c7e..2d6a64bbac4 100644
--- a/arch/arm/mach-imx/mach-pcm038.c
+++ b/arch/arm/mach-imx/mach-pcm038.c
@@ -295,6 +295,8 @@ static const struct mxc_usbh_platform_data usbh2_pdata __initconst = {
static void __init pcm038_init(void)
{
+ imx27_soc_init();
+
mxc_gpio_setup_multiple_pins(pcm038_pins, ARRAY_SIZE(pcm038_pins),
"PCM038");
diff --git a/arch/arm/mach-imx/mach-pcm043.c b/arch/arm/mach-imx/mach-pcm043.c
index 026441628df..163cc318caf 100644
--- a/arch/arm/mach-imx/mach-pcm043.c
+++ b/arch/arm/mach-imx/mach-pcm043.c
@@ -356,6 +356,8 @@ static struct esdhc_platform_data sd1_pdata = {
*/
static void __init pcm043_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(pcm043_pads, ARRAY_SIZE(pcm043_pads));
mxc_audmux_v2_configure_port(3,
diff --git a/arch/arm/mach-imx/mach-qong.c b/arch/arm/mach-imx/mach-qong.c
index c1632871593..3626f486498 100644
--- a/arch/arm/mach-imx/mach-qong.c
+++ b/arch/arm/mach-imx/mach-qong.c
@@ -244,6 +244,8 @@ static void __init qong_init_fpga(void)
*/
static void __init qong_init(void)
{
+ imx31_soc_init();
+
mxc_init_imx_uart();
qong_init_nor_mtd();
qong_init_fpga();
diff --git a/arch/arm/mach-imx/mach-scb9328.c b/arch/arm/mach-imx/mach-scb9328.c
index dcaee043628..82805260e19 100644
--- a/arch/arm/mach-imx/mach-scb9328.c
+++ b/arch/arm/mach-imx/mach-scb9328.c
@@ -129,6 +129,8 @@ static struct platform_device *devices[] __initdata = {
*/
static void __init scb9328_init(void)
{
+ imx1_soc_init();
+
imx1_add_imx_uart0(&uart_pdata);
printk(KERN_INFO"Scb9328: Adding devices\n");
diff --git a/arch/arm/mach-imx/mach-vpr200.c b/arch/arm/mach-imx/mach-vpr200.c
index d74e3473d23..7d8e012a633 100644
--- a/arch/arm/mach-imx/mach-vpr200.c
+++ b/arch/arm/mach-imx/mach-vpr200.c
@@ -267,6 +267,8 @@ static struct platform_device *devices[] __initdata = {
*/
static void __init vpr200_board_init(void)
{
+ imx35_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(vpr200_pads, ARRAY_SIZE(vpr200_pads));
imx35_add_fec(NULL);
diff --git a/arch/arm/mach-imx/mm-imx1.c b/arch/arm/mach-imx/mm-imx1.c
index 2e482ba5a0e..2bded591d5c 100644
--- a/arch/arm/mach-imx/mm-imx1.c
+++ b/arch/arm/mach-imx/mm-imx1.c
@@ -23,7 +23,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/iomux-v1.h>
@@ -44,15 +43,19 @@ void __init imx1_init_early(void)
MX1_NUM_GPIO_PORT);
}
-static struct mxc_gpio_port imx1_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 0, 1, MX1_GPIO_INT_PORTA),
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 1, 2, MX1_GPIO_INT_PORTB),
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 2, 3, MX1_GPIO_INT_PORTC),
- DEFINE_IMX_GPIO_PORT_IRQ(MX1, 3, 4, MX1_GPIO_INT_PORTD),
-};
-
void __init mx1_init_irq(void)
{
mxc_init_irq(MX1_IO_ADDRESS(MX1_AVIC_BASE_ADDR));
- mxc_gpio_init(imx1_gpio_ports, ARRAY_SIZE(imx1_gpio_ports));
+}
+
+void __init imx1_soc_init(void)
+{
+ mxc_register_gpio("imx1-gpio", 0, MX1_GPIO1_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTA, 0);
+ mxc_register_gpio("imx1-gpio", 1, MX1_GPIO2_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTB, 0);
+ mxc_register_gpio("imx1-gpio", 2, MX1_GPIO3_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTC, 0);
+ mxc_register_gpio("imx1-gpio", 3, MX1_GPIO4_BASE_ADDR, SZ_256,
+ MX1_GPIO_INT_PORTD, 0);
}
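
The mxc_register_gpio() calls used throughout these conversions come from the
plat-mxc side of the series; under the assumed signature below, each call
registers one "imxNN-gpio" platform device per port, taking the device name,
port id, physical base address, region size, and the port's low/high interrupt
lines (0 where a port has no dedicated high-bank interrupt):

	struct platform_device *__init mxc_register_gpio(char *name, int id,
		resource_size_t iobase, resource_size_t iosize,
		int irq, int irq_high);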
diff --git a/arch/arm/mach-imx/mm-imx21.c b/arch/arm/mach-imx/mm-imx21.c
index 7a0c500ac2c..6d7d518686a 100644
--- a/arch/arm/mach-imx/mm-imx21.c
+++ b/arch/arm/mach-imx/mm-imx21.c
@@ -24,7 +24,6 @@
#include <mach/common.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/iomux-v1.h>
@@ -70,17 +69,17 @@ void __init imx21_init_early(void)
MX21_NUM_GPIO_PORT);
}
-static struct mxc_gpio_port imx21_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX21, 0, 1, MX21_INT_GPIO),
- DEFINE_IMX_GPIO_PORT(MX21, 1, 2),
- DEFINE_IMX_GPIO_PORT(MX21, 2, 3),
- DEFINE_IMX_GPIO_PORT(MX21, 3, 4),
- DEFINE_IMX_GPIO_PORT(MX21, 4, 5),
- DEFINE_IMX_GPIO_PORT(MX21, 5, 6),
-};
-
void __init mx21_init_irq(void)
{
mxc_init_irq(MX21_IO_ADDRESS(MX21_AVIC_BASE_ADDR));
- mxc_gpio_init(imx21_gpio_ports, ARRAY_SIZE(imx21_gpio_ports));
+}
+
+void __init imx21_soc_init(void)
+{
+ mxc_register_gpio("imx21-gpio", 0, MX21_GPIO1_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 1, MX21_GPIO2_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 2, MX21_GPIO3_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 3, MX21_GPIO4_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 4, MX21_GPIO5_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 5, MX21_GPIO6_BASE_ADDR, SZ_256, MX21_INT_GPIO, 0);
}
diff --git a/arch/arm/mach-imx/mm-imx25.c b/arch/arm/mach-imx/mm-imx25.c
index 02f7b5c7fa8..9a1591c2508 100644
--- a/arch/arm/mach-imx/mm-imx25.c
+++ b/arch/arm/mach-imx/mm-imx25.c
@@ -27,7 +27,6 @@
#include <mach/hardware.h>
#include <mach/mx25.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
/*
@@ -57,16 +56,16 @@ void __init imx25_init_early(void)
mxc_arch_reset_init(MX25_IO_ADDRESS(MX25_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx25_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 0, 1, MX25_INT_GPIO1),
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 1, 2, MX25_INT_GPIO2),
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 2, 3, MX25_INT_GPIO3),
- DEFINE_IMX_GPIO_PORT_IRQ(MX25, 3, 4, MX25_INT_GPIO4),
-};
-
void __init mx25_init_irq(void)
{
mxc_init_irq(MX25_IO_ADDRESS(MX25_AVIC_BASE_ADDR));
- mxc_gpio_init(imx25_gpio_ports, ARRAY_SIZE(imx25_gpio_ports));
}
+void __init imx25_soc_init(void)
+{
+ /* i.mx25 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX25_GPIO1_BASE_ADDR, SZ_16K, MX25_INT_GPIO1, 0);
+ mxc_register_gpio("imx31-gpio", 1, MX25_GPIO2_BASE_ADDR, SZ_16K, MX25_INT_GPIO2, 0);
+ mxc_register_gpio("imx31-gpio", 2, MX25_GPIO3_BASE_ADDR, SZ_16K, MX25_INT_GPIO3, 0);
+ mxc_register_gpio("imx31-gpio", 3, MX25_GPIO4_BASE_ADDR, SZ_16K, MX25_INT_GPIO4, 0);
+}
diff --git a/arch/arm/mach-imx/mm-imx27.c b/arch/arm/mach-imx/mm-imx27.c
index a6761a39f08..133b30003dd 100644
--- a/arch/arm/mach-imx/mm-imx27.c
+++ b/arch/arm/mach-imx/mm-imx27.c
@@ -24,7 +24,6 @@
#include <mach/common.h>
#include <asm/pgtable.h>
#include <asm/mach/map.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/iomux-v1.h>
@@ -70,17 +69,18 @@ void __init imx27_init_early(void)
MX27_NUM_GPIO_PORT);
}
-static struct mxc_gpio_port imx27_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX27, 0, 1, MX27_INT_GPIO),
- DEFINE_IMX_GPIO_PORT(MX27, 1, 2),
- DEFINE_IMX_GPIO_PORT(MX27, 2, 3),
- DEFINE_IMX_GPIO_PORT(MX27, 3, 4),
- DEFINE_IMX_GPIO_PORT(MX27, 4, 5),
- DEFINE_IMX_GPIO_PORT(MX27, 5, 6),
-};
-
void __init mx27_init_irq(void)
{
mxc_init_irq(MX27_IO_ADDRESS(MX27_AVIC_BASE_ADDR));
- mxc_gpio_init(imx27_gpio_ports, ARRAY_SIZE(imx27_gpio_ports));
+}
+
+void __init imx27_soc_init(void)
+{
+ /* i.mx27 has the i.mx21 type gpio */
+ mxc_register_gpio("imx21-gpio", 0, MX27_GPIO1_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 1, MX27_GPIO2_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 2, MX27_GPIO3_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 3, MX27_GPIO4_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 4, MX27_GPIO5_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
+ mxc_register_gpio("imx21-gpio", 5, MX27_GPIO6_BASE_ADDR, SZ_256, MX27_INT_GPIO, 0);
}
diff --git a/arch/arm/mach-imx/mm-imx31.c b/arch/arm/mach-imx/mm-imx31.c
index 86b9b45864d..6d103c01b8b 100644
--- a/arch/arm/mach-imx/mm-imx31.c
+++ b/arch/arm/mach-imx/mm-imx31.c
@@ -26,7 +26,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
static struct map_desc mx31_io_desc[] __initdata = {
@@ -53,14 +52,14 @@ void __init imx31_init_early(void)
mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx31_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX31, 0, 1, MX31_INT_GPIO1),
- DEFINE_IMX_GPIO_PORT_IRQ(MX31, 1, 2, MX31_INT_GPIO2),
- DEFINE_IMX_GPIO_PORT_IRQ(MX31, 2, 3, MX31_INT_GPIO3),
-};
-
void __init mx31_init_irq(void)
{
mxc_init_irq(MX31_IO_ADDRESS(MX31_AVIC_BASE_ADDR));
- mxc_gpio_init(imx31_gpio_ports, ARRAY_SIZE(imx31_gpio_ports));
+}
+
+void __init imx31_soc_init(void)
+{
+ mxc_register_gpio("imx31-gpio", 0, MX31_GPIO1_BASE_ADDR, SZ_16K, MX31_INT_GPIO1, 0);
+ mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
+ mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
}
diff --git a/arch/arm/mach-imx/mm-imx35.c b/arch/arm/mach-imx/mm-imx35.c
index c880e6d1ae5..bb068bc8dab 100644
--- a/arch/arm/mach-imx/mm-imx35.c
+++ b/arch/arm/mach-imx/mm-imx35.c
@@ -27,7 +27,6 @@
#include <mach/common.h>
#include <mach/hardware.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
static struct map_desc mx35_io_desc[] __initdata = {
@@ -50,14 +49,15 @@ void __init imx35_init_early(void)
mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx35_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ(MX35, 0, 1, MX35_INT_GPIO1),
- DEFINE_IMX_GPIO_PORT_IRQ(MX35, 1, 2, MX35_INT_GPIO2),
- DEFINE_IMX_GPIO_PORT_IRQ(MX35, 2, 3, MX35_INT_GPIO3),
-};
-
void __init mx35_init_irq(void)
{
mxc_init_irq(MX35_IO_ADDRESS(MX35_AVIC_BASE_ADDR));
- mxc_gpio_init(imx35_gpio_ports, ARRAY_SIZE(imx35_gpio_ports));
+}
+
+void __init imx35_soc_init(void)
+{
+ /* i.mx35 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX35_GPIO1_BASE_ADDR, SZ_16K, MX35_INT_GPIO1, 0);
+ mxc_register_gpio("imx31-gpio", 1, MX35_GPIO2_BASE_ADDR, SZ_16K, MX35_INT_GPIO2, 0);
+ mxc_register_gpio("imx31-gpio", 2, MX35_GPIO3_BASE_ADDR, SZ_16K, MX35_INT_GPIO3, 0);
}
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index 7bb78fd5a2a..c79162a50f2 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -177,9 +177,16 @@ static struct i2c_board_info brownstone_twsi1_info[] = {
};
static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
- .max_speed = 25000000,
+ .clk_delay_cycles = 0x1f,
};
+static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = {
+ .clk_delay_cycles = 0x1f,
+ .flags = PXA_FLAG_CARD_PERMANENT
+ | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
+};
+
+
static void __init brownstone_init(void)
{
mfp_config(ARRAY_AND_SIZE(brownstone_pin_config));
@@ -189,6 +196,7 @@ static void __init brownstone_init(void)
mmp2_add_uart(3);
mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info));
mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */
+ mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */
/* enable 5v regulator */
platform_device_register(&brownstone_v_5vp_device);
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index 2cbf6df09b8..de7b88826ad 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_MMP2_H
#define __ASM_MACH_MMP2_H
-#include <plat/sdhci.h>
+#include <linux/platform_data/pxa_sdhci.h>
struct sys_timer;
diff --git a/arch/arm/mach-mmp/jasper.c b/arch/arm/mach-mmp/jasper.c
index 24172a0aad5..5d6421d6325 100644
--- a/arch/arm/mach-mmp/jasper.c
+++ b/arch/arm/mach-mmp/jasper.c
@@ -154,7 +154,7 @@ static struct i2c_board_info jasper_twsi1_info[] = {
};
static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc0 = {
- .max_speed = 25000000,
+ .clk_delay_cycles = 0x1f,
};
static void __init jasper_init(void)
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index 8e6c3ac7f7c..079c18861d5 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -168,10 +168,10 @@ static struct clk_lookup mmp2_clkregs[] = {
INIT_CLKREG(&clk_twsi5, "pxa2xx-i2c.4", NULL),
INIT_CLKREG(&clk_twsi6, "pxa2xx-i2c.5", NULL),
INIT_CLKREG(&clk_nand, "pxa3xx-nand", NULL),
- INIT_CLKREG(&clk_sdh0, "sdhci-pxa.0", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh1, "sdhci-pxa.1", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh2, "sdhci-pxa.2", "PXA-SDHCLK"),
- INIT_CLKREG(&clk_sdh3, "sdhci-pxa.3", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh0, "sdhci-pxav3.0", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh1, "sdhci-pxav3.1", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh2, "sdhci-pxav3.2", "PXA-SDHCLK"),
+ INIT_CLKREG(&clk_sdh3, "sdhci-pxav3.3", "PXA-SDHCLK"),
};
static int __init mmp2_init(void)
@@ -222,8 +222,8 @@ MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70);
MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70);
MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70);
MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 29);
-MMP2_DEVICE(sdh0, "sdhci-pxa", 0, MMC, 0xd4280000, 0x120);
-MMP2_DEVICE(sdh1, "sdhci-pxa", 1, MMC2, 0xd4280800, 0x120);
-MMP2_DEVICE(sdh2, "sdhci-pxa", 2, MMC3, 0xd4281000, 0x120);
-MMP2_DEVICE(sdh3, "sdhci-pxa", 3, MMC4, 0xd4281800, 0x120);
+MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120);
+MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120);
+MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120);
+MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120);
diff --git a/arch/arm/mach-mx5/board-cpuimx51.c b/arch/arm/mach-mx5/board-cpuimx51.c
index 4efa02ee163..add0d42de7a 100644
--- a/arch/arm/mach-mx5/board-cpuimx51.c
+++ b/arch/arm/mach-mx5/board-cpuimx51.c
@@ -245,6 +245,8 @@ __setup("otg_mode=", eukrea_cpuimx51_otg_mode);
*/
static void __init eukrea_cpuimx51_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51_pads,
ARRAY_SIZE(eukrea_cpuimx51_pads));
diff --git a/arch/arm/mach-mx5/board-cpuimx51sd.c b/arch/arm/mach-mx5/board-cpuimx51sd.c
index 5ef25a59614..ff096d58729 100644
--- a/arch/arm/mach-mx5/board-cpuimx51sd.c
+++ b/arch/arm/mach-mx5/board-cpuimx51sd.c
@@ -264,6 +264,8 @@ static struct platform_device *platform_devices[] __initdata = {
static void __init eukrea_cpuimx51sd_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(eukrea_cpuimx51sd_pads,
ARRAY_SIZE(eukrea_cpuimx51sd_pads));
diff --git a/arch/arm/mach-mx5/board-mx50_rdp.c b/arch/arm/mach-mx5/board-mx50_rdp.c
index 11210e1ae42..7de25c6712e 100644
--- a/arch/arm/mach-mx5/board-mx50_rdp.c
+++ b/arch/arm/mach-mx5/board-mx50_rdp.c
@@ -192,6 +192,8 @@ static const struct imxi2c_platform_data i2c_data __initconst = {
*/
static void __init mx50_rdp_board_init(void)
{
+ imx50_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx50_rdp_pads,
ARRAY_SIZE(mx50_rdp_pads));
diff --git a/arch/arm/mach-mx5/board-mx51_3ds.c b/arch/arm/mach-mx5/board-mx51_3ds.c
index 63dfbeafbc1..3112d15feeb 100644
--- a/arch/arm/mach-mx5/board-mx51_3ds.c
+++ b/arch/arm/mach-mx5/board-mx51_3ds.c
@@ -135,6 +135,8 @@ static struct spi_board_info mx51_3ds_spi_nor_device[] = {
*/
static void __init mx51_3ds_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx51_3ds_pads,
ARRAY_SIZE(mx51_3ds_pads));
diff --git a/arch/arm/mach-mx5/board-mx51_babbage.c b/arch/arm/mach-mx5/board-mx51_babbage.c
index c7b3fabf50f..6021dd00ec7 100644
--- a/arch/arm/mach-mx5/board-mx51_babbage.c
+++ b/arch/arm/mach-mx5/board-mx51_babbage.c
@@ -340,6 +340,8 @@ static void __init mx51_babbage_init(void)
iomux_v3_cfg_t power_key = _MX51_PAD_EIM_A27__GPIO2_21 |
MUX_PAD_CTRL(PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+ imx51_soc_init();
+
#if defined(CONFIG_CPU_FREQ_IMX)
get_cpu_op = mx51_get_cpu_op;
#endif
diff --git a/arch/arm/mach-mx5/board-mx51_efikamx.c b/arch/arm/mach-mx5/board-mx51_efikamx.c
index 6e362315291..3be603b9075 100644
--- a/arch/arm/mach-mx5/board-mx51_efikamx.c
+++ b/arch/arm/mach-mx5/board-mx51_efikamx.c
@@ -236,6 +236,8 @@ late_initcall(mx51_efikamx_power_init);
static void __init mx51_efikamx_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx51efikamx_pads,
ARRAY_SIZE(mx51efikamx_pads));
efika_board_common_init();
diff --git a/arch/arm/mach-mx5/board-mx51_efikasb.c b/arch/arm/mach-mx5/board-mx51_efikasb.c
index 474fc6e4c6d..4b2e522de0f 100644
--- a/arch/arm/mach-mx5/board-mx51_efikasb.c
+++ b/arch/arm/mach-mx5/board-mx51_efikasb.c
@@ -248,6 +248,8 @@ static void __init mx51_efikasb_board_id(void)
static void __init efikasb_board_init(void)
{
+ imx51_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx51efikasb_pads,
ARRAY_SIZE(mx51efikasb_pads));
efika_board_common_init();
diff --git a/arch/arm/mach-mx5/board-mx53_evk.c b/arch/arm/mach-mx5/board-mx53_evk.c
index f87d571882c..0d9218a6e2d 100644
--- a/arch/arm/mach-mx5/board-mx53_evk.c
+++ b/arch/arm/mach-mx5/board-mx53_evk.c
@@ -117,6 +117,8 @@ static const struct spi_imx_master mx53_evk_spi_data __initconst = {
static void __init mx53_evk_board_init(void)
{
+ imx53_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx53_evk_pads,
ARRAY_SIZE(mx53_evk_pads));
mx53_evk_init_uart();
diff --git a/arch/arm/mach-mx5/board-mx53_loco.c b/arch/arm/mach-mx5/board-mx53_loco.c
index 1b947e8c9c0..359c3e248ad 100644
--- a/arch/arm/mach-mx5/board-mx53_loco.c
+++ b/arch/arm/mach-mx5/board-mx53_loco.c
@@ -227,6 +227,8 @@ static const struct imxi2c_platform_data mx53_loco_i2c_data __initconst = {
static void __init mx53_loco_board_init(void)
{
+ imx53_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx53_loco_pads,
ARRAY_SIZE(mx53_loco_pads));
imx53_add_imx_uart(0, NULL);
diff --git a/arch/arm/mach-mx5/board-mx53_smd.c b/arch/arm/mach-mx5/board-mx53_smd.c
index 817c08938f5..bc02894eafe 100644
--- a/arch/arm/mach-mx5/board-mx53_smd.c
+++ b/arch/arm/mach-mx5/board-mx53_smd.c
@@ -113,6 +113,8 @@ static const struct imxi2c_platform_data mx53_smd_i2c_data __initconst = {
static void __init mx53_smd_board_init(void)
{
+ imx53_soc_init();
+
mxc_iomux_v3_setup_multiple_pads(mx53_smd_pads,
ARRAY_SIZE(mx53_smd_pads));
mx53_smd_init_uart();
diff --git a/arch/arm/mach-mx5/clock-mx51-mx53.c b/arch/arm/mach-mx5/clock-mx51-mx53.c
index 6b89c1bf4eb..cd79e3435e2 100644
--- a/arch/arm/mach-mx5/clock-mx51-mx53.c
+++ b/arch/arm/mach-mx5/clock-mx51-mx53.c
@@ -1442,7 +1442,8 @@ static struct clk_lookup mx51_lookups[] = {
_REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
- _REGISTER_CLOCK("imx51-cspi.0", NULL, cspi_clk)
+ /* i.mx51 has the i.mx35 type cspi */
+ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.0", NULL, esdhc1_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_clk)
@@ -1471,9 +1472,11 @@ static struct clk_lookup mx53_lookups[] = {
_REGISTER_CLOCK("sdhci-esdhc-imx.1", NULL, esdhc2_mx53_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.2", NULL, esdhc3_mx53_clk)
_REGISTER_CLOCK("sdhci-esdhc-imx.3", NULL, esdhc4_mx53_clk)
- _REGISTER_CLOCK("imx53-ecspi.0", NULL, ecspi1_clk)
- _REGISTER_CLOCK("imx53-ecspi.1", NULL, ecspi2_clk)
- _REGISTER_CLOCK("imx53-cspi.0", NULL, cspi_clk)
+ /* i.mx53 has the i.mx51 type ecspi */
+ _REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
+ _REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
+ /* i.mx53 has the i.mx35 type cspi */
+ _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
};
diff --git a/arch/arm/mach-mx5/devices.c b/arch/arm/mach-mx5/devices.c
index 153ada53e57..371ca8c8414 100644
--- a/arch/arm/mach-mx5/devices.c
+++ b/arch/arm/mach-mx5/devices.c
@@ -12,7 +12,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/gpio.h>
#include <mach/hardware.h>
#include <mach/imx-uart.h>
#include <mach/irqs.h>
@@ -119,66 +118,3 @@ struct platform_device mxc_usbh2_device = {
.coherent_dma_mask = DMA_BIT_MASK(32),
},
};
-
-static struct mxc_gpio_port mxc_gpio_ports[] = {
- {
- .chip.label = "gpio-0",
- .base = MX51_IO_ADDRESS(MX51_GPIO1_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO1_LOW,
- .irq_high = MX51_MXC_INT_GPIO1_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START
- },
- {
- .chip.label = "gpio-1",
- .base = MX51_IO_ADDRESS(MX51_GPIO2_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO2_LOW,
- .irq_high = MX51_MXC_INT_GPIO2_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 1
- },
- {
- .chip.label = "gpio-2",
- .base = MX51_IO_ADDRESS(MX51_GPIO3_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO3_LOW,
- .irq_high = MX51_MXC_INT_GPIO3_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 2
- },
- {
- .chip.label = "gpio-3",
- .base = MX51_IO_ADDRESS(MX51_GPIO4_BASE_ADDR),
- .irq = MX51_MXC_INT_GPIO4_LOW,
- .irq_high = MX51_MXC_INT_GPIO4_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 3
- },
- {
- .chip.label = "gpio-4",
- .base = MX53_IO_ADDRESS(MX53_GPIO5_BASE_ADDR),
- .irq = MX53_INT_GPIO5_LOW,
- .irq_high = MX53_INT_GPIO5_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 4
- },
- {
- .chip.label = "gpio-5",
- .base = MX53_IO_ADDRESS(MX53_GPIO6_BASE_ADDR),
- .irq = MX53_INT_GPIO6_LOW,
- .irq_high = MX53_INT_GPIO6_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 5
- },
- {
- .chip.label = "gpio-6",
- .base = MX53_IO_ADDRESS(MX53_GPIO7_BASE_ADDR),
- .irq = MX53_INT_GPIO7_LOW,
- .irq_high = MX53_INT_GPIO7_HIGH,
- .virtual_irq_start = MXC_GPIO_IRQ_START + 32 * 6
- },
-};
-
-int __init imx51_register_gpios(void)
-{
- return mxc_gpio_init(mxc_gpio_ports, 4);
-}
-
-int __init imx53_register_gpios(void)
-{
- return mxc_gpio_init(mxc_gpio_ports, ARRAY_SIZE(mxc_gpio_ports));
-}
-
diff --git a/arch/arm/mach-mx5/mm-mx50.c b/arch/arm/mach-mx5/mm-mx50.c
index b9c363b514a..77e374c726f 100644
--- a/arch/arm/mach-mx5/mm-mx50.c
+++ b/arch/arm/mach-mx5/mm-mx50.c
@@ -26,7 +26,6 @@
#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/iomux-v3.h>
-#include <mach/gpio.h>
#include <mach/irqs.h>
/*
@@ -56,17 +55,18 @@ void __init imx50_init_early(void)
mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR));
}
-static struct mxc_gpio_port imx50_gpio_ports[] = {
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 0, 1, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 1, 2, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 2, 3, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 3, 4, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 4, 5, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(MX50, 5, 6, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH),
-};
-
void __init mx50_init_irq(void)
{
tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR));
- mxc_gpio_init(imx50_gpio_ports, ARRAY_SIZE(imx50_gpio_ports));
+}
+
+void __init imx50_soc_init(void)
+{
+ /* i.mx50 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH);
+ mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH);
+ mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH);
}
diff --git a/arch/arm/mach-mx5/mm.c b/arch/arm/mach-mx5/mm.c
index ff557301b42..665843d6c2b 100644
--- a/arch/arm/mach-mx5/mm.c
+++ b/arch/arm/mach-mx5/mm.c
@@ -69,8 +69,6 @@ void __init imx53_init_early(void)
mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR));
}
-int imx51_register_gpios(void);
-
void __init mx51_init_irq(void)
{
unsigned long tzic_addr;
@@ -86,11 +84,8 @@ void __init mx51_init_irq(void)
panic("unable to map TZIC interrupt controller\n");
tzic_init_irq(tzic_virt);
- imx51_register_gpios();
}
-int imx53_register_gpios(void);
-
void __init mx53_init_irq(void)
{
unsigned long tzic_addr;
@@ -103,5 +98,25 @@ void __init mx53_init_irq(void)
panic("unable to map TZIC interrupt controller\n");
tzic_init_irq(tzic_virt);
- imx53_register_gpios();
+}
+
+void __init imx51_soc_init(void)
+{
+ /* i.mx51 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO1_LOW, MX51_MXC_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO2_LOW, MX51_MXC_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO3_LOW, MX51_MXC_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_MXC_INT_GPIO4_LOW, MX51_MXC_INT_GPIO4_HIGH);
+}
+
+void __init imx53_soc_init(void)
+{
+ /* i.mx53 has the i.mx31 type gpio */
+ mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH);
+ mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH);
+ mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH);
+ mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH);
+ mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH);
+ mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH);
+ mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH);
}
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 58e892376bf..6c38262a3aa 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,5 +1,5 @@
# Common support
-obj-y := clock.o devices.o gpio.o icoll.o iomux.o system.o timer.o
+obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o
obj-$(CONFIG_MXS_OCOTP) += ocotp.o
obj-$(CONFIG_PM) += pm.o
diff --git a/arch/arm/mach-mxs/devices.c b/arch/arm/mach-mxs/devices.c
index cfdb6b28470..fe3e847930c 100644
--- a/arch/arm/mach-mxs/devices.c
+++ b/arch/arm/mach-mxs/devices.c
@@ -88,3 +88,14 @@ int __init mxs_add_amba_device(const struct amba_device *dev)
return amba_device_register(adev, &iomem_resource);
}
+
+struct device mxs_apbh_bus = {
+ .init_name = "mxs_apbh",
+ .parent = &platform_bus,
+};
+
+static int __init mxs_device_init(void)
+{
+ return device_register(&mxs_apbh_bus);
+}
+core_initcall(mxs_device_init);
diff --git a/arch/arm/mach-mxs/devices/Makefile b/arch/arm/mach-mxs/devices/Makefile
index 324f2824d38..351915c683f 100644
--- a/arch/arm/mach-mxs/devices/Makefile
+++ b/arch/arm/mach-mxs/devices/Makefile
@@ -6,4 +6,5 @@ obj-$(CONFIG_MXS_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_I2C) += platform-mxs-i2c.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_MMC) += platform-mxs-mmc.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXS_PWM) += platform-mxs-pwm.o
+obj-y += platform-gpio-mxs.o
obj-$(CONFIG_MXS_HAVE_PLATFORM_MXSFB) += platform-mxsfb.o
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
new file mode 100644
index 00000000000..ed0885e414e
--- /dev/null
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/init.h>
+
+#include <mach/mx23.h>
+#include <mach/mx28.h>
+#include <mach/devices-common.h>
+
+struct platform_device *__init mxs_add_gpio(
+ int id, resource_size_t iobase, int irq)
+{
+ struct resource res[] = {
+ {
+ .start = iobase,
+ .end = iobase + SZ_8K - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .end = irq,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return platform_device_register_resndata(&mxs_apbh_bus,
+ "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0);
+}
+
+static int __init mxs_add_mxs_gpio(void)
+{
+ if (cpu_is_mx23()) {
+ mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
+ mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
+ mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
+ }
+
+ if (cpu_is_mx28()) {
+ mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
+ mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
+ mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
+ mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
+ mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
+ }
+
+ return 0;
+}
+postcore_initcall(mxs_add_mxs_gpio);
diff --git a/arch/arm/mach-mxs/gpio.c b/arch/arm/mach-mxs/gpio.c
deleted file mode 100644
index 2c950fef71a..00000000000
--- a/arch/arm/mach-mxs/gpio.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * Based on code from Freescale,
- * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <mach/mx23.h>
-#include <mach/mx28.h>
-#include <asm-generic/bug.h>
-
-#include "gpio.h"
-
-static struct mxs_gpio_port *mxs_gpio_ports;
-static int gpio_table_size;
-
-#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
-#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
-#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
-#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
-#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
-#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
-#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
-#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
-
-#define GPIO_INT_FALL_EDGE 0x0
-#define GPIO_INT_LOW_LEV 0x1
-#define GPIO_INT_RISE_EDGE 0x2
-#define GPIO_INT_HIGH_LEV 0x3
-#define GPIO_INT_LEV_MASK (1 << 0)
-#define GPIO_INT_POL_MASK (1 << 1)
-
-/* Note: This driver assumes 32 GPIOs are handled in one register */
-
-static void clear_gpio_irqstatus(struct mxs_gpio_port *port, u32 index)
-{
- __mxs_clrl(1 << index, port->base + PINCTRL_IRQSTAT(port->id));
-}
-
-static void set_gpio_irqenable(struct mxs_gpio_port *port, u32 index,
- int enable)
-{
- if (enable) {
- __mxs_setl(1 << index, port->base + PINCTRL_IRQEN(port->id));
- __mxs_setl(1 << index, port->base + PINCTRL_PIN2IRQ(port->id));
- } else {
- __mxs_clrl(1 << index, port->base + PINCTRL_IRQEN(port->id));
- }
-}
-
-static void mxs_gpio_ack_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- clear_gpio_irqstatus(&mxs_gpio_ports[gpio / 32], gpio & 0x1f);
-}
-
-static void mxs_gpio_mask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 0);
-}
-
-static void mxs_gpio_unmask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- set_gpio_irqenable(&mxs_gpio_ports[gpio / 32], gpio & 0x1f, 1);
-}
-
-static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset);
-
-static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
-{
- u32 gpio = irq_to_gpio(d->irq);
- u32 pin_mask = 1 << (gpio & 31);
- struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
- void __iomem *pin_addr;
- int edge;
-
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- edge = GPIO_INT_RISE_EDGE;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- edge = GPIO_INT_FALL_EDGE;
- break;
- case IRQ_TYPE_LEVEL_LOW:
- edge = GPIO_INT_LOW_LEV;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- edge = GPIO_INT_HIGH_LEV;
- break;
- default:
- return -EINVAL;
- }
-
- /* set level or edge */
- pin_addr = port->base + PINCTRL_IRQLEV(port->id);
- if (edge & GPIO_INT_LEV_MASK)
- __mxs_setl(pin_mask, pin_addr);
- else
- __mxs_clrl(pin_mask, pin_addr);
-
- /* set polarity */
- pin_addr = port->base + PINCTRL_IRQPOL(port->id);
- if (edge & GPIO_INT_POL_MASK)
- __mxs_setl(pin_mask, pin_addr);
- else
- __mxs_clrl(pin_mask, pin_addr);
-
- clear_gpio_irqstatus(port, gpio & 0x1f);
-
- return 0;
-}
-
-/* MXS has one interrupt *per* gpio port */
-static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
-{
- u32 irq_stat;
- struct mxs_gpio_port *port = (struct mxs_gpio_port *)irq_get_handler_data(irq);
- u32 gpio_irq_no_base = port->virtual_irq_start;
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
-
- irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
- __raw_readl(port->base + PINCTRL_IRQEN(port->id));
-
- while (irq_stat != 0) {
- int irqoffset = fls(irq_stat) - 1;
- generic_handle_irq(gpio_irq_no_base + irqoffset);
- irq_stat &= ~(1 << irqoffset);
- }
-}
-
-/*
- * Set interrupt number "irq" in the GPIO as a wake-up source.
- * While system is running, all registered GPIO interrupts need to have
- * wake-up enabled. When system is suspended, only selected GPIO interrupts
- * need to have wake-up enabled.
- * @param irq interrupt source number
- * @param enable non-zero to enable as a wake-up source
- * @return 0 on success
- */
-static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
-{
- u32 gpio = irq_to_gpio(d->irq);
- u32 gpio_idx = gpio & 0x1f;
- struct mxs_gpio_port *port = &mxs_gpio_ports[gpio / 32];
-
- if (enable) {
- if (port->irq_high && (gpio_idx >= 16))
- enable_irq_wake(port->irq_high);
- else
- enable_irq_wake(port->irq);
- } else {
- if (port->irq_high && (gpio_idx >= 16))
- disable_irq_wake(port->irq_high);
- else
- disable_irq_wake(port->irq);
- }
-
- return 0;
-}
-
-static struct irq_chip gpio_irq_chip = {
- .name = "mxs gpio",
- .irq_ack = mxs_gpio_ack_irq,
- .irq_mask = mxs_gpio_mask_irq,
- .irq_unmask = mxs_gpio_unmask_irq,
- .irq_set_type = mxs_gpio_set_irq_type,
- .irq_set_wake = mxs_gpio_set_wake_irq,
-};
-
-static void mxs_set_gpio_direction(struct gpio_chip *chip, unsigned offset,
- int dir)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
- void __iomem *pin_addr = port->base + PINCTRL_DOE(port->id);
-
- if (dir)
- __mxs_setl(1 << offset, pin_addr);
- else
- __mxs_clrl(1 << offset, pin_addr);
-}
-
-static int mxs_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
-
- return (__raw_readl(port->base + PINCTRL_DIN(port->id)) >> offset) & 1;
-}
-
-static void mxs_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
- void __iomem *pin_addr = port->base + PINCTRL_DOUT(port->id);
-
- if (value)
- __mxs_setl(1 << offset, pin_addr);
- else
- __mxs_clrl(1 << offset, pin_addr);
-}
-
-static int mxs_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
-{
- struct mxs_gpio_port *port =
- container_of(chip, struct mxs_gpio_port, chip);
-
- return port->virtual_irq_start + offset;
-}
-
-static int mxs_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- mxs_set_gpio_direction(chip, offset, 0);
- return 0;
-}
-
-static int mxs_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- mxs_gpio_set(chip, offset, value);
- mxs_set_gpio_direction(chip, offset, 1);
- return 0;
-}
-
-int __init mxs_gpio_init(struct mxs_gpio_port *port, int cnt)
-{
- int i, j;
-
- /* save for local usage */
- mxs_gpio_ports = port;
- gpio_table_size = cnt;
-
- pr_info("MXS GPIO hardware\n");
-
- for (i = 0; i < cnt; i++) {
- /* disable the interrupt and clear the status */
- __raw_writel(0, port[i].base + PINCTRL_PIN2IRQ(i));
- __raw_writel(0, port[i].base + PINCTRL_IRQEN(i));
-
- /* clear address has to be used to clear IRQSTAT bits */
- __mxs_clrl(~0U, port[i].base + PINCTRL_IRQSTAT(i));
-
- for (j = port[i].virtual_irq_start;
- j < port[i].virtual_irq_start + 32; j++) {
- irq_set_chip_and_handler(j, &gpio_irq_chip,
- handle_level_irq);
- set_irq_flags(j, IRQF_VALID);
- }
-
- /* setup one handler for each entry */
- irq_set_chained_handler(port[i].irq, mxs_gpio_irq_handler);
- irq_set_handler_data(port[i].irq, &port[i]);
-
- /* register gpio chip */
- port[i].chip.direction_input = mxs_gpio_direction_input;
- port[i].chip.direction_output = mxs_gpio_direction_output;
- port[i].chip.get = mxs_gpio_get;
- port[i].chip.set = mxs_gpio_set;
- port[i].chip.to_irq = mxs_gpio_to_irq;
- port[i].chip.base = i * 32;
- port[i].chip.ngpio = 32;
-
- /* it's a serious configuration bug when it fails */
- BUG_ON(gpiochip_add(&port[i].chip) < 0);
- }
-
- return 0;
-}
-
-#define MX23_GPIO_BASE MX23_IO_ADDRESS(MX23_PINCTRL_BASE_ADDR)
-#define MX28_GPIO_BASE MX28_IO_ADDRESS(MX28_PINCTRL_BASE_ADDR)
-
-#define DEFINE_MXS_GPIO_PORT(_base, _irq, _id) \
- { \
- .chip.label = "gpio-" #_id, \
- .id = _id, \
- .irq = _irq, \
- .base = _base, \
- .virtual_irq_start = MXS_GPIO_IRQ_START + (_id) * 32, \
- }
-
-#ifdef CONFIG_SOC_IMX23
-static struct mxs_gpio_port mx23_gpio_ports[] = {
- DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO0, 0),
- DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO1, 1),
- DEFINE_MXS_GPIO_PORT(MX23_GPIO_BASE, MX23_INT_GPIO2, 2),
-};
-
-int __init mx23_register_gpios(void)
-{
- return mxs_gpio_init(mx23_gpio_ports, ARRAY_SIZE(mx23_gpio_ports));
-}
-#endif
-
-#ifdef CONFIG_SOC_IMX28
-static struct mxs_gpio_port mx28_gpio_ports[] = {
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO0, 0),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO1, 1),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO2, 2),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO3, 3),
- DEFINE_MXS_GPIO_PORT(MX28_GPIO_BASE, MX28_INT_GPIO4, 4),
-};
-
-int __init mx28_register_gpios(void)
-{
- return mxs_gpio_init(mx28_gpio_ports, ARRAY_SIZE(mx28_gpio_ports));
-}
-#endif
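
Note: the __mxs_setl()/__mxs_clrl() helpers used throughout the removed driver rely on the MXS shadow-register convention, where every register has set/clear aliases at fixed offsets so individual bits can be flipped without a read-modify-write cycle. A minimal sketch of that idiom, assuming the conventional +4 (set) and +8 (clear) offsets:

/* Sketch only: MXS set/clear shadow-register access; the +0x4/+0x8
 * offsets are the usual MXS SCT layout, assumed here. */
#include <linux/io.h>

#define SKETCH_SET_OFFS	0x4
#define SKETCH_CLR_OFFS	0x8

static inline void sketch_mxs_setl(u32 mask, void __iomem *reg)
{
	__raw_writel(mask, reg + SKETCH_SET_OFFS);	/* set bits in mask */
}

static inline void sketch_mxs_clrl(u32 mask, void __iomem *reg)
{
	__raw_writel(mask, reg + SKETCH_CLR_OFFS);	/* clear bits in mask */
}
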
diff --git a/arch/arm/mach-mxs/gpio.h b/arch/arm/mach-mxs/gpio.h
deleted file mode 100644
index 005bb06630b..00000000000
--- a/arch/arm/mach-mxs/gpio.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#ifndef __MXS_GPIO_H__
-#define __MXS_GPIO_H__
-
-struct mxs_gpio_port {
- void __iomem *base;
- int id;
- int irq;
- int irq_high;
- int virtual_irq_start;
- struct gpio_chip chip;
-};
-
-int mxs_gpio_init(struct mxs_gpio_port*, int);
-
-#endif /* __MXS_GPIO_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 7a37469ed5b..812d7a813a7 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -11,6 +11,8 @@
#include <linux/init.h>
#include <linux/amba/bus.h>
+extern struct device mxs_apbh_bus;
+
struct platform_device *mxs_add_platform_device_dmamask(
const char *name, int id,
const struct resource *res, unsigned int num_resources,
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index eacdc6b0e70..56767a5cce0 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -26,7 +26,6 @@
#include <mach/iomux-mx28.h>
#include "devices-mx28.h"
-#include "gpio.h"
#define MX28EVK_FLEXCAN_SWITCH MXS_GPIO_NR(2, 13)
#define MX28EVK_FEC_PHY_POWER MXS_GPIO_NR(2, 15)
diff --git a/arch/arm/mach-mxs/mm-mx23.c b/arch/arm/mach-mxs/mm-mx23.c
index 5148cd64a6b..1b2345ac1a8 100644
--- a/arch/arm/mach-mxs/mm-mx23.c
+++ b/arch/arm/mach-mxs/mm-mx23.c
@@ -41,5 +41,4 @@ void __init mx23_map_io(void)
void __init mx23_init_irq(void)
{
icoll_init_irq();
- mx23_register_gpios();
}
diff --git a/arch/arm/mach-mxs/mm-mx28.c b/arch/arm/mach-mxs/mm-mx28.c
index 7e4cea32ebc..b6e18ddb92c 100644
--- a/arch/arm/mach-mxs/mm-mx28.c
+++ b/arch/arm/mach-mxs/mm-mx28.c
@@ -41,5 +41,4 @@ void __init mx28_map_io(void)
void __init mx28_init_irq(void)
{
icoll_init_irq();
- mx28_register_gpios();
}
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 364137c2042..399da4ce017 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -34,11 +34,22 @@ static struct __initdata resource omap15xx_mpu_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap15xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL,
+ .datain = OMAP_MPUIO_INPUT_LATCH,
+ .dataout = OMAP_MPUIO_OUTPUT,
+ .irqstatus = OMAP_MPUIO_GPIO_INT,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 16,
.bank_stride = 1,
+ .regs = &omap15xx_mpuio_regs,
};
static struct platform_device omap15xx_mpu_gpio = {
@@ -64,10 +75,21 @@ static struct __initdata resource omap15xx_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap15xx_gpio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP1510_GPIO_DIR_CONTROL,
+ .datain = OMAP1510_GPIO_DATA_INPUT,
+ .dataout = OMAP1510_GPIO_DATA_OUTPUT,
+ .irqstatus = OMAP1510_GPIO_INT_STATUS,
+ .irqenable = OMAP1510_GPIO_INT_MASK,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_1510,
.bank_width = 16,
+ .regs = &omap15xx_gpio_regs,
};
static struct platform_device omap15xx_gpio = {
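
Note: the omap_gpio_reg_offs table introduced here is what lets a single driver body serve every bank revision: register offsets are looked up per bank at run time instead of being fixed at compile time, and irqenable_inv records that the 15xx/7xx/MPUIO mask registers have inverted sense. A hedged sketch of how a generic access path might consume the table (struct sketch_bank and both helpers are illustrative, not the real driver's names):

/* Illustrative consumers of omap_gpio_reg_offs; not the actual driver. */
struct sketch_bank {
	void __iomem *base;
	struct omap_gpio_reg_offs *regs;
};

static int sketch_gpio_get(struct sketch_bank *bank, unsigned offset)
{
	return (__raw_readl(bank->base + bank->regs->datain) >> offset) & 1;
}

static void sketch_irq_enable(struct sketch_bank *bank, u32 mask)
{
	void __iomem *reg = bank->base + bank->regs->irqenable;
	u32 l = __raw_readl(reg);

	if (bank->regs->irqenable_inv)
		l &= ~mask;		/* 0 = enabled on these banks */
	else
		l |= mask;
	__raw_writel(l, reg);
}
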
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 293a246e282..0f399bd0e70 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -37,11 +37,22 @@ static struct __initdata resource omap16xx_mpu_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap16xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL,
+ .datain = OMAP_MPUIO_INPUT_LATCH,
+ .dataout = OMAP_MPUIO_OUTPUT,
+ .irqstatus = OMAP_MPUIO_GPIO_INT,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 16,
.bank_stride = 1,
+ .regs = &omap16xx_mpuio_regs,
};
static struct platform_device omap16xx_mpu_gpio = {
@@ -67,10 +78,24 @@ static struct __initdata resource omap16xx_gpio1_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap16xx_gpio_regs = {
+ .revision = OMAP1610_GPIO_REVISION,
+ .direction = OMAP1610_GPIO_DIRECTION,
+ .set_dataout = OMAP1610_GPIO_SET_DATAOUT,
+ .clr_dataout = OMAP1610_GPIO_CLEAR_DATAOUT,
+ .datain = OMAP1610_GPIO_DATAIN,
+ .dataout = OMAP1610_GPIO_DATAOUT,
+ .irqstatus = OMAP1610_GPIO_IRQSTATUS1,
+ .irqenable = OMAP1610_GPIO_IRQENABLE1,
+ .set_irqenable = OMAP1610_GPIO_SET_IRQENABLE1,
+ .clr_irqenable = OMAP1610_GPIO_CLEAR_IRQENABLE1,
+};
+
static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio1 = {
@@ -100,6 +125,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
.virtual_irq_start = IH_GPIO_BASE + 16,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio2 = {
@@ -129,6 +155,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
.virtual_irq_start = IH_GPIO_BASE + 32,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio3 = {
@@ -158,6 +185,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
.virtual_irq_start = IH_GPIO_BASE + 48,
.bank_type = METHOD_GPIO_1610,
.bank_width = 16,
+ .regs = &omap16xx_gpio_regs,
};
static struct platform_device omap16xx_gpio4 = {
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index c6ad248d63a..5ab63eab0ff 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -39,11 +39,22 @@ static struct __initdata resource omap7xx_mpu_gpio_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap7xx_mpuio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP_MPUIO_IO_CNTL / 2,
+ .datain = OMAP_MPUIO_INPUT_LATCH / 2,
+ .dataout = OMAP_MPUIO_OUTPUT / 2,
+ .irqstatus = OMAP_MPUIO_GPIO_INT / 2,
+ .irqenable = OMAP_MPUIO_GPIO_MASKIT / 2,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
.virtual_irq_start = IH_MPUIO_BASE,
.bank_type = METHOD_MPUIO,
.bank_width = 32,
.bank_stride = 2,
+ .regs = &omap7xx_mpuio_regs,
};
static struct platform_device omap7xx_mpu_gpio = {
@@ -69,10 +80,21 @@ static struct __initdata resource omap7xx_gpio1_resources[] = {
},
};
+static struct omap_gpio_reg_offs omap7xx_gpio_regs = {
+ .revision = USHRT_MAX,
+ .direction = OMAP7XX_GPIO_DIR_CONTROL,
+ .datain = OMAP7XX_GPIO_DATA_INPUT,
+ .dataout = OMAP7XX_GPIO_DATA_OUTPUT,
+ .irqstatus = OMAP7XX_GPIO_INT_STATUS,
+ .irqenable = OMAP7XX_GPIO_INT_MASK,
+ .irqenable_inv = true,
+};
+
static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
.virtual_irq_start = IH_GPIO_BASE,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio1 = {
@@ -102,6 +124,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
.virtual_irq_start = IH_GPIO_BASE + 32,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio2 = {
@@ -131,6 +154,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
.virtual_irq_start = IH_GPIO_BASE + 64,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio3 = {
@@ -160,6 +184,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
.virtual_irq_start = IH_GPIO_BASE + 96,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio4 = {
@@ -189,6 +214,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
.virtual_irq_start = IH_GPIO_BASE + 128,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio5 = {
@@ -218,6 +244,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
.virtual_irq_start = IH_GPIO_BASE + 160,
.bank_type = METHOD_GPIO_7XX,
.bank_width = 32,
+ .regs = &omap7xx_gpio_regs,
};
static struct platform_device omap7xx_gpio6 = {
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index 334fb8871bc..943072d5a1d 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -32,7 +32,7 @@ static int omap1_pm_runtime_suspend(struct device *dev)
if (ret)
return ret;
- ret = pm_runtime_clk_suspend(dev);
+ ret = pm_clk_suspend(dev);
if (ret) {
pm_generic_runtime_resume(dev);
return ret;
@@ -45,24 +45,24 @@ static int omap1_pm_runtime_resume(struct device *dev)
{
dev_dbg(dev, "%s\n", __func__);
- pm_runtime_clk_resume(dev);
+ pm_clk_resume(dev);
return pm_generic_runtime_resume(dev);
}
-static struct dev_power_domain default_power_domain = {
+static struct dev_pm_domain default_pm_domain = {
.ops = {
.runtime_suspend = omap1_pm_runtime_suspend,
.runtime_resume = omap1_pm_runtime_resume,
USE_PLATFORM_PM_SLEEP_OPS
},
};
-#define OMAP1_PWR_DOMAIN (&default_power_domain)
+#define OMAP1_PM_DOMAIN (&default_pm_domain)
#else
-#define OMAP1_PWR_DOMAIN NULL
+#define OMAP1_PM_DOMAIN NULL
#endif /* CONFIG_PM_RUNTIME */
static struct pm_clk_notifier_block platform_bus_notifier = {
- .pwr_domain = OMAP1_PWR_DOMAIN,
+ .pm_domain = OMAP1_PM_DOMAIN,
.con_ids = { "ick", "fck", NULL, },
};
@@ -71,7 +71,7 @@ static int __init omap1_pm_runtime_init(void)
if (!cpu_class_is_omap1())
return -ENODEV;
- pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
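
Note: this file mostly tracks a core API rename: pm_runtime_clk_* becomes pm_clk_*, struct dev_power_domain becomes struct dev_pm_domain, and the notifier field pwr_domain becomes pm_domain; behavior is unchanged. The ordering constraint the callbacks encode is worth restating as a sketch, assuming this era's declarations in <linux/pm_runtime.h>:

/* Sketch of the suspend/resume ordering used above: quiesce the device
 * first and gate clocks last; ungate clocks before touching the device. */
static int sketch_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	if (ret)
		return ret;

	ret = pm_clk_suspend(dev);		/* gate 'ick'/'fck' last */
	if (ret)
		pm_generic_runtime_resume(dev);	/* unwind on failure */
	return ret;
}

static int sketch_runtime_resume(struct device *dev)
{
	pm_clk_resume(dev);			/* ungate clocks first */
	return pm_generic_runtime_resume(dev);
}
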
diff --git a/arch/arm/mach-omap2/gpio.c b/arch/arm/mach-omap2/gpio.c
index 9529842ae05..2765cdc3152 100644
--- a/arch/arm/mach-omap2/gpio.c
+++ b/arch/arm/mach-omap2/gpio.c
@@ -61,13 +61,45 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
pdata->dbck_flag = dev_attr->dbck_flag;
pdata->virtual_irq_start = IH_GPIO_BASE + 32 * (id - 1);
+ pdata->regs = kzalloc(sizeof(struct omap_gpio_reg_offs), GFP_KERNEL);
+ if (!pdata->regs) {
+ pr_err("gpio%d: Memory allocation failed\n", id);
+ return -ENOMEM;
+ }
+
switch (oh->class->rev) {
case 0:
case 1:
pdata->bank_type = METHOD_GPIO_24XX;
+ pdata->regs->revision = OMAP24XX_GPIO_REVISION;
+ pdata->regs->direction = OMAP24XX_GPIO_OE;
+ pdata->regs->datain = OMAP24XX_GPIO_DATAIN;
+ pdata->regs->dataout = OMAP24XX_GPIO_DATAOUT;
+ pdata->regs->set_dataout = OMAP24XX_GPIO_SETDATAOUT;
+ pdata->regs->clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus = OMAP24XX_GPIO_IRQSTATUS1;
+ pdata->regs->irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2;
+ pdata->regs->irqenable = OMAP24XX_GPIO_IRQENABLE1;
+ pdata->regs->set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1;
+ pdata->regs->clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1;
+ pdata->regs->debounce = OMAP24XX_GPIO_DEBOUNCE_VAL;
+ pdata->regs->debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN;
break;
case 2:
pdata->bank_type = METHOD_GPIO_44XX;
+ pdata->regs->revision = OMAP4_GPIO_REVISION;
+ pdata->regs->direction = OMAP4_GPIO_OE;
+ pdata->regs->datain = OMAP4_GPIO_DATAIN;
+ pdata->regs->dataout = OMAP4_GPIO_DATAOUT;
+ pdata->regs->set_dataout = OMAP4_GPIO_SETDATAOUT;
+ pdata->regs->clr_dataout = OMAP4_GPIO_CLEARDATAOUT;
+ pdata->regs->irqstatus = OMAP4_GPIO_IRQSTATUS0;
+ pdata->regs->irqstatus2 = OMAP4_GPIO_IRQSTATUS1;
+ pdata->regs->irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+ pdata->regs->set_irqenable = OMAP4_GPIO_IRQSTATUSSET0;
+ pdata->regs->clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0;
+ pdata->regs->debounce = OMAP4_GPIO_DEBOUNCINGTIME;
+ pdata->regs->debounce_en = OMAP4_GPIO_DEBOUNCENABLE;
break;
default:
WARN(1, "Invalid gpio bank_type\n");
@@ -87,6 +119,8 @@ static int omap2_gpio_dev_init(struct omap_hwmod *oh, void *unused)
return PTR_ERR(od);
}
+ omap_device_disable_idle_on_suspend(od);
+
gpio_bank_count++;
return 0;
}
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 1ac361b7b8c..466fc722fa0 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -805,6 +805,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata)
WARN(IS_ERR(od), "Could not build omap_device for %s: %s.\n",
name, oh->name);
+ omap_device_disable_idle_on_suspend(od);
oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt);
uart->irq = oh->mpu_irqs[0].irq;
diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
deleted file mode 100644
index dcef2287cb3..00000000000
--- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* arch/arm/mach-s3c2410/include/mach/spi-gpio.h
- *
- * Copyright (c) 2006 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- *
- * S3C2410 - SPI Controller platform_device info
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __ASM_ARCH_SPIGPIO_H
-#define __ASM_ARCH_SPIGPIO_H __FILE__
-
-struct s3c2410_spigpio_info {
- unsigned long pin_clk;
- unsigned long pin_mosi;
- unsigned long pin_miso;
-
- int num_chipselect;
- int bus_num;
-
- void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs);
-};
-
-
-#endif /* __ASM_ARCH_SPIGPIO_H */
diff --git a/arch/arm/mach-s3c2410/mach-qt2410.c b/arch/arm/mach-s3c2410/mach-qt2410.c
index e8f49feef28..f44f77531b1 100644
--- a/arch/arm/mach-s3c2410/mach-qt2410.c
+++ b/arch/arm/mach-s3c2410/mach-qt2410.c
@@ -32,7 +32,7 @@
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_gpio.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
@@ -53,8 +53,6 @@
#include <mach/fb.h>
#include <plat/nand.h>
#include <plat/udc.h>
-#include <mach/spi.h>
-#include <mach/spi-gpio.h>
#include <plat/iic.h>
#include <plat/common-smdk.h>
@@ -216,32 +214,16 @@ static struct platform_device qt2410_led = {
/* SPI */
-static void spi_gpio_cs(struct s3c2410_spigpio_info *spi, int cs)
-{
- switch (cs) {
- case BITBANG_CS_ACTIVE:
- gpio_set_value(S3C2410_GPB(5), 0);
- break;
- case BITBANG_CS_INACTIVE:
- gpio_set_value(S3C2410_GPB(5), 1);
- break;
- }
-}
-
-static struct s3c2410_spigpio_info spi_gpio_cfg = {
- .pin_clk = S3C2410_GPG(7),
- .pin_mosi = S3C2410_GPG(6),
- .pin_miso = S3C2410_GPG(5),
- .chip_select = &spi_gpio_cs,
+static struct spi_gpio_platform_data spi_gpio_cfg = {
+ .sck = S3C2410_GPG(7),
+ .mosi = S3C2410_GPG(6),
+ .miso = S3C2410_GPG(5),
};
-
static struct platform_device qt2410_spi = {
- .name = "s3c24xx-spi-gpio",
- .id = 1,
- .dev = {
- .platform_data = &spi_gpio_cfg,
- },
+ .name = "spi-gpio",
+ .id = 1,
+ .dev.platform_data = &spi_gpio_cfg,
};
/* Board devices */
diff --git a/arch/arm/mach-s3c2412/mach-jive.c b/arch/arm/mach-s3c2412/mach-jive.c
index 85dcaeb9e62..5eeb47580b0 100644
--- a/arch/arm/mach-s3c2412/mach-jive.c
+++ b/arch/arm/mach-s3c2412/mach-jive.c
@@ -25,6 +25,7 @@
#include <video/ili9320.h>
#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -38,7 +39,6 @@
#include <mach/regs-gpio.h>
#include <mach/regs-mem.h>
#include <mach/regs-lcd.h>
-#include <mach/spi-gpio.h>
#include <mach/fb.h>
#include <asm/mach-types.h>
@@ -389,45 +389,30 @@ static struct ili9320_platdata jive_lcm_config = {
/* LCD SPI support */
-static void jive_lcd_spi_chipselect(struct s3c2410_spigpio_info *spi, int cs)
-{
- gpio_set_value(S3C2410_GPB(7), cs ? 0 : 1);
-}
-
-static struct s3c2410_spigpio_info jive_lcd_spi = {
- .bus_num = 1,
- .pin_clk = S3C2410_GPG(8),
- .pin_mosi = S3C2410_GPB(8),
- .num_chipselect = 1,
- .chip_select = jive_lcd_spi_chipselect,
+static struct spi_gpio_platform_data jive_lcd_spi = {
+ .sck = S3C2410_GPG(8),
+ .mosi = S3C2410_GPB(8),
+ .miso = SPI_GPIO_NO_MISO,
};
static struct platform_device jive_device_lcdspi = {
- .name = "spi_s3c24xx_gpio",
+ .name = "spi-gpio",
.id = 1,
- .num_resources = 0,
.dev.platform_data = &jive_lcd_spi,
};
-/* WM8750 audio code SPI definition */
-static void jive_wm8750_chipselect(struct s3c2410_spigpio_info *spi, int cs)
-{
- gpio_set_value(S3C2410_GPH(10), cs ? 0 : 1);
-}
+/* WM8750 audio codec SPI definition */
-static struct s3c2410_spigpio_info jive_wm8750_spi = {
- .bus_num = 2,
- .pin_clk = S3C2410_GPB(4),
- .pin_mosi = S3C2410_GPB(9),
- .num_chipselect = 1,
- .chip_select = jive_wm8750_chipselect,
+static struct spi_gpio_platform_data jive_wm8750_spi = {
+ .sck = S3C2410_GPB(4),
+ .mosi = S3C2410_GPB(9),
+ .miso = SPI_GPIO_NO_MISO,
};
static struct platform_device jive_device_wm8750 = {
- .name = "spi_s3c24xx_gpio",
+ .name = "spi-gpio",
.id = 2,
- .num_resources = 0,
.dev.platform_data = &jive_wm8750_spi,
};
@@ -441,12 +426,14 @@ static struct spi_board_info __initdata jive_spi_devs[] = {
.mode = SPI_MODE_3, /* CPOL=1, CPHA=1 */
.max_speed_hz = 100000,
.platform_data = &jive_lcm_config,
+ .controller_data = (void *)S3C2410_GPB(7),
}, {
.modalias = "WM8750",
.bus_num = 2,
.chip_select = 0,
.mode = SPI_MODE_0, /* CPOL=0, CPHA=0 */
.max_speed_hz = 100000,
+ .controller_data = (void *)S3C2410_GPH(10),
},
};
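
Note: the jive conversion shows the whole migration recipe for the generic spi-gpio driver: pin numbers move into struct spi_gpio_platform_data, SPI_GPIO_NO_MISO marks a write-only bus, and the chip-select GPIO travels per device in spi_board_info.controller_data instead of a board callback. The same pattern as a self-contained sketch with invented pin numbers:

/* Sketch of a spi-gpio board registration; all pin numbers are made up. */
#include <linux/spi/spi.h>
#include <linux/spi/spi_gpio.h>
#include <linux/platform_device.h>

static struct spi_gpio_platform_data sketch_spi_pdata = {
	.sck		= 10,			/* bit-banged clock GPIO */
	.mosi		= 11,
	.miso		= SPI_GPIO_NO_MISO,	/* write-only device */
	.num_chipselect	= 1,
};

static struct platform_device sketch_spi_device = {
	.name			= "spi-gpio",
	.id			= 1,		/* becomes SPI bus number 1 */
	.dev.platform_data	= &sketch_spi_pdata,
};

static struct spi_board_info sketch_spi_board_info __initdata = {
	.modalias	= "sketch-chip",
	.bus_num	= 1,
	.chip_select	= 0,
	.max_speed_hz	= 100000,
	.controller_data = (void *)12,		/* chip-select GPIO */
};

/* Registered at board init via spi_register_board_info(&..., 1). */
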
diff --git a/arch/arm/mach-s3c2440/mach-gta02.c b/arch/arm/mach-s3c2440/mach-gta02.c
index 716662008ce..c10ddf4ed7f 100644
--- a/arch/arm/mach-s3c2440/mach-gta02.c
+++ b/arch/arm/mach-s3c2440/mach-gta02.c
@@ -74,7 +74,6 @@
#include <mach/fb.h>
#include <mach/spi.h>
-#include <mach/spi-gpio.h>
#include <plat/usb-control.h>
#include <mach/regs-mem.h>
#include <mach/hardware.h>
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 803bc6edfca..b473b8efac6 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1408,9 +1408,14 @@ static void __init ap4evb_init(void)
platform_add_devices(ap4evb_devices, ARRAY_SIZE(ap4evb_devices));
+ sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc1_device);
+ sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+
hdmi_init_pm_clock();
fsi_init_pm_clock();
sh7372_pm_init();
+ pm_clk_add(&fsi_device.dev, "spu2");
}
static void __init ap4evb_timer_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 3802f2afabe..5b36b6c5b44 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1582,8 +1582,13 @@ static void __init mackerel_init(void)
platform_add_devices(mackerel_devices, ARRAY_SIZE(mackerel_devices));
+ sh7372_add_device_to_domain(&sh7372_a4lc, &lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4lc, &hdmi_lcdc_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &fsi_device);
+
hdmi_init_pm_clock();
sh7372_pm_init();
+ pm_clk_add(&fsi_device.dev, "spu2");
}
static void __init mackerel_timer_init(void)
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index c0800d83971..91f5779abdd 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -662,6 +662,7 @@ static struct clk_lookup lookups[] = {
CLKDEV_ICK_ID("ick", "sh-mobile-hdmi", &div6_reparent_clks[DIV6_HDMI]),
CLKDEV_ICK_ID("icka", "sh_fsi2", &div6_reparent_clks[DIV6_FSIA]),
CLKDEV_ICK_ID("ickb", "sh_fsi2", &div6_reparent_clks[DIV6_FSIB]),
+ CLKDEV_ICK_ID("spu2", "sh_fsi2", &mstp_clks[MSTP223]),
};
void __init sh7372_clock_init(void)
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index df20d767017..ce595cee86c 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -12,6 +12,7 @@
#define __ASM_SH7372_H__
#include <linux/sh_clk.h>
+#include <linux/pm_domain.h>
/*
* Pin Function Controller:
@@ -470,4 +471,32 @@ extern struct clk sh7372_fsibck_clk;
extern struct clk sh7372_fsidiva_clk;
extern struct clk sh7372_fsidivb_clk;
+struct platform_device;
+
+struct sh7372_pm_domain {
+ struct generic_pm_domain genpd;
+ unsigned int bit_shift;
+};
+
+static inline struct sh7372_pm_domain *to_sh7372_pd(struct generic_pm_domain *d)
+{
+ return container_of(d, struct sh7372_pm_domain, genpd);
+}
+
+#ifdef CONFIG_PM
+extern struct sh7372_pm_domain sh7372_a4lc;
+extern struct sh7372_pm_domain sh7372_a4mp;
+extern struct sh7372_pm_domain sh7372_d4;
+extern struct sh7372_pm_domain sh7372_a3rv;
+extern struct sh7372_pm_domain sh7372_a3ri;
+extern struct sh7372_pm_domain sh7372_a3sg;
+
+extern void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd);
+extern void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
+ struct platform_device *pdev);
+#else
+#define sh7372_init_pm_domain(pd) do { } while (0)
+#define sh7372_add_device_to_domain(pd, pdev) do { } while (0)
+#endif /* CONFIG_PM */
+
#endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-shmobile/pm-sh7372.c b/arch/arm/mach-shmobile/pm-sh7372.c
index 8e4aadf14c9..933fb411be0 100644
--- a/arch/arm/mach-shmobile/pm-sh7372.c
+++ b/arch/arm/mach-shmobile/pm-sh7372.c
@@ -15,16 +15,176 @@
#include <linux/list.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <mach/common.h>
+#include <mach/sh7372.h>
#define SMFRAM 0xe6a70000
#define SYSTBCR 0xe6150024
#define SBAR 0xe6180020
#define APARMBAREA 0xe6f10020
+#define SPDCR 0xe6180008
+#define SWUCR 0xe6180014
+#define PSTR 0xe6180080
+
+#define PSTR_RETRIES 100
+#define PSTR_DELAY_US 10
+
+#ifdef CONFIG_PM
+
+static int pd_power_down(struct generic_pm_domain *genpd)
+{
+ struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
+ unsigned int mask = 1 << sh7372_pd->bit_shift;
+
+ if (__raw_readl(PSTR) & mask) {
+ unsigned int retry_count;
+
+ __raw_writel(mask, SPDCR);
+
+ for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
+ if (!(__raw_readl(SPDCR) & mask))
+ break;
+ cpu_relax();
+ }
+ }
+
+ pr_debug("sh7372 power domain down 0x%08x -> PSTR = 0x%08x\n",
+ mask, __raw_readl(PSTR));
+
+ return 0;
+}
+
+static int pd_power_up(struct generic_pm_domain *genpd)
+{
+ struct sh7372_pm_domain *sh7372_pd = to_sh7372_pd(genpd);
+ unsigned int mask = 1 << sh7372_pd->bit_shift;
+ unsigned int retry_count;
+ int ret = 0;
+
+ if (__raw_readl(PSTR) & mask)
+ goto out;
+
+ __raw_writel(mask, SWUCR);
+
+ for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
+ if (!(__raw_readl(SWUCR) & mask))
+ goto out;
+ if (retry_count > PSTR_RETRIES)
+ udelay(PSTR_DELAY_US);
+ else
+ cpu_relax();
+ }
+ if (__raw_readl(SWUCR) & mask)
+ ret = -EIO;
+
+ out:
+ pr_debug("sh7372 power domain up 0x%08x -> PSTR = 0x%08x\n",
+ mask, __raw_readl(PSTR));
+
+ return ret;
+}
+
+static int pd_power_up_a3rv(struct generic_pm_domain *genpd)
+{
+ int ret = pd_power_up(genpd);
+
+ /* force A4LC on after A3RV has been requested on */
+ pm_genpd_poweron(&sh7372_a4lc.genpd);
+
+ return ret;
+}
+
+static int pd_power_down_a3rv(struct generic_pm_domain *genpd)
+{
+ int ret = pd_power_down(genpd);
+
+ /* try to power down A4LC after A3RV is requested off */
+ genpd_queue_power_off_work(&sh7372_a4lc.genpd);
+
+ return ret;
+}
+
+static int pd_power_down_a4lc(struct generic_pm_domain *genpd)
+{
+ /* only power down A4LC if A3RV is off */
+ if (!(__raw_readl(PSTR) & (1 << sh7372_a3rv.bit_shift)))
+ return pd_power_down(genpd);
+
+ return -EBUSY;
+}
+
+static bool pd_active_wakeup(struct device *dev)
+{
+ return true;
+}
+
+void sh7372_init_pm_domain(struct sh7372_pm_domain *sh7372_pd)
+{
+ struct generic_pm_domain *genpd = &sh7372_pd->genpd;
+
+ pm_genpd_init(genpd, NULL, false);
+ genpd->stop_device = pm_clk_suspend;
+ genpd->start_device = pm_clk_resume;
+ genpd->active_wakeup = pd_active_wakeup;
+
+ if (sh7372_pd == &sh7372_a4lc) {
+ genpd->power_off = pd_power_down_a4lc;
+ genpd->power_on = pd_power_up;
+ } else if (sh7372_pd == &sh7372_a3rv) {
+ genpd->power_off = pd_power_down_a3rv;
+ genpd->power_on = pd_power_up_a3rv;
+ } else {
+ genpd->power_off = pd_power_down;
+ genpd->power_on = pd_power_up;
+ }
+ genpd->power_on(&sh7372_pd->genpd);
+}
+
+void sh7372_add_device_to_domain(struct sh7372_pm_domain *sh7372_pd,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!dev->power.subsys_data) {
+ pm_clk_init(dev);
+ pm_clk_add(dev, NULL);
+ }
+ pm_genpd_add_device(&sh7372_pd->genpd, dev);
+}
+
+struct sh7372_pm_domain sh7372_a4lc = {
+ .bit_shift = 1,
+};
+
+struct sh7372_pm_domain sh7372_a4mp = {
+ .bit_shift = 2,
+};
+
+struct sh7372_pm_domain sh7372_d4 = {
+ .bit_shift = 3,
+};
+
+struct sh7372_pm_domain sh7372_a3rv = {
+ .bit_shift = 6,
+};
+
+struct sh7372_pm_domain sh7372_a3ri = {
+ .bit_shift = 8,
+};
+
+struct sh7372_pm_domain sh7372_a3sg = {
+ .bit_shift = 13,
+};
+
+#endif /* CONFIG_PM */
+
static void sh7372_enter_core_standby(void)
{
void __iomem *smfram = (void __iomem *)SMFRAM;
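
Note: pd_power_up() above polls SWUCR until the hardware acknowledges the wake-up request, backing off with udelay() for the first half of its retries and busy-spinning for the rest; pd_power_down() polls SPDCR the same way. Once a domain is initialized, attaching a device is all a board has to do, as in this hedged sketch (the device is invented):

/* Sketch: bring up a power domain and route a device through it. */
static struct platform_device sketch_pdev;	/* some A4MP peripheral */

static void __init sketch_board_init(void)
{
	sh7372_init_pm_domain(&sh7372_a4mp);	/* genpd + clk stop/start */
	sh7372_add_device_to_domain(&sh7372_a4mp, &sketch_pdev);
	/* Runtime-suspending sketch_pdev may now power A4MP off entirely. */
}
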
diff --git a/arch/arm/mach-shmobile/pm_runtime.c b/arch/arm/mach-shmobile/pm_runtime.c
index 2d1b67a59e4..6ec454e1e06 100644
--- a/arch/arm/mach-shmobile/pm_runtime.c
+++ b/arch/arm/mach-shmobile/pm_runtime.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
@@ -28,31 +29,38 @@ static int default_platform_runtime_idle(struct device *dev)
return pm_runtime_suspend(dev);
}
-static struct dev_power_domain default_power_domain = {
+static struct dev_pm_domain default_pm_domain = {
.ops = {
- .runtime_suspend = pm_runtime_clk_suspend,
- .runtime_resume = pm_runtime_clk_resume,
+ .runtime_suspend = pm_clk_suspend,
+ .runtime_resume = pm_clk_resume,
.runtime_idle = default_platform_runtime_idle,
USE_PLATFORM_PM_SLEEP_OPS
},
};
-#define DEFAULT_PWR_DOMAIN_PTR (&default_power_domain)
+#define DEFAULT_PM_DOMAIN_PTR (&default_pm_domain)
#else
-#define DEFAULT_PWR_DOMAIN_PTR NULL
+#define DEFAULT_PM_DOMAIN_PTR NULL
#endif /* CONFIG_PM_RUNTIME */
static struct pm_clk_notifier_block platform_bus_notifier = {
- .pwr_domain = DEFAULT_PWR_DOMAIN_PTR,
+ .pm_domain = DEFAULT_PM_DOMAIN_PTR,
.con_ids = { NULL, },
};
static int __init sh_pm_runtime_init(void)
{
- pm_runtime_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
+ pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier);
return 0;
}
core_initcall(sh_pm_runtime_init);
+
+static int __init sh_pm_runtime_late_init(void)
+{
+ pm_genpd_poweroff_unused();
+ return 0;
+}
+late_initcall(sh_pm_runtime_late_init);
diff --git a/arch/arm/mach-shmobile/setup-sh7372.c b/arch/arm/mach-shmobile/setup-sh7372.c
index cd807eea69e..79f0413d872 100644
--- a/arch/arm/mach-shmobile/setup-sh7372.c
+++ b/arch/arm/mach-shmobile/setup-sh7372.c
@@ -841,11 +841,22 @@ static struct platform_device *sh7372_late_devices[] __initdata = {
void __init sh7372_add_standard_devices(void)
{
+ sh7372_init_pm_domain(&sh7372_a4lc);
+ sh7372_init_pm_domain(&sh7372_a4mp);
+ sh7372_init_pm_domain(&sh7372_d4);
+ sh7372_init_pm_domain(&sh7372_a3rv);
+ sh7372_init_pm_domain(&sh7372_a3ri);
+ sh7372_init_pm_domain(&sh7372_a3sg);
+
platform_add_devices(sh7372_early_devices,
ARRAY_SIZE(sh7372_early_devices));
platform_add_devices(sh7372_late_devices,
ARRAY_SIZE(sh7372_late_devices));
+
+ sh7372_add_device_to_domain(&sh7372_a3rv, &vpu_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &spu0_device);
+ sh7372_add_device_to_domain(&sh7372_a4mp, &spu1_device);
}
void __init sh7372_add_early_devices(void)
diff --git a/arch/arm/mach-tegra/Makefile b/arch/arm/mach-tegra/Makefile
index 823c703e573..ed58ef9019b 100644
--- a/arch/arm/mach-tegra/Makefile
+++ b/arch/arm/mach-tegra/Makefile
@@ -4,7 +4,6 @@ obj-y += io.o
obj-y += irq.o
obj-y += clock.o
obj-y += timer.o
-obj-y += gpio.o
obj-y += pinmux.o
obj-y += powergate.o
obj-y += fuse.o
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index a1387875a49..d53c35fe2ea 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := clock.o gpio.o time.o devices.o cpu.o system.o irq-common.o
+obj-y := clock.o time.o devices.o cpu.o system.o irq-common.o
# MX51 uses the TZIC interrupt controller; older platforms use AVIC
obj-$(CONFIG_MXC_TZIC) += tzic.o
diff --git a/arch/arm/plat-mxc/devices.c b/arch/arm/plat-mxc/devices.c
index eee1b6096a0..fb166b20f60 100644
--- a/arch/arm/plat-mxc/devices.c
+++ b/arch/arm/plat-mxc/devices.c
@@ -89,3 +89,14 @@ err:
return pdev;
}
+
+struct device mxc_aips_bus = {
+ .init_name = "mxc_aips",
+ .parent = &platform_bus,
+};
+
+static int __init mxc_device_init(void)
+{
+ return device_register(&mxc_aips_bus);
+}
+core_initcall(mxc_device_init);
diff --git a/arch/arm/plat-mxc/devices/Makefile b/arch/arm/plat-mxc/devices/Makefile
index ad2922acf48..b41bf972b54 100644
--- a/arch/arm/plat-mxc/devices/Makefile
+++ b/arch/arm/plat-mxc/devices/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_IMX_HAVE_PLATFORM_FEC) += platform-fec.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_FLEXCAN) += platform-flexcan.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_FSL_USB2_UDC) += platform-fsl-usb2-udc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_GPIO_KEYS) += platform-gpio_keys.o
+obj-y += platform-gpio-mxc.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX21_HCD) += platform-imx21-hcd.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMX2_WDT) += platform-imx2-wdt.o
obj-$(CONFIG_IMX_HAVE_PLATFORM_IMXDI_RTC) += platform-imxdi_rtc.o
diff --git a/arch/arm/plat-mxc/devices/platform-gpio-mxc.c b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c
new file mode 100644
index 00000000000..a7919a24103
--- /dev/null
+++ b/arch/arm/plat-mxc/devices/platform-gpio-mxc.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2011 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <mach/devices-common.h>
+
+struct platform_device *__init mxc_register_gpio(char *name, int id,
+ resource_size_t iobase, resource_size_t iosize, int irq, int irq_high)
+{
+ struct resource res[] = {
+ {
+ .start = iobase,
+ .end = iobase + iosize - 1,
+ .flags = IORESOURCE_MEM,
+ }, {
+ .start = irq,
+ .end = irq,
+ .flags = IORESOURCE_IRQ,
+ }, {
+ .start = irq_high,
+ .end = irq_high,
+ .flags = IORESOURCE_IRQ,
+ },
+ };
+
+ return platform_device_register_resndata(&mxc_aips_bus,
+ name, id, res, ARRAY_SIZE(res), NULL, 0);
+}
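
Note: with the static port tables gone, each SoC's init code is expected to register its GPIO banks as platform devices through this helper; a hedged sketch of a caller (device name, base addresses, and IRQ numbers are all invented):

/* Sketch caller of mxc_register_gpio(); every value below is made up.
 * SZ_16K comes from <asm/sizes.h>; irq_high is 0 for banks without a
 * separate upper-16-pin interrupt line. */
void __init sketch_soc_init(void)
{
	/* one call per 32-pin bank: name, id, iobase, size, irq, irq_high */
	mxc_register_gpio("sketch-gpio", 0, 0x53f84000, SZ_16K, 50, 0);
	mxc_register_gpio("sketch-gpio", 1, 0x53f88000, SZ_16K, 51, 0);
}
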
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index f97eb3615b2..9bfae8bd5b8 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -40,9 +40,10 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
#endif
#ifdef CONFIG_SOC_IMX25
+/* i.mx25 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx25_cspi_data[] __initconst = {
#define imx25_cspi_data_entry(_id, _hwid) \
- imx_spi_imx_data_entry(MX25, CSPI, "imx25-cspi", _id, _hwid, SZ_16K)
+ imx_spi_imx_data_entry(MX25, CSPI, "imx35-cspi", _id, _hwid, SZ_16K)
imx25_cspi_data_entry(0, 1),
imx25_cspi_data_entry(1, 2),
imx25_cspi_data_entry(2, 3),
@@ -79,8 +80,9 @@ const struct imx_spi_imx_data imx35_cspi_data[] __initconst = {
#endif /* ifdef CONFIG_SOC_IMX35 */
#ifdef CONFIG_SOC_IMX51
+/* i.mx51 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx51_cspi_data __initconst =
- imx_spi_imx_data_entry_single(MX51, CSPI, "imx51-cspi", 2, , SZ_4K);
+ imx_spi_imx_data_entry_single(MX51, CSPI, "imx35-cspi", 2, , SZ_4K);
const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
#define imx51_ecspi_data_entry(_id, _hwid) \
@@ -91,12 +93,14 @@ const struct imx_spi_imx_data imx51_ecspi_data[] __initconst = {
#endif /* ifdef CONFIG_SOC_IMX51 */
#ifdef CONFIG_SOC_IMX53
+/* i.mx53 has the i.mx35 type cspi */
const struct imx_spi_imx_data imx53_cspi_data __initconst =
- imx_spi_imx_data_entry_single(MX53, CSPI, "imx53-cspi", 0, , SZ_4K);
+ imx_spi_imx_data_entry_single(MX53, CSPI, "imx35-cspi", 0, , SZ_4K);
+/* i.mx53 has the i.mx51 type ecspi */
const struct imx_spi_imx_data imx53_ecspi_data[] __initconst = {
#define imx53_ecspi_data_entry(_id, _hwid) \
- imx_spi_imx_data_entry(MX53, ECSPI, "imx53-ecspi", _id, _hwid, SZ_4K)
+ imx_spi_imx_data_entry(MX53, ECSPI, "imx51-ecspi", _id, _hwid, SZ_4K)
imx53_ecspi_data_entry(0, 1),
imx53_ecspi_data_entry(1, 2),
};
diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
deleted file mode 100644
index 6cd6d7f686f..00000000000
--- a/arch/arm/plat-mxc/gpio.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
- * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
- *
- * Based on code from Freescale,
- * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <mach/hardware.h>
-#include <asm-generic/bug.h>
-
-static struct mxc_gpio_port *mxc_gpio_ports;
-static int gpio_table_size;
-
-#define cpu_is_mx1_mx2() (cpu_is_mx1() || cpu_is_mx2())
-
-#define GPIO_DR (cpu_is_mx1_mx2() ? 0x1c : 0x00)
-#define GPIO_GDIR (cpu_is_mx1_mx2() ? 0x00 : 0x04)
-#define GPIO_PSR (cpu_is_mx1_mx2() ? 0x24 : 0x08)
-#define GPIO_ICR1 (cpu_is_mx1_mx2() ? 0x28 : 0x0C)
-#define GPIO_ICR2 (cpu_is_mx1_mx2() ? 0x2C : 0x10)
-#define GPIO_IMR (cpu_is_mx1_mx2() ? 0x30 : 0x14)
-#define GPIO_ISR (cpu_is_mx1_mx2() ? 0x34 : 0x18)
-
-#define GPIO_INT_LOW_LEV (cpu_is_mx1_mx2() ? 0x3 : 0x0)
-#define GPIO_INT_HIGH_LEV (cpu_is_mx1_mx2() ? 0x2 : 0x1)
-#define GPIO_INT_RISE_EDGE (cpu_is_mx1_mx2() ? 0x0 : 0x2)
-#define GPIO_INT_FALL_EDGE (cpu_is_mx1_mx2() ? 0x1 : 0x3)
-#define GPIO_INT_NONE 0x4
-
-/* Note: This driver assumes 32 GPIOs are handled in one register */
-
-static void _clear_gpio_irqstatus(struct mxc_gpio_port *port, u32 index)
-{
- __raw_writel(1 << index, port->base + GPIO_ISR);
-}
-
-static void _set_gpio_irqenable(struct mxc_gpio_port *port, u32 index,
- int enable)
-{
- u32 l;
-
- l = __raw_readl(port->base + GPIO_IMR);
- l = (l & (~(1 << index))) | (!!enable << index);
- __raw_writel(l, port->base + GPIO_IMR);
-}
-
-static void gpio_ack_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- _clear_gpio_irqstatus(&mxc_gpio_ports[gpio / 32], gpio & 0x1f);
-}
-
-static void gpio_mask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 0);
-}
-
-static void gpio_unmask_irq(struct irq_data *d)
-{
- u32 gpio = irq_to_gpio(d->irq);
- _set_gpio_irqenable(&mxc_gpio_ports[gpio / 32], gpio & 0x1f, 1);
-}
-
-static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset);
-
-static int gpio_set_irq_type(struct irq_data *d, u32 type)
-{
- u32 gpio = irq_to_gpio(d->irq);
- struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
- u32 bit, val;
- int edge;
- void __iomem *reg = port->base;
-
- port->both_edges &= ~(1 << (gpio & 31));
- switch (type) {
- case IRQ_TYPE_EDGE_RISING:
- edge = GPIO_INT_RISE_EDGE;
- break;
- case IRQ_TYPE_EDGE_FALLING:
- edge = GPIO_INT_FALL_EDGE;
- break;
- case IRQ_TYPE_EDGE_BOTH:
- val = mxc_gpio_get(&port->chip, gpio & 31);
- if (val) {
- edge = GPIO_INT_LOW_LEV;
- pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
- } else {
- edge = GPIO_INT_HIGH_LEV;
- pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
- }
- port->both_edges |= 1 << (gpio & 31);
- break;
- case IRQ_TYPE_LEVEL_LOW:
- edge = GPIO_INT_LOW_LEV;
- break;
- case IRQ_TYPE_LEVEL_HIGH:
- edge = GPIO_INT_HIGH_LEV;
- break;
- default:
- return -EINVAL;
- }
-
- reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
- bit = gpio & 0xf;
- val = __raw_readl(reg) & ~(0x3 << (bit << 1));
- __raw_writel(val | (edge << (bit << 1)), reg);
- _clear_gpio_irqstatus(port, gpio & 0x1f);
-
- return 0;
-}
-
-static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
-{
- void __iomem *reg = port->base;
- u32 bit, val;
- int edge;
-
- reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
- bit = gpio & 0xf;
- val = __raw_readl(reg);
- edge = (val >> (bit << 1)) & 3;
- val &= ~(0x3 << (bit << 1));
- if (edge == GPIO_INT_HIGH_LEV) {
- edge = GPIO_INT_LOW_LEV;
- pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
- } else if (edge == GPIO_INT_LOW_LEV) {
- edge = GPIO_INT_HIGH_LEV;
- pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
- } else {
- pr_err("mxc: invalid configuration for GPIO %d: %x\n",
- gpio, edge);
- return;
- }
- __raw_writel(val | (edge << (bit << 1)), reg);
-}
-
-/* handle 32 interrupts in one status register */
-static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
-{
- u32 gpio_irq_no_base = port->virtual_irq_start;
-
- while (irq_stat != 0) {
- int irqoffset = fls(irq_stat) - 1;
-
- if (port->both_edges & (1 << irqoffset))
- mxc_flip_edge(port, irqoffset);
-
- generic_handle_irq(gpio_irq_no_base + irqoffset);
-
- irq_stat &= ~(1 << irqoffset);
- }
-}
-
-/* MX1 and MX3 have one interrupt *per* gpio port */
-static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
-{
- u32 irq_stat;
- struct mxc_gpio_port *port = irq_get_handler_data(irq);
-
- irq_stat = __raw_readl(port->base + GPIO_ISR) &
- __raw_readl(port->base + GPIO_IMR);
-
- mxc_gpio_irq_handler(port, irq_stat);
-}
-
-/* MX2 has one interrupt *for all* gpio ports */
-static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
-{
- int i;
- u32 irq_msk, irq_stat;
- struct mxc_gpio_port *port = irq_get_handler_data(irq);
-
- /* walk through all interrupt status registers */
- for (i = 0; i < gpio_table_size; i++) {
- irq_msk = __raw_readl(port[i].base + GPIO_IMR);
- if (!irq_msk)
- continue;
-
- irq_stat = __raw_readl(port[i].base + GPIO_ISR) & irq_msk;
- if (irq_stat)
- mxc_gpio_irq_handler(&port[i], irq_stat);
- }
-}
-
-/*
- * Set interrupt number "irq" in the GPIO as a wake-up source.
- * While system is running, all registered GPIO interrupts need to have
- * wake-up enabled. When system is suspended, only selected GPIO interrupts
- * need to have wake-up enabled.
- * @param irq interrupt source number
- * @param enable non-zero to enable as a wake-up source
- * @return 0 on success
- */
-static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
-{
- u32 gpio = irq_to_gpio(d->irq);
- u32 gpio_idx = gpio & 0x1F;
- struct mxc_gpio_port *port = &mxc_gpio_ports[gpio / 32];
-
- if (enable) {
- if (port->irq_high && (gpio_idx >= 16))
- enable_irq_wake(port->irq_high);
- else
- enable_irq_wake(port->irq);
- } else {
- if (port->irq_high && (gpio_idx >= 16))
- disable_irq_wake(port->irq_high);
- else
- disable_irq_wake(port->irq);
- }
-
- return 0;
-}
-
-static struct irq_chip gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = gpio_ack_irq,
- .irq_mask = gpio_mask_irq,
- .irq_unmask = gpio_unmask_irq,
- .irq_set_type = gpio_set_irq_type,
- .irq_set_wake = gpio_set_wake_irq,
-};
-
-static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
- int dir)
-{
- struct mxc_gpio_port *port =
- container_of(chip, struct mxc_gpio_port, chip);
- u32 l;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- l = __raw_readl(port->base + GPIO_GDIR);
- if (dir)
- l |= 1 << offset;
- else
- l &= ~(1 << offset);
- __raw_writel(l, port->base + GPIO_GDIR);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct mxc_gpio_port *port =
- container_of(chip, struct mxc_gpio_port, chip);
- void __iomem *reg = port->base + GPIO_DR;
- u32 l;
- unsigned long flags;
-
- spin_lock_irqsave(&port->lock, flags);
- l = (__raw_readl(reg) & (~(1 << offset))) | (!!value << offset);
- __raw_writel(l, reg);
- spin_unlock_irqrestore(&port->lock, flags);
-}
-
-static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct mxc_gpio_port *port =
- container_of(chip, struct mxc_gpio_port, chip);
-
- return (__raw_readl(port->base + GPIO_PSR) >> offset) & 1;
-}
-
-static int mxc_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- _set_gpio_direction(chip, offset, 0);
- return 0;
-}
-
-static int mxc_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- mxc_gpio_set(chip, offset, value);
- _set_gpio_direction(chip, offset, 1);
- return 0;
-}
-
-/*
- * This lock class tells lockdep that GPIO irqs are in a different
- * category than their parents, so it won't report false recursion.
- */
-static struct lock_class_key gpio_lock_class;
-
-int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
-{
- int i, j;
-
- /* save for local usage */
- mxc_gpio_ports = port;
- gpio_table_size = cnt;
-
- printk(KERN_INFO "MXC GPIO hardware\n");
-
- for (i = 0; i < cnt; i++) {
- /* disable the interrupt and clear the status */
- __raw_writel(0, port[i].base + GPIO_IMR);
- __raw_writel(~0, port[i].base + GPIO_ISR);
- for (j = port[i].virtual_irq_start;
- j < port[i].virtual_irq_start + 32; j++) {
- irq_set_lockdep_class(j, &gpio_lock_class);
- irq_set_chip_and_handler(j, &gpio_irq_chip,
- handle_level_irq);
- set_irq_flags(j, IRQF_VALID);
- }
-
- /* register gpio chip */
- port[i].chip.direction_input = mxc_gpio_direction_input;
- port[i].chip.direction_output = mxc_gpio_direction_output;
- port[i].chip.get = mxc_gpio_get;
- port[i].chip.set = mxc_gpio_set;
- port[i].chip.base = i * 32;
- port[i].chip.ngpio = 32;
-
- spin_lock_init(&port[i].lock);
-
- /* it's a serious configuration bug when it fails */
- BUG_ON(gpiochip_add(&port[i].chip) < 0);
-
- if (cpu_is_mx1() || cpu_is_mx3() || cpu_is_mx25() || cpu_is_mx51()) {
- /* setup one handler for each entry */
- irq_set_chained_handler(port[i].irq,
- mx3_gpio_irq_handler);
- irq_set_handler_data(port[i].irq, &port[i]);
- if (port[i].irq_high) {
- /* setup handler for GPIO 16 to 31 */
- irq_set_chained_handler(port[i].irq_high,
- mx3_gpio_irq_handler);
- irq_set_handler_data(port[i].irq_high,
- &port[i]);
- }
- }
- }
-
- if (cpu_is_mx2()) {
- /* setup one handler for all GPIO interrupts */
- irq_set_chained_handler(port[0].irq, mx2_gpio_irq_handler);
- irq_set_handler_data(port[0].irq, port);
- }
-
- return 0;
-}
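
Note: one technique in the file removed above deserves a pointer: the hardware has no both-edge trigger mode, so gpio_set_irq_type() emulates IRQ_TYPE_EDGE_BOTH by arming a level interrupt for the opposite of the pin's current state, and mxc_flip_edge() re-arms the opposite level from the chained handler after each hit. Restated as a hedged standalone sketch (struct sketch_port and both helpers are hypothetical):

/* Sketch of both-edge emulation via alternating level triggers;
 * sketch_read_pin()/sketch_set_trigger() are hypothetical helpers. */
static void sketch_rearm_both_edge(struct sketch_port *port, u32 pin)
{
	int level = sketch_read_pin(port, pin);	/* current input state */

	/* Watch for the state the pin is NOT in: that is the next edge. */
	sketch_set_trigger(port, pin,
			   level ? GPIO_INT_LOW_LEV : GPIO_INT_HIGH_LEV);
}
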
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index da7991832af..4e3d97890d6 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -43,6 +43,15 @@ extern void mx35_init_irq(void);
extern void mx50_init_irq(void);
extern void mx51_init_irq(void);
extern void mx53_init_irq(void);
+extern void imx1_soc_init(void);
+extern void imx21_soc_init(void);
+extern void imx25_soc_init(void);
+extern void imx27_soc_init(void);
+extern void imx31_soc_init(void);
+extern void imx35_soc_init(void);
+extern void imx50_soc_init(void);
+extern void imx51_soc_init(void);
+extern void imx53_soc_init(void);
extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
extern int mx1_clocks_init(unsigned long fref);
@@ -55,7 +64,8 @@ extern int mx51_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
extern int mx53_clocks_init(unsigned long ckil, unsigned long osc,
unsigned long ckih1, unsigned long ckih2);
-extern int mxc_register_gpios(void);
+extern struct platform_device *mxc_register_gpio(char *name, int id,
+ resource_size_t iobase, resource_size_t iosize, int irq, int irq_high);
extern int mxc_register_device(struct platform_device *pdev, void *data);
extern void mxc_set_cpu_type(unsigned int type);
extern void mxc_arch_reset_init(void __iomem *);
diff --git a/arch/arm/plat-mxc/include/mach/devices-common.h b/arch/arm/plat-mxc/include/mach/devices-common.h
index fa8477337f9..03f62664537 100644
--- a/arch/arm/plat-mxc/include/mach/devices-common.h
+++ b/arch/arm/plat-mxc/include/mach/devices-common.h
@@ -10,6 +10,8 @@
#include <linux/platform_device.h>
#include <linux/init.h>
+extern struct device mxc_aips_bus;
+
struct platform_device *imx_add_platform_device_dmamask(
const char *name, int id,
const struct resource *res, unsigned int num_resources,
diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h
index a2747f12813..31c820c1b79 100644
--- a/arch/arm/plat-mxc/include/mach/gpio.h
+++ b/arch/arm/plat-mxc/include/mach/gpio.h
@@ -36,31 +36,4 @@
#define gpio_to_irq(gpio) (MXC_GPIO_IRQ_START + (gpio))
#define irq_to_gpio(irq) ((irq) - MXC_GPIO_IRQ_START)
-struct mxc_gpio_port {
- void __iomem *base;
- int irq;
- int irq_high;
- int virtual_irq_start;
- struct gpio_chip chip;
- u32 both_edges;
- spinlock_t lock;
-};
-
-#define DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, _irq_high) \
- { \
- .chip.label = "gpio-" #_id, \
- .irq = _irq, \
- .irq_high = _irq_high, \
- .base = soc ## _IO_ADDRESS( \
- soc ## _GPIO ## _hwid ## _BASE_ADDR), \
- .virtual_irq_start = MXC_GPIO_IRQ_START + (_id) * 32, \
- }
-
-#define DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, _irq) \
- DEFINE_IMX_GPIO_PORT_IRQ_HIGH(soc, _id, _hwid, _irq, 0)
-#define DEFINE_IMX_GPIO_PORT(soc, _id, _hwid) \
- DEFINE_IMX_GPIO_PORT_IRQ(soc, _id, _hwid, 0)
-
-int mxc_gpio_init(struct mxc_gpio_port*, int);
-
#endif
diff --git a/arch/arm/plat-mxc/include/mach/irqs.h b/arch/arm/plat-mxc/include/mach/irqs.h
index 35c89bcdf75..00e812bbd81 100644
--- a/arch/arm/plat-mxc/include/mach/irqs.h
+++ b/arch/arm/plat-mxc/include/mach/irqs.h
@@ -11,6 +11,8 @@
#ifndef __ASM_ARCH_MXC_IRQS_H__
#define __ASM_ARCH_MXC_IRQS_H__
+#include <asm-generic/gpio.h>
+
/*
* SoCs with TZIC interrupt controller have 128 IRQs, those with AVIC have 64
*/
@@ -22,30 +24,13 @@
#define MXC_GPIO_IRQ_START MXC_INTERNAL_IRQS
-/* these are ordered by size to support multi-SoC kernels */
-#if defined CONFIG_SOC_IMX53
-#define MXC_GPIO_IRQS (32 * 7)
-#elif defined CONFIG_ARCH_MX2
-#define MXC_GPIO_IRQS (32 * 6)
-#elif defined CONFIG_SOC_IMX50
-#define MXC_GPIO_IRQS (32 * 6)
-#elif defined CONFIG_ARCH_MX1
-#define MXC_GPIO_IRQS (32 * 4)
-#elif defined CONFIG_ARCH_MX25
-#define MXC_GPIO_IRQS (32 * 4)
-#elif defined CONFIG_SOC_IMX51
-#define MXC_GPIO_IRQS (32 * 4)
-#elif defined CONFIG_ARCH_MX3
-#define MXC_GPIO_IRQS (32 * 3)
-#endif
-
/*
* The next 16 interrupts are for board specific purposes. Since
* the kernel can only run on one machine at a time, we can re-use
* these. If you need more, increase MXC_BOARD_IRQS, but keep it
* within sensible limits.
*/
-#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + MXC_GPIO_IRQS)
+#define MXC_BOARD_IRQ_START (MXC_INTERNAL_IRQS + ARCH_NR_GPIOS)
#ifdef CONFIG_MACH_MX31ADS_WM1133_EV1
#define MXC_BOARD_IRQS 80
diff --git a/arch/arm/plat-omap/include/plat/gpio.h b/arch/arm/plat-omap/include/plat/gpio.h
index ec97e00cb58..91e8de3db08 100644
--- a/arch/arm/plat-omap/include/plat/gpio.h
+++ b/arch/arm/plat-omap/include/plat/gpio.h
@@ -174,12 +174,32 @@ struct omap_gpio_dev_attr {
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
};
+struct omap_gpio_reg_offs {
+ u16 revision;
+ u16 direction;
+ u16 datain;
+ u16 dataout;
+ u16 set_dataout;
+ u16 clr_dataout;
+ u16 irqstatus;
+ u16 irqstatus2;
+ u16 irqenable;
+ u16 set_irqenable;
+ u16 clr_irqenable;
+ u16 debounce;
+ u16 debounce_en;
+
+ bool irqenable_inv;
+};
+
struct omap_gpio_platform_data {
u16 virtual_irq_start;
int bank_type;
int bank_width; /* GPIO bank width */
int bank_stride; /* Only needed for omap1 MPUIO */
bool dbck_flag; /* dbck required or not - True for OMAP3&4 */
+
+ struct omap_gpio_reg_offs *regs;
};
/* TODO: Analyze removing gpio_bank_count usage from driver code */
diff --git a/arch/arm/plat-omap/include/plat/omap_device.h b/arch/arm/plat-omap/include/plat/omap_device.h
index e4c349ff9fd..ee405b36df4 100644
--- a/arch/arm/plat-omap/include/plat/omap_device.h
+++ b/arch/arm/plat-omap/include/plat/omap_device.h
@@ -44,6 +44,10 @@ extern struct device omap_device_parent;
#define OMAP_DEVICE_STATE_IDLE 2
#define OMAP_DEVICE_STATE_SHUTDOWN 3
+/* omap_device.flags values */
+#define OMAP_DEVICE_SUSPENDED BIT(0)
+#define OMAP_DEVICE_NO_IDLE_ON_SUSPEND BIT(1)
+
/**
* struct omap_device - omap_device wrapper for platform_devices
* @pdev: platform_device
@@ -73,6 +77,7 @@ struct omap_device {
s8 pm_lat_level;
u8 hwmods_cnt;
u8 _state;
+ u8 flags;
};
/* Device driver interface (call via platform_data fn ptrs) */
@@ -117,6 +122,10 @@ int omap_device_enable_hwmods(struct omap_device *od);
int omap_device_disable_clocks(struct omap_device *od);
int omap_device_enable_clocks(struct omap_device *od);
+static inline void omap_device_disable_idle_on_suspend(struct omap_device *od)
+{
+ od->flags |= OMAP_DEVICE_NO_IDLE_ON_SUSPEND;
+}
/*
* Entries should be kept in latency order ascending
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index 49fc0df0c21..2526fa312b8 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -537,6 +537,7 @@ int omap_early_device_register(struct omap_device *od)
return 0;
}
+#ifdef CONFIG_PM_RUNTIME
static int _od_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -563,13 +564,55 @@ static int _od_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
+#endif
-static struct dev_power_domain omap_device_power_domain = {
+#ifdef CONFIG_SUSPEND
+static int _od_suspend_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *od = to_omap_device(pdev);
+ int ret;
+
+ if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
+ return pm_generic_suspend_noirq(dev);
+
+ ret = pm_generic_suspend_noirq(dev);
+
+ if (!ret && !pm_runtime_status_suspended(dev)) {
+ if (pm_generic_runtime_suspend(dev) == 0) {
+ omap_device_idle(pdev);
+ od->flags |= OMAP_DEVICE_SUSPENDED;
+ }
+ }
+
+ return ret;
+}
+
+static int _od_resume_noirq(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct omap_device *od = to_omap_device(pdev);
+
+ if (od->flags & OMAP_DEVICE_NO_IDLE_ON_SUSPEND)
+ return pm_generic_resume_noirq(dev);
+
+ if ((od->flags & OMAP_DEVICE_SUSPENDED) &&
+ !pm_runtime_status_suspended(dev)) {
+ od->flags &= ~OMAP_DEVICE_SUSPENDED;
+ omap_device_enable(pdev);
+ pm_generic_runtime_resume(dev);
+ }
+
+ return pm_generic_resume_noirq(dev);
+}
+#endif
+
+static struct dev_pm_domain omap_device_pm_domain = {
.ops = {
- .runtime_suspend = _od_runtime_suspend,
- .runtime_idle = _od_runtime_idle,
- .runtime_resume = _od_runtime_resume,
+ SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
+ _od_runtime_idle)
USE_PLATFORM_PM_SLEEP_OPS
+ SET_SYSTEM_SLEEP_PM_OPS(_od_suspend_noirq, _od_resume_noirq)
}
};
@@ -586,7 +629,7 @@ int omap_device_register(struct omap_device *od)
pr_debug("omap_device: %s: registering\n", od->pdev.name);
od->pdev.dev.parent = &omap_device_parent;
- od->pdev.dev.pwr_domain = &omap_device_power_domain;
+ od->pdev.dev.pm_domain = &omap_device_pm_domain;
return platform_device_register(&od->pdev);
}
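
The suspend_noirq/resume_noirq pair above idles any omap_device that its
driver left active, unless the new flag is set. A sketch of the intended
opt-out, assuming board code that holds the omap_device pointer for, say, a
console UART it wants kept alive as a wakeup source (the function name is
hypothetical):

/* Hypothetical: prevent _od_suspend_noirq() from idling this device. */
static void hypothetical_keep_active_on_suspend(struct omap_device *od)
{
	omap_device_disable_idle_on_suspend(od);
}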
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
deleted file mode 100644
index 1ab332e37d7..00000000000
--- a/arch/arm/plat-pxa/include/plat/sdhci.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/arch/arm/plat-pxa/include/plat/sdhci.h
- *
- * Copyright 2010 Marvell
- * Zhangfei Gao <zhangfei.gao@marvell.com>
- *
- * PXA Platform - SDHCI platform data definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __PLAT_PXA_SDHCI_H
-#define __PLAT_PXA_SDHCI_H
-
-/* pxa specific flag */
-/* Require clock free running */
-#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
-
-/* Board design supports 8-bit data on SD/SDIO BUS */
-#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
-
-/*
- * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI
- * @max_speed: the maximum speed supported
- * @quirks: quirks of specific device
- * @flags: flags for platform requirement
- */
-struct sdhci_pxa_platdata {
- unsigned int max_speed;
- unsigned int quirks;
- unsigned int flags;
-};
-
-#endif /* __PLAT_PXA_SDHCI_H */
diff --git a/arch/microblaze/include/asm/pci-bridge.h b/arch/microblaze/include/asm/pci-bridge.h
index 746df91e579..242be57a319 100644
--- a/arch/microblaze/include/asm/pci-bridge.h
+++ b/arch/microblaze/include/asm/pci-bridge.h
@@ -19,9 +19,6 @@ enum {
*/
PCI_REASSIGN_ALL_RSRC = 0x00000001,
- /* Re-assign all bus numbers */
- PCI_REASSIGN_ALL_BUS = 0x00000002,
-
/* Do not try to assign, just use existing setup */
PCI_PROBE_ONLY = 0x00000004,
@@ -110,16 +107,6 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
return bus->sysdata;
}
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- struct pci_controller *host;
-
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- host = pci_bus_to_host(bus);
- return host ? host->dn : NULL;
-}
-
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
/* No specific ISA handling on ppc32 at this stage, it
diff --git a/arch/microblaze/include/asm/pci.h b/arch/microblaze/include/asm/pci.h
index ba65cf47254..1dd9d6b1e27 100644
--- a/arch/microblaze/include/asm/pci.h
+++ b/arch/microblaze/include/asm/pci.h
@@ -40,8 +40,7 @@ struct pci_dev;
* Set this to 1 if you want the kernel to re-assign all PCI
* bus numbers (don't do that on ppc64 yet !)
*/
-#define pcibios_assign_all_busses() \
- (pci_has_flag(PCI_REASSIGN_ALL_BUS))
+#define pcibios_assign_all_busses() 0
static inline void pcibios_set_master(struct pci_dev *dev)
{
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index d0890d36ef6..9bd01ecb00d 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -29,21 +29,6 @@
extern int early_uartlite_console(void);
extern int early_uart16550_console(void);
-#ifdef CONFIG_PCI
-/*
- * PCI <-> OF matching functions
- * (XXX should these be here?)
- */
-struct pci_bus;
-struct pci_dev;
-extern int pci_device_from_OF_node(struct device_node *node,
- u8 *bus, u8 *devfn);
-extern struct device_node *pci_busdev_to_OF_node(struct pci_bus *bus,
- int devfn);
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *dev);
-extern void pci_create_OF_bus_map(void);
-#endif
-
/*
 * OF address retrieval & translation
*/
diff --git a/arch/microblaze/pci/Makefile b/arch/microblaze/pci/Makefile
index 9889cc2e129..d1114fbd478 100644
--- a/arch/microblaze/pci/Makefile
+++ b/arch/microblaze/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile
#
-obj-$(CONFIG_PCI) += pci_32.o pci-common.o indirect_pci.o iomap.o
+obj-$(CONFIG_PCI) += pci-common.o indirect_pci.o iomap.o
obj-$(CONFIG_PCI_XILINX) += xilinx_pci.o
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 53599067d2f..041b1d86d75 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -50,6 +50,11 @@ unsigned int pci_flags;
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
+unsigned long isa_io_base;
+unsigned long pci_dram_offset;
+static int pci_bus_count;
+
+
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
pci_dma_ops = dma_ops;
@@ -1558,6 +1563,112 @@ void __devinit pcibios_setup_phb_resources(struct pci_controller *hose)
(unsigned long)hose->io_base_virt - _IO_BASE);
}
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->dn);
+}
+
+static void __devinit pcibios_scan_phb(struct pci_controller *hose)
+{
+ struct pci_bus *bus;
+ struct device_node *node = hose->dn;
+ unsigned long io_offset;
+ struct resource *res = &hose->io_resource;
+
+ pr_debug("PCI: Scanning PHB %s\n",
+ node ? node->full_name : "<NO NAME>");
+
+ /* Create an empty bus for the toplevel */
+ bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
+ if (bus == NULL) {
+ printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
+ hose->global_number);
+ return;
+ }
+ bus->secondary = hose->first_busno;
+ hose->bus = bus;
+
+ /* Fixup IO space offset */
+ io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
+ res->start = (res->start + io_offset) & 0xffffffffu;
+ res->end = (res->end + io_offset) & 0xffffffffu;
+
+ /* Wire up PHB bus resources */
+ pcibios_setup_phb_resources(hose);
+
+ /* Scan children */
+ hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
+}
+
+static int __init pcibios_init(void)
+{
+ struct pci_controller *hose, *tmp;
+ int next_busno = 0;
+
+ printk(KERN_INFO "PCI: Probing PCI hardware\n");
+
+ /* Scan all of the recorded PCI controllers. */
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ hose->last_busno = 0xff;
+ pcibios_scan_phb(hose);
+ printk(KERN_INFO "calling pci_bus_add_devices()\n");
+ pci_bus_add_devices(hose->bus);
+ if (next_busno <= hose->last_busno)
+ next_busno = hose->last_busno + 1;
+ }
+ pci_bus_count = next_busno;
+
+ /* Call common code to handle resource allocation */
+ pcibios_resource_survey();
+
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+static struct pci_controller *pci_bus_to_hose(int bus)
+{
+ struct pci_controller *hose, *tmp;
+
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
+ if (bus >= hose->first_busno && bus <= hose->last_busno)
+ return hose;
+ return NULL;
+}
+
+/* Provide information on locations of various I/O regions in physical
+ * memory. Do this on a per-card basis so that we choose the right
+ * root bridge.
+ * Note that the returned IO or memory base is a physical address
+ */
+
+long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
+{
+ struct pci_controller *hose;
+ long result = -EOPNOTSUPP;
+
+ hose = pci_bus_to_hose(bus);
+ if (!hose)
+ return -ENODEV;
+
+ switch (which) {
+ case IOBASE_BRIDGE_NUMBER:
+ return (long)hose->first_busno;
+ case IOBASE_MEMORY:
+ return (long)hose->pci_mem_offset;
+ case IOBASE_IO:
+ return (long)hose->io_base_phys;
+ case IOBASE_ISA_IO:
+ return (long)isa_io_base;
+ case IOBASE_ISA_MEM:
+ return (long)isa_mem_base;
+ }
+
+ return result;
+}
+
/*
* Null PCI config access functions, for the case when we can't
* find a hose.
@@ -1626,3 +1737,4 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
{
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
+
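
For reference, the sys_pciconfig_iobase() added above mirrors the
long-standing powerpc/ARM syscall. A userspace sketch, assuming the
architecture exports it as __NR_pciconfig_iobase and with IOBASE_IO defined
locally to mirror the kernel constant:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#define IOBASE_IO	2	/* mirrors the kernel's definition */

int main(void)
{
	/* which=IOBASE_IO, bus=0, devfn unused by this query */
	long io_base = syscall(__NR_pciconfig_iobase, IOBASE_IO, 0, 0);

	if (io_base < 0)
		perror("pciconfig_iobase");
	else
		printf("PCI I/O base: 0x%lx\n", io_base);
	return 0;
}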
diff --git a/arch/microblaze/pci/pci_32.c b/arch/microblaze/pci/pci_32.c
deleted file mode 100644
index 92728a6cfd8..00000000000
--- a/arch/microblaze/pci/pci_32.c
+++ /dev/null
@@ -1,432 +0,0 @@
-/*
- * Common pmac/prep/chrp pci routines. -- Cort
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/capability.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-#include <linux/list.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/sections.h>
-#include <asm/pci-bridge.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-
-#undef DEBUG
-
-unsigned long isa_io_base;
-unsigned long pci_dram_offset;
-int pcibios_assign_bus_offset = 1;
-
-static u8 *pci_to_OF_bus_map;
-
-/* By default, we don't re-assign bus numbers. We do this only on
- * some pmacs
- */
-static int pci_assign_all_buses;
-
-static int pci_bus_count;
-
-/*
- * Functions below are used on OpenFirmware machines.
- */
-static void
-make_one_node_map(struct device_node *node, u8 pci_bus)
-{
- const int *bus_range;
- int len;
-
- if (pci_bus >= pci_bus_count)
- return;
- bus_range = of_get_property(node, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int)) {
- printk(KERN_WARNING "Can't get bus-range for %s, "
- "assuming it starts at 0\n", node->full_name);
- pci_to_OF_bus_map[pci_bus] = 0;
- } else
- pci_to_OF_bus_map[pci_bus] = bus_range[0];
-
- for_each_child_of_node(node, node) {
- struct pci_dev *dev;
- const unsigned int *class_code, *reg;
-
- class_code = of_get_property(node, "class-code", NULL);
- if (!class_code ||
- ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS))
- continue;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
- continue;
- dev = pci_get_bus_and_slot(pci_bus, ((reg[0] >> 8) & 0xff));
- if (!dev || !dev->subordinate) {
- pci_dev_put(dev);
- continue;
- }
- make_one_node_map(node, dev->subordinate->number);
- pci_dev_put(dev);
- }
-}
-
-void
-pcibios_make_OF_bus_map(void)
-{
- int i;
- struct pci_controller *hose, *tmp;
- struct property *map_prop;
- struct device_node *dn;
-
- pci_to_OF_bus_map = kmalloc(pci_bus_count, GFP_KERNEL);
- if (!pci_to_OF_bus_map) {
- printk(KERN_ERR "Can't allocate OF bus map !\n");
- return;
- }
-
- /* We fill the bus map with invalid values, that helps
- * debugging.
- */
- for (i = 0; i < pci_bus_count; i++)
- pci_to_OF_bus_map[i] = 0xff;
-
- /* For each hose, we begin searching bridges */
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- struct device_node *node = hose->dn;
-
- if (!node)
- continue;
- make_one_node_map(node, hose->first_busno);
- }
- dn = of_find_node_by_path("/");
- map_prop = of_find_property(dn, "pci-OF-bus-map", NULL);
- if (map_prop) {
- BUG_ON(pci_bus_count > map_prop->length);
- memcpy(map_prop->value, pci_to_OF_bus_map, pci_bus_count);
- }
- of_node_put(dn);
-#ifdef DEBUG
- printk(KERN_INFO "PCI->OF bus map:\n");
- for (i = 0; i < pci_bus_count; i++) {
- if (pci_to_OF_bus_map[i] == 0xff)
- continue;
- printk(KERN_INFO "%d -> %d\n", i, pci_to_OF_bus_map[i]);
- }
-#endif
-}
-
-typedef int (*pci_OF_scan_iterator)(struct device_node *node, void *data);
-
-static struct device_node *scan_OF_pci_childs(struct device_node *parent,
- pci_OF_scan_iterator filter, void *data)
-{
- struct device_node *node;
- struct device_node *sub_node;
-
- for_each_child_of_node(parent, node) {
- const unsigned int *class_code;
-
- if (filter(node, data)) {
- of_node_put(node);
- return node;
- }
-
- /* For PCI<->PCI bridges or CardBus bridges, we go down
- * Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well.
- */
- class_code = of_get_property(node, "class-code", NULL);
- if ((!class_code ||
- ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
- strcmp(node->name, "multifunc-device"))
- continue;
- sub_node = scan_OF_pci_childs(node, filter, data);
- if (sub_node) {
- of_node_put(node);
- return sub_node;
- }
- }
- return NULL;
-}
-
-static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
- unsigned int devfn)
-{
- struct device_node *np, *cnp;
- const u32 *reg;
- unsigned int psize;
-
- for_each_child_of_node(parent, np) {
- reg = of_get_property(np, "reg", &psize);
- if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
- return np;
-
- /* Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well. */
- if (!strcmp(np->name, "multifunc-device")) {
- cnp = scan_OF_for_pci_dev(np, devfn);
- if (cnp)
- return cnp;
- }
- }
- return NULL;
-}
-
-
-static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
-{
- struct device_node *parent, *np;
-
- /* Are we a root bus ? */
- if (bus->self == NULL || bus->parent == NULL) {
- struct pci_controller *hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return NULL;
- return of_node_get(hose->dn);
- }
-
- /* not a root bus, we need to get our parent */
- parent = scan_OF_for_pci_bus(bus->parent);
- if (parent == NULL)
- return NULL;
-
- /* now iterate for children for a match */
- np = scan_OF_for_pci_dev(parent, bus->self->devfn);
- of_node_put(parent);
-
- return np;
-}
-
-/*
- * Scans the OF tree for a device node matching a PCI device
- */
-struct device_node *
-pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
-{
- struct device_node *parent, *np;
-
- pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
- parent = scan_OF_for_pci_bus(bus);
- if (parent == NULL)
- return NULL;
- pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
- np = scan_OF_for_pci_dev(parent, devfn);
- of_node_put(parent);
- pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
-
- /* XXX most callers don't release the returned node
- * mostly because ppc64 doesn't increase the refcount,
- * we need to fix that.
- */
- return np;
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-static int
-find_OF_pci_device_filter(struct device_node *node, void *data)
-{
- return ((void *)node == data);
-}
-
-/*
- * Returns the PCI device matching a given OF node
- */
-int
-pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
-{
- const unsigned int *reg;
- struct pci_controller *hose;
- struct pci_dev *dev = NULL;
-
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->dn)
- return -ENODEV;
- if (!scan_OF_pci_childs(hose->dn,
- find_OF_pci_device_filter, (void *)node))
- return -ENODEV;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
- return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
-
- /* Ok, here we need some tweak. If we have already renumbered
- * all busses, we can't rely on the OF bus number any more.
- * the pci_to_OF_bus_map is not enough as several PCI busses
- * may match the same OF bus number.
- */
- if (!pci_to_OF_bus_map)
- return 0;
-
- for_each_pci_dev(dev)
- if (pci_to_OF_bus_map[dev->bus->number] == *bus &&
- dev->devfn == *devfn) {
- *bus = dev->bus->number;
- pci_dev_put(dev);
- return 0;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL(pci_device_from_OF_node);
-
-/* We create the "pci-OF-bus-map" property now so it appears in the
- * /proc device tree
- */
-void __init
-pci_create_OF_bus_map(void)
-{
- struct property *of_prop;
- struct device_node *dn;
-
- of_prop = (struct property *) alloc_bootmem(sizeof(struct property) + \
- 256);
- if (!of_prop)
- return;
- dn = of_find_node_by_path("/");
- if (dn) {
- memset(of_prop, -1, sizeof(struct property) + 256);
- of_prop->name = "pci-OF-bus-map";
- of_prop->length = 256;
- of_prop->value = &of_prop[1];
- prom_add_property(dn, of_prop);
- of_node_put(dn);
- }
-}
-
-static void __devinit pcibios_scan_phb(struct pci_controller *hose)
-{
- struct pci_bus *bus;
- struct device_node *node = hose->dn;
- unsigned long io_offset;
- struct resource *res = &hose->io_resource;
-
- pr_debug("PCI: Scanning PHB %s\n",
- node ? node->full_name : "<NO NAME>");
-
- /* Create an empty bus for the toplevel */
- bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose);
- if (bus == NULL) {
- printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
- hose->global_number);
- return;
- }
- bus->dev.of_node = of_node_get(node);
- bus->secondary = hose->first_busno;
- hose->bus = bus;
-
- /* Fixup IO space offset */
- io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
- res->start = (res->start + io_offset) & 0xffffffffu;
- res->end = (res->end + io_offset) & 0xffffffffu;
-
- /* Wire up PHB bus resources */
- pcibios_setup_phb_resources(hose);
-
- /* Scan children */
- hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
-}
-
-static int __init pcibios_init(void)
-{
- struct pci_controller *hose, *tmp;
- int next_busno = 0;
-
- printk(KERN_INFO "PCI: Probing PCI hardware\n");
-
- if (pci_flags & PCI_REASSIGN_ALL_BUS) {
- printk(KERN_INFO "setting pci_asign_all_busses\n");
- pci_assign_all_buses = 1;
- }
-
- /* Scan all of the recorded PCI controllers. */
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
- if (pci_assign_all_buses)
- hose->first_busno = next_busno;
- hose->last_busno = 0xff;
- pcibios_scan_phb(hose);
- printk(KERN_INFO "calling pci_bus_add_devices()\n");
- pci_bus_add_devices(hose->bus);
- if (pci_assign_all_buses || next_busno <= hose->last_busno)
- next_busno = hose->last_busno + \
- pcibios_assign_bus_offset;
- }
- pci_bus_count = next_busno;
-
- /* OpenFirmware based machines need a map of OF bus
- * numbers vs. kernel bus numbers since we may have to
- * remap them.
- */
- if (pci_assign_all_buses)
- pcibios_make_OF_bus_map();
-
- /* Call common code to handle resource allocation */
- pcibios_resource_survey();
-
- return 0;
-}
-
-subsys_initcall(pcibios_init);
-
-static struct pci_controller*
-pci_bus_to_hose(int bus)
-{
- struct pci_controller *hose, *tmp;
-
- list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
- if (bus >= hose->first_busno && bus <= hose->last_busno)
- return hose;
- return NULL;
-}
-
-/* Provide information on locations of various I/O regions in physical
- * memory. Do this on a per-card basis so that we choose the right
- * root bridge.
- * Note that the returned IO or memory base is a physical address
- */
-
-long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
-{
- struct pci_controller *hose;
- long result = -EOPNOTSUPP;
-
- hose = pci_bus_to_hose(bus);
- if (!hose)
- return -ENODEV;
-
- switch (which) {
- case IOBASE_BRIDGE_NUMBER:
- return (long)hose->first_busno;
- case IOBASE_MEMORY:
- return (long)hose->pci_mem_offset;
- case IOBASE_IO:
- return (long)hose->io_base_phys;
- case IOBASE_ISA_IO:
- return (long)isa_io_base;
- case IOBASE_ISA_MEM:
- return (long)isa_mem_base;
- }
-
- return result;
-}
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index b90dbf8e5cd..90bd3ed4816 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -171,15 +171,9 @@ static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
#ifndef CONFIG_PPC64
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- struct pci_controller *host;
-
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- host = pci_bus_to_host(bus);
- return host ? host->dn : NULL;
-}
+extern int pci_device_from_OF_node(struct device_node *node,
+ u8 *bus, u8 *devfn);
+extern void pci_create_OF_bus_map(void);
static inline int isa_vaddr_is_ioport(void __iomem *address)
{
@@ -223,17 +217,8 @@ struct pci_dn {
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
-extern struct device_node *fetch_dev_dn(struct pci_dev *dev);
extern void * update_dn_pci_info(struct device_node *dn, void *data);
-/* Get a device_node from a pci_dev. This code must be fast except
- * in the case where the sysdata is incorrect and needs to be fixed
- * up (this will only happen once). */
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev)
-{
- return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev);
-}
-
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
{
@@ -244,14 +229,6 @@ static inline int pci_device_from_OF_node(struct device_node *np,
return 0;
}
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- if (bus->self)
- return pci_device_to_OF_node(bus->self);
- else
- return bus->dev.of_node; /* Must be root bus (PHB) */
-}
-
/** Find the bus corresponding to the indicated device node */
extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 7d7790954e0..1f522680ea1 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -179,8 +179,7 @@ extern int remove_phb_dynamic(struct pci_controller *phb);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
struct pci_bus *bus, int devfn);
-extern void of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev);
+extern void of_scan_pci_bridge(struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index c189aa5fe1f..b823536375d 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -22,20 +22,6 @@
#define HAVE_ARCH_DEVTREE_FIXUPS
-#ifdef CONFIG_PPC32
-/*
- * PCI <-> OF matching functions
- * (XXX should these be here?)
- */
-struct pci_bus;
-struct pci_dev;
-extern int pci_device_from_OF_node(struct device_node *node,
- u8* bus, u8* devfn);
-extern struct device_node* pci_busdev_to_OF_node(struct pci_bus *, int);
-extern struct device_node* pci_device_to_OF_node(struct pci_dev *);
-extern void pci_create_OF_bus_map(void);
-#endif
-
/*
* OF address retreival & translation
*/
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 893af2a9cd0..a3c92770e42 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1097,9 +1097,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- /* Setup OF node pointer in the device */
- dev->dev.of_node = pci_device_to_OF_node(dev);
-
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
@@ -1685,6 +1682,13 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn,
return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+
+ return of_node_get(hose->dn);
+}
+
/**
* pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
* @hose: Pointer to the PCI host controller instance structure
@@ -1705,7 +1709,6 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
hose->global_number);
return;
}
- bus->dev.of_node = of_node_get(node);
bus->secondary = hose->first_busno;
hose->bus = bus;
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index bedb370459f..86585508e9c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -167,150 +167,26 @@ pcibios_make_OF_bus_map(void)
#endif
}
-typedef int (*pci_OF_scan_iterator)(struct device_node* node, void* data);
-
-static struct device_node*
-scan_OF_pci_childs(struct device_node *parent, pci_OF_scan_iterator filter, void* data)
-{
- struct device_node *node;
- struct device_node* sub_node;
-
- for_each_child_of_node(parent, node) {
- const unsigned int *class_code;
-
- if (filter(node, data)) {
- of_node_put(node);
- return node;
- }
-
- /* For PCI<->PCI bridges or CardBus bridges, we go down
- * Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well.
- */
- class_code = of_get_property(node, "class-code", NULL);
- if ((!class_code || ((*class_code >> 8) != PCI_CLASS_BRIDGE_PCI &&
- (*class_code >> 8) != PCI_CLASS_BRIDGE_CARDBUS)) &&
- strcmp(node->name, "multifunc-device"))
- continue;
- sub_node = scan_OF_pci_childs(node, filter, data);
- if (sub_node) {
- of_node_put(node);
- return sub_node;
- }
- }
- return NULL;
-}
-
-static struct device_node *scan_OF_for_pci_dev(struct device_node *parent,
- unsigned int devfn)
-{
- struct device_node *np, *cnp;
- const u32 *reg;
- unsigned int psize;
-
- for_each_child_of_node(parent, np) {
- reg = of_get_property(np, "reg", &psize);
- if (reg && psize >= 4 && ((reg[0] >> 8) & 0xff) == devfn)
- return np;
-
- /* Note: some OFs create a parent node "multifunc-device" as
- * a fake root for all functions of a multi-function device,
- * we go down them as well. */
- if (!strcmp(np->name, "multifunc-device")) {
- cnp = scan_OF_for_pci_dev(np, devfn);
- if (cnp)
- return cnp;
- }
- }
- return NULL;
-}
-
-
-static struct device_node *scan_OF_for_pci_bus(struct pci_bus *bus)
-{
- struct device_node *parent, *np;
-
- /* Are we a root bus ? */
- if (bus->self == NULL || bus->parent == NULL) {
- struct pci_controller *hose = pci_bus_to_host(bus);
- if (hose == NULL)
- return NULL;
- return of_node_get(hose->dn);
- }
-
- /* not a root bus, we need to get our parent */
- parent = scan_OF_for_pci_bus(bus->parent);
- if (parent == NULL)
- return NULL;
-
- /* now iterate for children for a match */
- np = scan_OF_for_pci_dev(parent, bus->self->devfn);
- of_node_put(parent);
-
- return np;
-}
-
-/*
- * Scans the OF tree for a device node matching a PCI device
- */
-struct device_node *
-pci_busdev_to_OF_node(struct pci_bus *bus, int devfn)
-{
- struct device_node *parent, *np;
-
- pr_debug("pci_busdev_to_OF_node(%d,0x%x)\n", bus->number, devfn);
- parent = scan_OF_for_pci_bus(bus);
- if (parent == NULL)
- return NULL;
- pr_debug(" parent is %s\n", parent ? parent->full_name : "<NULL>");
- np = scan_OF_for_pci_dev(parent, devfn);
- of_node_put(parent);
- pr_debug(" result is %s\n", np ? np->full_name : "<NULL>");
-
- /* XXX most callers don't release the returned node
- * mostly because ppc64 doesn't increase the refcount,
- * we need to fix that.
- */
- return np;
-}
-EXPORT_SYMBOL(pci_busdev_to_OF_node);
-
-struct device_node*
-pci_device_to_OF_node(struct pci_dev *dev)
-{
- return pci_busdev_to_OF_node(dev->bus, dev->devfn);
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
-static int
-find_OF_pci_device_filter(struct device_node* node, void* data)
-{
- return ((void *)node == data);
-}
/*
* Returns the PCI device matching a given OF node
*/
-int
-pci_device_from_OF_node(struct device_node* node, u8* bus, u8* devfn)
+int pci_device_from_OF_node(struct device_node *node, u8 *bus, u8 *devfn)
{
- const unsigned int *reg;
- struct pci_controller* hose;
- struct pci_dev* dev = NULL;
-
- /* Make sure it's really a PCI device */
- hose = pci_find_hose_for_OF_device(node);
- if (!hose || !hose->dn)
- return -ENODEV;
- if (!scan_OF_pci_childs(hose->dn,
- find_OF_pci_device_filter, (void *)node))
+ struct pci_dev *dev = NULL;
+ const __be32 *reg;
+ int size;
+
+ /* Check if it might have a chance to be a PCI device */
+ if (!pci_find_hose_for_OF_device(node))
return -ENODEV;
- reg = of_get_property(node, "reg", NULL);
- if (!reg)
+
+ reg = of_get_property(node, "reg", &size);
+ if (!reg || size < 5 * sizeof(u32))
return -ENODEV;
- *bus = (reg[0] >> 16) & 0xff;
- *devfn = ((reg[0] >> 8) & 0xff);
+
+ *bus = (be32_to_cpup(&reg[0]) >> 16) & 0xff;
+ *devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;
/* Ok, here we need some tweak. If we have already renumbered
* all busses, we can't rely on the OF bus number any more.
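
The bus/devfn extraction above follows the Open Firmware PCI binding: the
first cell of "reg" is phys.hi, laid out as npt000ss bbbbbbbb dddddfff
rrrrrrrr, so both values fall out of simple shifts once the big-endian cell
is converted. A standalone sketch of the same arithmetic:

#include <stdint.h>

static inline uint8_t of_pci_busno(uint32_t phys_hi)
{
	return (phys_hi >> 16) & 0xff;	/* bbbbbbbb: bus number */
}

static inline uint8_t of_pci_devfn(uint32_t phys_hi)
{
	return (phys_hi >> 8) & 0xff;	/* dddddfff: device + function */
}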
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 6baabc13306..478f8d78716 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -142,53 +142,6 @@ void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
traverse_pci_devices(dn, update_dn_pci_info, phb);
}
-/*
- * Traversal func that looks for a <busno,devfcn> value.
- * If found, the pci_dn is returned (thus terminating the traversal).
- */
-static void *is_devfn_node(struct device_node *dn, void *data)
-{
- int busno = ((unsigned long)data >> 8) & 0xff;
- int devfn = ((unsigned long)data) & 0xff;
- struct pci_dn *pci = dn->data;
-
- if (pci && (devfn == pci->devfn) && (busno == pci->busno))
- return dn;
- return NULL;
-}
-
-/*
- * This is the "slow" path for looking up a device_node from a
- * pci_dev. It will hunt for the device under its parent's
- * phb and then update of_node pointer.
- *
- * It may also do fixups on the actual device since this happens
- * on the first read/write.
- *
- * Note that it also must deal with devices that don't exist.
- * In this case it may probe for real hardware ("just in case")
- * and add a device_node to the device tree if necessary.
- *
- * Is this function necessary anymore now that dev->dev.of_node is
- * used to store the node pointer?
- *
- */
-struct device_node *fetch_dev_dn(struct pci_dev *dev)
-{
- struct pci_controller *phb = dev->sysdata;
- struct device_node *dn;
- unsigned long searchval = (dev->bus->number << 8) | dev->devfn;
-
- if (WARN_ON(!phb))
- return NULL;
-
- dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval);
- if (dn)
- dev->dev.of_node = dn;
- return dn;
-}
-EXPORT_SYMBOL(fetch_dev_dn);
-
/**
* pci_devs_phb_init - Initialize phbs and pci devs under them.
*
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 1e89a72fd03..fe0a5ad6f73 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -202,9 +202,9 @@ EXPORT_SYMBOL(of_create_pci_dev);
 * this routine in turn calls of_scan_bus() recursively to scan for more child
* devices.
*/
-void __devinit of_scan_pci_bridge(struct device_node *node,
- struct pci_dev *dev)
+void __devinit of_scan_pci_bridge(struct pci_dev *dev)
{
+ struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, mode;
@@ -238,7 +238,6 @@ void __devinit of_scan_pci_bridge(struct device_node *node,
bus->primary = dev->bus->number;
bus->subordinate = busrange[1];
bus->bridge_ctl = 0;
- bus->dev.of_node = of_node_get(node);
/* parse ranges property */
/* PCI #address-cells == 3 and #size-cells == 2 always */
@@ -335,9 +334,7 @@ static void __devinit __of_scan_bus(struct device_node *node,
list_for_each_entry(dev, &bus->devices, bus_list) {
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
- struct device_node *child = pci_device_to_OF_node(dev);
- if (child)
- of_scan_pci_bridge(child, dev);
+ of_scan_pci_bridge(dev);
}
}
}
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 47ea1be1481..90f4496017e 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -55,14 +55,6 @@ config PPC_MPC5200_BUGFIX
It is safe to say 'Y' here
-config PPC_MPC5200_GPIO
- bool "MPC5200 GPIO support"
- depends on PPC_MPC52xx
- select ARCH_REQUIRE_GPIOLIB
- select GENERIC_GPIO
- help
- Enable gpiolib support for mpc5200 based boards
-
config PPC_MPC5200_LPBFIFO
tristate "MPC5200 LocalPlus bus FIFO driver"
depends on PPC_MPC52xx
diff --git a/arch/powerpc/platforms/52xx/Makefile b/arch/powerpc/platforms/52xx/Makefile
index 2bc8cd0c5cf..4e62486791e 100644
--- a/arch/powerpc/platforms/52xx/Makefile
+++ b/arch/powerpc/platforms/52xx/Makefile
@@ -14,5 +14,4 @@ ifeq ($(CONFIG_PPC_LITE5200),y)
obj-$(CONFIG_PM) += lite5200_sleep.o lite5200_pm.o
endif
-obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
obj-$(CONFIG_PPC_MPC5200_LPBFIFO) += mpc52xx_lpbfifo.o
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f33e08d573c..abe8d7e2ebe 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
+#include <linux/of_pci.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -235,7 +236,7 @@ static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset)
if (offset >= 0x100)
return PCIBIOS_BAD_REGISTER_NUMBER;
- np = pci_busdev_to_OF_node(bus, devfn);
+ np = of_pci_find_child_device(bus->dev.of_node, devfn);
if (np == NULL)
return PCIBIOS_DEVICE_NOT_FOUND;
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index a65d2e82f61..a63d34c3611 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -331,7 +331,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
{
if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0, SIGTRAP) == NOTIFY_STOP)
return;
- if (tracehook_consider_fatal_signal(current, SIGTRAP))
+ if (current->ptrace)
force_sig(SIGTRAP, current);
}
@@ -425,7 +425,7 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code,
if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
return;
if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
- if (tracehook_consider_fatal_signal(current, SIGTRAP))
+ if (current->ptrace)
force_sig(SIGTRAP, current);
else
signal = SIGILL;
diff --git a/arch/sh/kernel/cpu/shmobile/pm_runtime.c b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
index 64c807c3920..bf280c812d2 100644
--- a/arch/sh/kernel/cpu/shmobile/pm_runtime.c
+++ b/arch/sh/kernel/cpu/shmobile/pm_runtime.c
@@ -256,7 +256,7 @@ out:
return ret;
}
-static struct dev_power_domain default_power_domain = {
+static struct dev_pm_domain default_pm_domain = {
.ops = {
.runtime_suspend = default_platform_runtime_suspend,
.runtime_resume = default_platform_runtime_resume,
@@ -285,7 +285,7 @@ static int platform_bus_notify(struct notifier_block *nb,
hwblk_disable(hwblk_info, hwblk);
/* make sure driver re-inits itself once */
__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
- dev->pwr_domain = &default_power_domain;
+ dev->pm_domain = &default_pm_domain;
break;
/* TODO: add BUS_NOTIFY_BIND_DRIVER and increase idle count */
case BUS_NOTIFY_BOUND_DRIVER:
@@ -299,7 +299,7 @@ static int platform_bus_notify(struct notifier_block *nb,
__set_bit(PDEV_ARCHDATA_FLAG_INIT, &pdev->archdata.flags);
break;
case BUS_NOTIFY_DEL_DEVICE:
- dev->pwr_domain = NULL;
+ dev->pm_domain = NULL;
break;
}
return 0;
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 862e3ce92b1..02939abd356 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -42,9 +42,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
}
#endif
-struct device_node;
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
-
#endif /* __KERNEL__ */
#ifndef CONFIG_LEON_PCI
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index 948b686ec08..2614d96141c 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -91,9 +91,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return PCI_IRQ_NONE;
}
-struct device_node;
-extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
-
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
const struct resource *rsrc,
diff --git a/arch/sparc/include/asm/ptrace.h b/arch/sparc/include/asm/ptrace.h
index c7ad3fe2b25..b928b31424b 100644
--- a/arch/sparc/include/asm/ptrace.h
+++ b/arch/sparc/include/asm/ptrace.h
@@ -205,6 +205,7 @@ do { current_thread_info()->syscall_noerror = 1; \
} while (0)
#define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV))
#define instruction_pointer(regs) ((regs)->tpc)
+#define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
#define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
#ifdef CONFIG_SMP
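
The new setter completes the accessor pair. A hedged sketch of a typical
consumer, in the style of code that rewrites the saved PC (the function
itself is hypothetical):

/* Hypothetical consumer: resume the interrupted context at 'target'.
 * On sparc64 this expands to regs->tpc = target.
 */
static void hypothetical_set_resume_pc(struct pt_regs *regs,
				       unsigned long target)
{
	instruction_pointer_set(regs, target);
}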
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index 713dc91020a..80a87e2a3e7 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -284,7 +284,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
dev->sysdata = node;
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
- dev->dev.of_node = node;
+ dev->dev.of_node = of_node_get(node);
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
@@ -1021,12 +1021,6 @@ void arch_teardown_msi_irq(unsigned int irq)
}
#endif /* !(CONFIG_PCI_MSI) */
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
- return pdev->dev.of_node;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
struct pci_dev *ali_isa_bridge;
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 948601a066f..a19f0419547 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -885,14 +885,6 @@ int pcibios_assign_resource(struct pci_dev *pdev, int resource)
return -ENXIO;
}
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
- struct pcidev_cookie *pc = pdev->sysdata;
-
- return pc->prom_node;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
/*
* This probably belongs here rather than ioport.c because
* we do not want this crud linked into SBus kernels.
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da349723d41..37357a599dc 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
config AMD_NUMA
def_bool y
prompt "Old style AMD Opteron NUMA detection"
- depends on NUMA && PCI
+ depends on X86_64 && NUMA && PCI
---help---
Enable AMD NUMA node topology detection. You should say Y here if
you have a multi processor AMD system. This uses an old method to
diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h
index b60f2924c41..879fd7d3387 100644
--- a/arch/x86/include/asm/lguest_hcall.h
+++ b/arch/x86/include/asm/lguest_hcall.h
@@ -61,6 +61,7 @@ hcall(unsigned long call,
: "memory");
return call;
}
+/*:*/
/* Can't use our min() macro here: needs to be a constant */
#define LGUEST_IRQS (NR_IRQS < 32 ? NR_IRQS: 32)
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index a0a9779084d..3470c9d0ebb 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -388,12 +388,9 @@ do { \
#define __this_cpu_xor_1(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-/*
- * Generic fallback operations for __this_cpu_xchg_[1-4] are okay and much
- * faster than an xchg with forced lock semantics.
- */
-#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
-#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
+#define __this_cpu_xchg_1(pcp, val) percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_2(pcp, val) percpu_xchg_op(pcp, val)
+#define __this_cpu_xchg_4(pcp, val) percpu_xchg_op(pcp, val)
#define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -485,6 +482,8 @@ do { \
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_xchg_8(pcp, nval) percpu_xchg_op(pcp, nval)
+#define __this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
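
For callers, the relocated size-specific ops mean that __this_cpu_xchg() on
a 1/2/4-byte per-cpu variable now expands to percpu_xchg_op() instead of the
generic cmpxchg-based fallback. A minimal sketch; the variable and helper
are hypothetical:

#include <linux/percpu.h>

static DEFINE_PER_CPU(u32, hypothetical_pending);

/* Grab-and-clear this CPU's counter in one percpu_xchg_op() sequence. */
static u32 hypothetical_take_pending(void)
{
	return __this_cpu_xchg(hypothetical_pending, 0);
}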
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 971e0b46446..df1287019e6 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -30,17 +30,6 @@ extern void add_dtb(u64 data);
extern void x86_add_irq_domains(void);
void __cpuinit x86_of_pci_init(void);
void x86_dtb_init(void);
-
-static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
- return pdev ? pdev->dev.of_node : NULL;
-}
-
-static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
-{
- return pci_device_to_OF_node(bus->self);
-}
-
#else
static inline void add_dtb(u64 data) { }
static inline void x86_add_irq_domains(void) { }
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 4fbda9a3f33..968d57dd54c 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -14,13 +14,14 @@ static inline int pci_xen_hvm_init(void)
}
#endif
#if defined(CONFIG_XEN_DOM0)
-void __init xen_setup_pirqs(void);
+int __init pci_xen_initial_domain(void);
int xen_find_device_domain_owner(struct pci_dev *dev);
int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
int xen_unregister_device_domain_owner(struct pci_dev *dev);
#else
-static inline void __init xen_setup_pirqs(void)
+static inline int __init pci_xen_initial_domain(void)
{
+ return -1;
}
static inline int xen_find_device_domain_owner(struct pci_dev *dev)
{
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index c29d631af6f..395a10e6806 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -63,7 +63,6 @@ void foo(void)
BLANK();
OFFSET(LGUEST_DATA_irq_enabled, lguest_data, irq_enabled);
OFFSET(LGUEST_DATA_irq_pending, lguest_data, irq_pending);
- OFFSET(LGUEST_DATA_pgdir, lguest_data, pgdir);
BLANK();
OFFSET(LGUEST_PAGES_host_gdt_desc, lguest_pages, state.host_gdt_desc);
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 9aeb78a23de..a621f342768 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -134,6 +134,24 @@ static int __init add_bus_probe(void)
module_init(add_bus_probe);
#ifdef CONFIG_PCI
+struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ struct device_node *np;
+
+ for_each_node_by_type(np, "pci") {
+ const void *prop;
+ unsigned int bus_min;
+
+ prop = of_get_property(np, "bus-range", NULL);
+ if (!prop)
+ continue;
+ bus_min = be32_to_cpup(prop);
+ if (bus->number == bus_min)
+ return np;
+ }
+ return NULL;
+}
+
static int x86_of_pci_irq_enable(struct pci_dev *dev)
{
struct of_irq oirq;
@@ -165,50 +183,8 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
void __cpuinit x86_of_pci_init(void)
{
- struct device_node *np;
-
pcibios_enable_irq = x86_of_pci_irq_enable;
pcibios_disable_irq = x86_of_pci_irq_disable;
-
- for_each_node_by_type(np, "pci") {
- const void *prop;
- struct pci_bus *bus;
- unsigned int bus_min;
- struct device_node *child;
-
- prop = of_get_property(np, "bus-range", NULL);
- if (!prop)
- continue;
- bus_min = be32_to_cpup(prop);
-
- bus = pci_find_bus(0, bus_min);
- if (!bus) {
- printk(KERN_ERR "Can't find a node for bus %s.\n",
- np->full_name);
- continue;
- }
-
- if (bus->self)
- bus->self->dev.of_node = np;
- else
- bus->dev.of_node = np;
-
- for_each_child_of_node(np, child) {
- struct pci_dev *dev;
- u32 devfn;
-
- prop = of_get_property(child, "reg", NULL);
- if (!prop)
- continue;
-
- devfn = (be32_to_cpup(prop) >> 8) & 0xff;
- dev = pci_get_slot(bus, devfn);
- if (!dev)
- continue;
- dev->dev.of_node = child;
- pci_dev_put(dev);
- }
- }
}
#endif
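
The pcibios_get_phb_of_node() above replaces the old eager fixup in
x86_of_pci_init(): rather than patching of_node pointers at init time, the
PCI core asks for the node when a root bus is created, and the match key is
simply the first cell of the node's "bus-range" property. A standalone
sketch of that match rule, with hypothetical names:

#include <stdint.h>

/* A "pci" node owns the root bus whose number equals the start of its
 * big-endian "bus-range" property (open-coded be32 load below).
 */
static int hypothetical_phb_matches(const uint8_t *bus_range, unsigned int busnr)
{
	uint32_t bus_min = ((uint32_t)bus_range[0] << 24) |
			   ((uint32_t)bus_range[1] << 16) |
			   ((uint32_t)bus_range[2] << 8) |
			    (uint32_t)bus_range[3];

	return bus_min == busnr;
}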
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 4f0d46fefa7..9242436e993 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -419,6 +419,30 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
},
},
+ { /* Handle problems with rebooting on the Latitude E6320. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6320",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+ },
+ },
+ { /* Handle problems with rebooting on the Latitude E5420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E5420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5420"),
+ },
+ },
+ { /* Handle problems with rebooting on the Latitude E6420. */
+ .callback = set_pci_reboot,
+ .ident = "Dell Latitude E6420",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
+ },
+ },
{ }
};
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index db832fd65ec..13ee258442a 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -71,7 +71,8 @@
#include <asm/stackprotector.h>
#include <asm/reboot.h> /* for struct machine_ops */
-/*G:010 Welcome to the Guest!
+/*G:010
+ * Welcome to the Guest!
*
* The Guest in our tale is a simple creature: identical to the Host but
* behaving in simplified but equivalent ways. In particular, the Guest is the
@@ -190,15 +191,23 @@ static void lazy_hcall4(unsigned long call,
#endif
/*G:036
- * When lazy mode is turned off reset the per-cpu lazy mode variable and then
- * issue the do-nothing hypercall to flush any stored calls.
-:*/
+ * When lazy mode is turned off, we issue the do-nothing hypercall to
+ * flush any stored calls, and call the generic helper to reset the
+ * per-cpu lazy mode variable.
+ */
static void lguest_leave_lazy_mmu_mode(void)
{
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
paravirt_leave_lazy_mmu();
}
+/*
+ * We also catch the end of context switch; we enter lazy mode for much of
+ * that too, so again we need to flush here.
+ *
+ * (Technically, this is lazy CPU mode, and normally we're in lazy MMU
+ * mode, but unlike Xen, lguest doesn't care about the difference).
+ */
static void lguest_end_context_switch(struct task_struct *next)
{
hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0, 0);
@@ -391,7 +400,7 @@ static void lguest_load_tr_desc(void)
* giant ball of hair. Its entry in the current Intel manual runs to 28 pages.
*
 * This instruction even has its own Wikipedia entry. The Wikipedia entry
- * has been translated into 5 languages. I am not making this up!
+ * has been translated into 6 languages. I am not making this up!
*
* We could get funky here and identify ourselves as "GenuineLguest", but
* instead we just use the real "cpuid" instruction. Then I pretty much turned
@@ -458,7 +467,7 @@ static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
/*
* PAE systems can mark pages as non-executable. Linux calls this the
* NX bit. Intel calls it XD (eXecute Disable), AMD EVP (Enhanced
- * Virus Protection). We just switch turn if off here, since we don't
+ * Virus Protection). We just switch it off here, since we don't
* support it.
*/
case 0x80000001:
@@ -520,17 +529,16 @@ static unsigned long lguest_read_cr2(void)
/* See lguest_set_pte() below. */
static bool cr3_changed = false;
+static unsigned long current_cr3;
/*
* cr3 is the current toplevel pagetable page: the principle is the same as
- * cr0. Keep a local copy, and tell the Host when it changes. The only
- * difference is that our local copy is in lguest_data because the Host needs
- * to set it upon our initial hypercall.
+ * cr0. Keep a local copy, and tell the Host when it changes.
*/
static void lguest_write_cr3(unsigned long cr3)
{
- lguest_data.pgdir = cr3;
lazy_hcall1(LHCALL_NEW_PGTABLE, cr3);
+ current_cr3 = cr3;
/* These two page tables are simple, linear, and used during boot */
if (cr3 != __pa(swapper_pg_dir) && cr3 != __pa(initial_page_table))
@@ -539,7 +547,7 @@ static void lguest_write_cr3(unsigned long cr3)
static unsigned long lguest_read_cr3(void)
{
- return lguest_data.pgdir;
+ return current_cr3;
}
/* cr4 is used to enable and disable PGE, but we don't care. */
@@ -641,7 +649,7 @@ static void lguest_write_cr4(unsigned long val)
/*
* The Guest calls this after it has set a second-level entry (pte), ie. to map
- * a page into a process' address space. Wetell the Host the toplevel and
+ * a page into a process' address space. We tell the Host the toplevel and
* address this corresponds to. The Guest uses one pagetable per process, so
* we need to tell the Host which one we're changing (mm->pgd).
*/
@@ -758,7 +766,7 @@ static void lguest_pmd_clear(pmd_t *pmdp)
static void lguest_flush_tlb_single(unsigned long addr)
{
/* Simply set it to zero: if it was not, it will fault back in. */
- lazy_hcall3(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
+ lazy_hcall3(LHCALL_SET_PTE, current_cr3, addr, 0);
}
/*
@@ -1140,7 +1148,7 @@ static struct notifier_block paniced = {
static __init char *lguest_memory_setup(void)
{
/*
- *The Linux bootloader header contains an "e820" memory map: the
+ * The Linux bootloader header contains an "e820" memory map: the
* Launcher populated the first entry with our memory limit.
*/
e820_add_region(boot_params.e820_map[0].addr,
diff --git a/arch/x86/lguest/i386_head.S b/arch/x86/lguest/i386_head.S
index 4f420c2f2d5..6ddfe4fc23c 100644
--- a/arch/x86/lguest/i386_head.S
+++ b/arch/x86/lguest/i386_head.S
@@ -6,18 +6,22 @@
#include <asm/processor-flags.h>
/*G:020
- * Our story starts with the kernel booting into startup_32 in
- * arch/x86/kernel/head_32.S. It expects a boot header, which is created by
- * the bootloader (the Launcher in our case).
+
+ * Our story starts with the bzImage: booting starts at startup_32 in
+ * arch/x86/boot/compressed/head_32.S. This merely uncompresses the real
+ * kernel in place and then jumps into it: startup_32 in
+ * arch/x86/kernel/head_32.S. Both routines expect a boot header in the %esi
+ * register, which is created by the bootloader (the Launcher in our case).
*
* The startup_32 function does very little: it clears the uninitialized global
* C variables which we expect to be zero (ie. BSS) and then copies the boot
- * header and kernel command line somewhere safe. Finally it checks the
- * 'hardware_subarch' field. This was introduced in 2.6.24 for lguest and Xen:
- * if it's set to '1' (lguest's assigned number), then it calls us here.
+ * header and kernel command line somewhere safe, and populates some initial
+ * page tables. Finally it checks the 'hardware_subarch' field. This was
+ * introduced in 2.6.24 for lguest and Xen: if it's set to '1' (lguest's
+ * assigned number), then it calls us here.
*
* WARNING: be very careful here! We're running at addresses equal to physical
- * addesses (around 0), not above PAGE_OFFSET as most code expectes
+ * addresses (around 0), not above PAGE_OFFSET as most code expects
* (eg. 0xC0000000). Jumps are relative, so they're OK, but we can't touch any
* data without remembering to subtract __PAGE_OFFSET!
*
@@ -27,13 +31,18 @@
.section .init.text, "ax", @progbits
ENTRY(lguest_entry)
/*
- * We make the "initialization" hypercall now to tell the Host about
- * us, and also find out where it put our page tables.
+ * We make the "initialization" hypercall now to tell the Host where
+ * our lguest_data struct is.
*/
movl $LHCALL_LGUEST_INIT, %eax
movl $lguest_data - __PAGE_OFFSET, %ebx
int $LGUEST_TRAP_ENTRY
+ /* Now turn our pagetables on; setup by arch/x86/kernel/head_32.S. */
+ movl $LHCALL_NEW_PGTABLE, %eax
+ movl $(initial_page_table - __PAGE_OFFSET), %ebx
+ int $LGUEST_TRAP_ENTRY
+
/* Set up the initial stack so we can run C code. */
movl $(init_thread_union+THREAD_SIZE),%esp
@@ -96,12 +105,8 @@ send_interrupts:
*/
pushl %eax
movl $LHCALL_SEND_INTERRUPTS, %eax
- /*
- * This is a vmcall instruction (same thing that KVM uses). Older
- * assembler versions might not know the "vmcall" instruction, so we
- * create one manually here.
- */
- .byte 0x0f,0x01,0xc1 /* KVM_HYPERCALL */
+ /* This is the actual hypercall trap. */
+ int $LGUEST_TRAP_ENTRY
/* Put eax back the way we found it. */
popl %eax
ret
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index f567965c062..1017c7bee38 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -1,8 +1,13 @@
/*
- * Xen PCI Frontend Stub - puts some "dummy" functions in to the Linux
- * x86 PCI core to support the Xen PCI Frontend
+ * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
+ * initial domain support. We also handle the DSDT _PRT callbacks for GSI's
+ * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
+ * concept of GSIs). Under PV we hook under the pcibios API for IRQs and
+ * 0xcf8 PCI configuration read/write.
*
* Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ * Stefano Stabellini <stefano.stabellini@eu.citrix.com>
*/
#include <linux/module.h>
#include <linux/init.h>
@@ -19,22 +24,53 @@
#include <xen/events.h>
#include <asm/xen/pci.h>
+static int xen_pcifront_enable_irq(struct pci_dev *dev)
+{
+ int rc;
+ int share = 1;
+ int pirq;
+ u8 gsi;
+
+ rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+ rc);
+ return rc;
+ }
+ /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
+ pirq = gsi;
+
+ if (gsi < NR_IRQS_LEGACY)
+ share = 0;
+
+ rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
+ if (rc < 0) {
+ dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
+ gsi, pirq, rc);
+ return rc;
+ }
+
+ dev->irq = rc;
+ dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
+ return 0;
+}
+
#ifdef CONFIG_ACPI
-static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
- int trigger, int polarity)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
+ bool set_pirq)
{
- int rc, irq;
+ int rc, pirq = -1, irq = -1;
struct physdev_map_pirq map_irq;
int shareable = 0;
char *name;
- if (!xen_hvm_domain())
- return -1;
+ if (set_pirq)
+ pirq = gsi;
map_irq.domid = DOMID_SELF;
map_irq.type = MAP_PIRQ_TYPE_GSI;
map_irq.index = gsi;
- map_irq.pirq = -1;
+ map_irq.pirq = pirq;
rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
if (rc) {
@@ -42,7 +78,7 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
return -1;
}
- if (trigger == ACPI_EDGE_SENSITIVE) {
+ if (triggering == ACPI_EDGE_SENSITIVE) {
shareable = 0;
name = "ioapic-edge";
} else {
@@ -50,12 +86,63 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
name = "ioapic-level";
}
+ if (gsi_override >= 0)
+ gsi = gsi_override;
+
irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
+ if (irq < 0)
+ goto out;
+
+ printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
+out:
+ return irq;
+}
+
+static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ if (!xen_hvm_domain())
+ return -1;
- printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
+ return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
+ false /* no mapping of GSI to PIRQ */);
+}
+
+#ifdef CONFIG_XEN_DOM0
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
+{
+ int rc, irq;
+ struct physdev_setup_gsi setup_gsi;
+
+ if (!xen_pv_domain())
+ return -1;
+
+ printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
+ gsi, triggering, polarity);
+
+ irq = xen_register_pirq(gsi, gsi_override, triggering, true);
+
+ setup_gsi.gsi = gsi;
+ setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
+ setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+ if (rc == -EEXIST)
+ printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
+ else if (rc) {
+ printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
+ gsi, rc);
+ }
return irq;
}
+
+static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
+ int trigger, int polarity)
+{
+ return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
+}
+#endif
#endif
#if defined(CONFIG_PCI_MSI)
@@ -65,6 +152,43 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
struct xen_pci_frontend_ops *xen_pci_frontend;
EXPORT_SYMBOL_GPL(xen_pci_frontend);
+static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int irq, ret, i;
+ struct msi_desc *msidesc;
+ int *v;
+
+ v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
+ if (!v)
+ return -ENOMEM;
+
+ if (type == PCI_CAP_ID_MSIX)
+ ret = xen_pci_frontend_enable_msix(dev, v, nvec);
+ else
+ ret = xen_pci_frontend_enable_msi(dev, v);
+ if (ret)
+ goto error;
+ i = 0;
+ list_for_each_entry(msidesc, &dev->msi_list, list) {
+ irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
+ (type == PCI_CAP_ID_MSIX) ?
+ "pcifront-msi-x" :
+ "pcifront-msi",
+ DOMID_SELF);
+ if (irq < 0)
+ goto free;
+ i++;
+ }
+ kfree(v);
+ return 0;
+
+error:
+ dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+free:
+ kfree(v);
+ return ret;
+}
+
#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
@@ -123,67 +247,6 @@ error:
return -ENODEV;
}
-/*
- * For MSI interrupts we have to use drivers/xen/event.s functions to
- * allocate an irq_desc and setup the right */
-
-
-static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- int irq, ret, i;
- struct msi_desc *msidesc;
- int *v;
-
- v = kzalloc(sizeof(int) * max(1, nvec), GFP_KERNEL);
- if (!v)
- return -ENOMEM;
-
- if (type == PCI_CAP_ID_MSIX)
- ret = xen_pci_frontend_enable_msix(dev, v, nvec);
- else
- ret = xen_pci_frontend_enable_msi(dev, v);
- if (ret)
- goto error;
- i = 0;
- list_for_each_entry(msidesc, &dev->msi_list, list) {
- irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0,
- (type == PCI_CAP_ID_MSIX) ?
- "pcifront-msi-x" :
- "pcifront-msi",
- DOMID_SELF);
- if (irq < 0)
- goto free;
- i++;
- }
- kfree(v);
- return 0;
-
-error:
- dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
-free:
- kfree(v);
- return ret;
-}
-
-static void xen_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *msidesc;
-
- msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
- if (msidesc->msi_attrib.is_msix)
- xen_pci_frontend_disable_msix(dev);
- else
- xen_pci_frontend_disable_msi(dev);
-
- /* Free the IRQ's and the msidesc using the generic code. */
- default_teardown_msi_irqs(dev);
-}
-
-static void xen_teardown_msi_irq(unsigned int irq)
-{
- xen_destroy_irq(irq);
-}
-
#ifdef CONFIG_XEN_DOM0
static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
@@ -242,45 +305,28 @@ out:
return ret;
}
#endif
-#endif
-static int xen_pcifront_enable_irq(struct pci_dev *dev)
+static void xen_teardown_msi_irqs(struct pci_dev *dev)
{
- int rc;
- int share = 1;
- int pirq;
- u8 gsi;
-
- rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
- rc);
- return rc;
- }
-
- rc = xen_allocate_pirq_gsi(gsi);
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to allocate a PIRQ for GSI%d: %d\n",
- gsi, rc);
- return rc;
- }
- pirq = rc;
+ struct msi_desc *msidesc;
- if (gsi < NR_IRQS_LEGACY)
- share = 0;
+ msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
+ if (msidesc->msi_attrib.is_msix)
+ xen_pci_frontend_disable_msix(dev);
+ else
+ xen_pci_frontend_disable_msi(dev);
- rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
- if (rc < 0) {
- dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
- gsi, pirq, rc);
- return rc;
- }
+ /* Free the IRQ's and the msidesc using the generic code. */
+ default_teardown_msi_irqs(dev);
+}
- dev->irq = rc;
- dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
- return 0;
+static void xen_teardown_msi_irq(unsigned int irq)
+{
+ xen_destroy_irq(irq);
}
+#endif
+
int __init pci_xen_init(void)
{
if (!xen_pv_domain() || xen_initial_domain())
@@ -327,79 +373,6 @@ int __init pci_xen_hvm_init(void)
}
#ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
-{
- int rc, pirq, irq = -1;
- struct physdev_map_pirq map_irq;
- int shareable = 0;
- char *name;
-
- if (!xen_pv_domain())
- return -1;
-
- if (triggering == ACPI_EDGE_SENSITIVE) {
- shareable = 0;
- name = "ioapic-edge";
- } else {
- shareable = 1;
- name = "ioapic-level";
- }
- pirq = xen_allocate_pirq_gsi(gsi);
- if (pirq < 0)
- goto out;
-
- if (gsi_override >= 0)
- irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
- else
- irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
- if (irq < 0)
- goto out;
-
- printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", pirq, irq, gsi);
-
- map_irq.domid = DOMID_SELF;
- map_irq.type = MAP_PIRQ_TYPE_GSI;
- map_irq.index = gsi;
- map_irq.pirq = pirq;
-
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
- if (rc) {
- printk(KERN_WARNING "xen map irq failed %d\n", rc);
- return -1;
- }
-
-out:
- return irq;
-}
-
-static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
-{
- int rc, irq;
- struct physdev_setup_gsi setup_gsi;
-
- if (!xen_pv_domain())
- return -1;
-
- printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
- gsi, triggering, polarity);
-
- irq = xen_register_pirq(gsi, gsi_override, triggering);
-
- setup_gsi.gsi = gsi;
- setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
- setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
-
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
- if (rc == -EEXIST)
- printk(KERN_INFO "Already setup the GSI :%d\n", gsi);
- else if (rc) {
- printk(KERN_ERR "Failed to setup GSI :%d, err_code:%d\n",
- gsi, rc);
- }
-
- return irq;
-}
-
static __init void xen_setup_acpi_sci(void)
{
int rc;
@@ -419,7 +392,7 @@ static __init void xen_setup_acpi_sci(void)
}
trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
+
printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
"polarity=%d\n", gsi, trigger, polarity);
@@ -434,10 +407,9 @@ static __init void xen_setup_acpi_sci(void)
* the ACPI interpreter and keels over since IRQ 9 has not been
* setup as we had setup IRQ 20 for it).
*/
- /* Check whether the GSI != IRQ */
if (acpi_gsi_to_irq(gsi, &irq) == 0) {
- if (irq >= 0 && irq != gsi)
- /* Bugger, we MUST have that IRQ. */
+ /* Use the provided value if it's valid. */
+ if (irq >= 0)
gsi_override = irq;
}
@@ -447,41 +419,16 @@ static __init void xen_setup_acpi_sci(void)
return;
}
-static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
- int trigger, int polarity)
+int __init pci_xen_initial_domain(void)
{
- return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
-}
+ int irq;
-static int __init pci_xen_initial_domain(void)
-{
#ifdef CONFIG_PCI_MSI
x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
#endif
xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
-
- return 0;
-}
-
-void __init xen_setup_pirqs(void)
-{
- int pirq, irq;
-
- pci_xen_initial_domain();
-
- if (0 == nr_ioapics) {
- for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
- pirq = xen_allocate_pirq_gsi(irq);
- if (WARN(pirq < 0,
- "Could not allocate PIRQ for legacy interrupt\n"))
- break;
- irq = xen_bind_pirq_gsi_to_irq(irq, pirq, 0, "xt-pic");
- }
- return;
- }
-
/* Pre-allocate legacy irqs */
for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
int trigger, polarity;
@@ -490,12 +437,16 @@ void __init xen_setup_pirqs(void)
continue;
xen_register_pirq(irq, -1 /* no GSI override */,
- trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
+ trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
+ true /* Map GSI to PIRQ */);
}
+ if (0 == nr_ioapics) {
+ for (irq = 0; irq < NR_IRQS_LEGACY; irq++)
+ xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
+ }
+ return 0;
}
-#endif
-#ifdef CONFIG_XEN_DOM0
struct xen_device_domain_owner {
domid_t domain;
struct pci_dev *dev;
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 17c565de3d6..a6575b949b1 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -18,5 +18,5 @@ obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
-
+obj-$(CONFIG_XEN_DOM0) += vga.o
obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 5525163a039..53257421082 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1248,6 +1248,14 @@ asmlinkage void __init xen_start_kernel(void)
if (pci_xen)
x86_init.pci.arch_init = pci_xen_init;
} else {
+ const struct dom0_vga_console_info *info =
+ (void *)((char *)xen_start_info +
+ xen_start_info->console.dom0.info_off);
+
+ xen_init_vga(info, xen_start_info->console.dom0.info_size);
+ xen_start_info->console.domU.mfn = 0;
+ xen_start_info->console.domU.evtchn = 0;
+
/* Make sure ACS will be enabled */
pci_request_acs();
}
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 25c52f94a27..ffcf2615640 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -35,7 +35,7 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
#ifdef CONFIG_XEN_PVHVM
static int xen_emul_unplug;
-static int __init check_platform_magic(void)
+static int check_platform_magic(void)
{
short magic;
char protocol;
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
new file mode 100644
index 00000000000..1cd7f4d11e2
--- /dev/null
+++ b/arch/x86/xen/vga.c
@@ -0,0 +1,67 @@
+#include <linux/screen_info.h>
+#include <linux/init.h>
+
+#include <asm/bootparam.h>
+#include <asm/setup.h>
+
+#include <xen/interface/xen.h>
+
+#include "xen-ops.h"
+
+void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
+{
+ struct screen_info *screen_info = &boot_params.screen_info;
+
+ /* This is drawn from a dump from vgacon:startup in
+ * standard Linux. */
+ screen_info->orig_video_mode = 3;
+ screen_info->orig_video_isVGA = 1;
+ screen_info->orig_video_lines = 25;
+ screen_info->orig_video_cols = 80;
+ screen_info->orig_video_ega_bx = 3;
+ screen_info->orig_video_points = 16;
+ screen_info->orig_y = screen_info->orig_video_lines - 1;
+
+ switch (info->video_type) {
+ case XEN_VGATYPE_TEXT_MODE_3:
+ if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
+ + sizeof(info->u.text_mode_3))
+ break;
+ screen_info->orig_video_lines = info->u.text_mode_3.rows;
+ screen_info->orig_video_cols = info->u.text_mode_3.columns;
+ screen_info->orig_x = info->u.text_mode_3.cursor_x;
+ screen_info->orig_y = info->u.text_mode_3.cursor_y;
+ screen_info->orig_video_points =
+ info->u.text_mode_3.font_height;
+ break;
+
+ case XEN_VGATYPE_VESA_LFB:
+ if (size < offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.gbl_caps))
+ break;
+ screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB;
+ screen_info->lfb_width = info->u.vesa_lfb.width;
+ screen_info->lfb_height = info->u.vesa_lfb.height;
+ screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel;
+ screen_info->lfb_base = info->u.vesa_lfb.lfb_base;
+ screen_info->lfb_size = info->u.vesa_lfb.lfb_size;
+ screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line;
+ screen_info->red_size = info->u.vesa_lfb.red_size;
+ screen_info->red_pos = info->u.vesa_lfb.red_pos;
+ screen_info->green_size = info->u.vesa_lfb.green_size;
+ screen_info->green_pos = info->u.vesa_lfb.green_pos;
+ screen_info->blue_size = info->u.vesa_lfb.blue_size;
+ screen_info->blue_pos = info->u.vesa_lfb.blue_pos;
+ screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
+ screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.gbl_caps)
+ + sizeof(info->u.vesa_lfb.gbl_caps))
+ screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.mode_attrs)
+ + sizeof(info->u.vesa_lfb.mode_attrs))
+ screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
+ break;
+ }
+}
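Note: the size checks in xen_init_vga() above encode a compatibility idiom: the hypervisor may hand Dom0 a dom0_vga_console_info shorter than the one Linux was built against, so each optional field is read only if it lies entirely within the reported size. A hedged generic form of the test (this macro is illustrative only, not part of the patch):

/* True if 'field' of *info fits within the 'size' bytes we were given. */
#define FIELD_PRESENT(info, field, size) \
	((size) >= offsetof(typeof(*(info)), field) + sizeof((info)->field))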
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 97dfdc8757b..b095739ccd4 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -88,6 +88,17 @@ static inline void xen_uninit_lock_cpu(int cpu)
}
#endif
+struct dom0_vga_console_info;
+
+#ifdef CONFIG_XEN_DOM0
+void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
+#else
+static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
+ size_t size)
+{
+}
+#endif
+
/* Declare an asm function, along with symbols needed to make it
inlineable */
#define DECL_ASM(ret, name, ...) \
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3647e114d0e..2639ae79a37 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index ad367c4139b..a846b2f95cf 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,9 +15,9 @@
#include <linux/slab.h>
#include <linux/err.h>
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
-struct pm_runtime_clk_data {
+struct pm_clk_data {
struct list_head clock_list;
struct mutex lock;
};
@@ -36,25 +36,25 @@ struct pm_clock_entry {
enum pce_status status;
};
-static struct pm_runtime_clk_data *__to_prd(struct device *dev)
+static struct pm_clk_data *__to_pcd(struct device *dev)
{
return dev ? dev->power.subsys_data : NULL;
}
/**
- * pm_runtime_clk_add - Start using a device clock for runtime PM.
- * @dev: Device whose clock is going to be used for runtime PM.
+ * pm_clk_add - Start using a device clock for power management.
+ * @dev: Device whose clock is going to be used for power management.
* @con_id: Connection ID of the clock.
*
* Add the clock represented by @con_id to the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
*/
-int pm_runtime_clk_add(struct device *dev, const char *con_id)
+int pm_clk_add(struct device *dev, const char *con_id)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
- if (!prd)
+ if (!pcd)
return -EINVAL;
ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
}
}
- mutex_lock(&prd->lock);
- list_add_tail(&ce->node, &prd->clock_list);
- mutex_unlock(&prd->lock);
+ mutex_lock(&pcd->lock);
+ list_add_tail(&ce->node, &pcd->clock_list);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * __pm_runtime_clk_remove - Destroy runtime PM clock entry.
- * @ce: Runtime PM clock entry to destroy.
+ * __pm_clk_remove - Destroy PM clock entry.
+ * @ce: PM clock entry to destroy.
*
- * This routine must be called under the mutex protecting the runtime PM list
- * of clocks corresponding the the @ce's device.
+ * This routine must be called under the mutex protecting the PM list of clocks
+ * corresponding to the @ce's device.
*/
-static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
+static void __pm_clk_remove(struct pm_clock_entry *ce)
{
if (!ce)
return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
}
/**
- * pm_runtime_clk_remove - Stop using a device clock for runtime PM.
- * @dev: Device whose clock should not be used for runtime PM any more.
+ * pm_clk_remove - Stop using a device clock for power management.
+ * @dev: Device whose clock should not be used for PM any more.
* @con_id: Connection ID of the clock.
*
* Remove the clock represented by @con_id from the list of clocks used for
- * the runtime PM of @dev.
+ * the power management of @dev.
*/
-void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+void pm_clk_remove(struct device *dev, const char *con_id)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
- if (!prd)
+ if (!pcd)
return;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry(ce, &prd->clock_list, node) {
+ list_for_each_entry(ce, &pcd->clock_list, node) {
if (!con_id && !ce->con_id) {
- __pm_runtime_clk_remove(ce);
+ __pm_clk_remove(ce);
break;
} else if (!con_id || !ce->con_id) {
continue;
} else if (!strcmp(con_id, ce->con_id)) {
- __pm_runtime_clk_remove(ce);
+ __pm_clk_remove(ce);
break;
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
}
/**
- * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks.
- * @dev: Device to initialize the list of runtime PM clocks for.
+ * pm_clk_init - Initialize a device's list of power management clocks.
+ * @dev: Device to initialize the list of PM clocks for.
*
- * Allocate a struct pm_runtime_clk_data object, initialize its lock member and
+ * Allocate a struct pm_clk_data object, initialize its lock member and
* make the @dev's power.subsys_data field point to it.
*/
-int pm_runtime_clk_init(struct device *dev)
+int pm_clk_init(struct device *dev)
{
- struct pm_runtime_clk_data *prd;
+ struct pm_clk_data *pcd;
- prd = kzalloc(sizeof(*prd), GFP_KERNEL);
- if (!prd) {
- dev_err(dev, "Not enough memory fo runtime PM data.\n");
+ pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
+ if (!pcd) {
+ dev_err(dev, "Not enough memory for PM clock data.\n");
return -ENOMEM;
}
- INIT_LIST_HEAD(&prd->clock_list);
- mutex_init(&prd->lock);
- dev->power.subsys_data = prd;
+ INIT_LIST_HEAD(&pcd->clock_list);
+ mutex_init(&pcd->lock);
+ dev->power.subsys_data = pcd;
return 0;
}
/**
- * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks.
- * @dev: Device to destroy the list of runtime PM clocks for.
+ * pm_clk_destroy - Destroy a device's list of power management clocks.
+ * @dev: Device to destroy the list of PM clocks for.
*
* Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_runtime_clk_data object pointed to by it before and free
+ * from the struct pm_clk_data object pointed to by it before and free
* that object.
*/
-void pm_runtime_clk_destroy(struct device *dev)
+void pm_clk_destroy(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce, *c;
- if (!prd)
+ if (!pcd)
return;
dev->power.subsys_data = NULL;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node)
- __pm_runtime_clk_remove(ce);
+ list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
+ __pm_clk_remove(ce);
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
- kfree(prd);
+ kfree(pcd);
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
/**
- * pm_runtime_clk_acquire - Acquire a device clock.
+ * pm_clk_acquire - Acquire a device clock.
* @dev: Device whose clock is to be acquired.
* @con_id: Connection ID of the clock.
*/
-static void pm_runtime_clk_acquire(struct device *dev,
+static void pm_clk_acquire(struct device *dev,
struct pm_clock_entry *ce)
{
ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
}
/**
- * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list.
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
* @dev: Device to disable the clocks for.
*/
-int pm_runtime_clk_suspend(struct device *dev)
+int pm_clk_suspend(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
- if (!prd)
+ if (!pcd)
return 0;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry_reverse(ce, &prd->clock_list, node) {
+ list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
- pm_runtime_clk_acquire(dev, ce);
+ pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list.
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
* @dev: Device to enable the clocks for.
*/
-int pm_runtime_clk_resume(struct device *dev)
+int pm_clk_resume(struct device *dev)
{
- struct pm_runtime_clk_data *prd = __to_prd(dev);
+ struct pm_clk_data *pcd = __to_pcd(dev);
struct pm_clock_entry *ce;
dev_dbg(dev, "%s()\n", __func__);
- if (!prd)
+ if (!pcd)
return 0;
- mutex_lock(&prd->lock);
+ mutex_lock(&pcd->lock);
- list_for_each_entry(ce, &prd->clock_list, node) {
+ list_for_each_entry(ce, &pcd->clock_list, node) {
if (ce->status == PCE_STATUS_NONE)
- pm_runtime_clk_acquire(dev, ce);
+ pm_clk_acquire(dev, ce);
if (ce->status < PCE_STATUS_ERROR) {
clk_enable(ce->clk);
@@ -265,28 +269,28 @@ int pm_runtime_clk_resume(struct device *dev)
}
}
- mutex_unlock(&prd->lock);
+ mutex_unlock(&pcd->lock);
return 0;
}
/**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
*
* For this function to work, @nb must be a member of an object of type
* struct pm_clk_notifier_block containing all of the requisite data.
- * Specifically, the pwr_domain member of that object is copied to the device's
- * pwr_domain field and its con_ids member is used to populate the device's list
- * of runtime PM clocks, depending on @action.
+ * Specifically, the pm_domain member of that object is copied to the device's
+ * pm_domain field and its con_ids member is used to populate the device's list
+ * of PM clocks, depending on @action.
*
- * If the device's pwr_domain field is already populated with a value different
+ * If the device's pm_domain field is already populated with a value different
* from the one stored in the struct pm_clk_notifier_block object, the function
* does nothing.
*/
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
switch (action) {
case BUS_NOTIFY_ADD_DEVICE:
- if (dev->pwr_domain)
+ if (dev->pm_domain)
break;
- error = pm_runtime_clk_init(dev);
+ error = pm_clk_init(dev);
if (error)
break;
- dev->pwr_domain = clknb->pwr_domain;
+ dev->pm_domain = clknb->pm_domain;
if (clknb->con_ids[0]) {
for (con_id = clknb->con_ids; *con_id; con_id++)
- pm_runtime_clk_add(dev, *con_id);
+ pm_clk_add(dev, *con_id);
} else {
- pm_runtime_clk_add(dev, NULL);
+ pm_clk_add(dev, NULL);
}
break;
case BUS_NOTIFY_DEL_DEVICE:
- if (dev->pwr_domain != clknb->pwr_domain)
+ if (dev->pm_domain != clknb->pm_domain)
break;
- dev->pwr_domain = NULL;
- pm_runtime_clk_destroy(dev);
+ dev->pm_domain = NULL;
+ pm_clk_destroy(dev);
break;
}
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
#else /* !CONFIG_PM_RUNTIME */
+#ifdef CONFIG_PM
+
+/**
+ * pm_clk_suspend - Disable clocks in a device's PM clock list.
+ * @dev: Device to disable the clocks for.
+ */
+int pm_clk_suspend(struct device *dev)
+{
+ struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_clock_entry *ce;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* If there is no driver, the clocks are already disabled. */
+ if (!pcd || !dev->driver)
+ return 0;
+
+ mutex_lock(&pcd->lock);
+
+ list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+ clk_disable(ce->clk);
+
+ mutex_unlock(&pcd->lock);
+
+ return 0;
+}
+
+/**
+ * pm_clk_resume - Enable clocks in a device's PM clock list.
+ * @dev: Device to enable the clocks for.
+ */
+int pm_clk_resume(struct device *dev)
+{
+ struct pm_clk_data *pcd = __to_pcd(dev);
+ struct pm_clock_entry *ce;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ /* If there is no driver, the clocks should remain disabled. */
+ if (!pcd || !dev->driver)
+ return 0;
+
+ mutex_lock(&pcd->lock);
+
+ list_for_each_entry(ce, &pcd->clock_list, node)
+ clk_enable(ce->clk);
+
+ mutex_unlock(&pcd->lock);
+
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+
/**
* enable_clock - Enable a device clock.
* @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
}
/**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
* @nb: Notifier block object this function is a member of.
* @action: Operation being carried out by the caller.
* @data: Device the routine is being run for.
@@ -375,7 +433,7 @@ static void disable_clock(struct device *dev, const char *con_id)
* Specifically, the con_ids member of that object is used to enable or disable
* the device's clocks, depending on @action.
*/
-static int pm_runtime_clk_notify(struct notifier_block *nb,
+static int pm_clk_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct pm_clk_notifier_block *clknb;
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
#endif /* !CONFIG_PM_RUNTIME */
/**
- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
* @bus: Bus type to add the notifier to.
* @clknb: Notifier to be added to the given bus type.
*
* The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * notifier_call member will be replaced with pm_clk_notify(). However,
* the remaining members of @clknb should be populated prior to calling this
* routine.
*/
-void pm_runtime_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
if (!bus || !clknb)
return;
- clknb->nb.notifier_call = pm_runtime_clk_notify;
+ clknb->nb.notifier_call = pm_clk_notify;
bus_register_notifier(bus, &clknb->nb);
}
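Note: the renamed pm_clk_* API is typically consumed by platform code that attaches a PM domain and a clock list to every device on a bus. A minimal sketch, modeled on existing users of the old pm_runtime_clk_* names (the foo_* identifiers are hypothetical; the declarations come from linux/pm_runtime.h in this tree):

/* Hedged sketch: gate a device's listed clocks across runtime PM. */
static struct dev_pm_domain foo_pm_domain = {
	.ops = {
		/* Real platforms usually chain pm_generic_runtime_suspend()
		 * and pm_generic_runtime_resume() around these. */
		.runtime_suspend = pm_clk_suspend,
		.runtime_resume = pm_clk_resume,
	},
};

static struct pm_clk_notifier_block foo_clk_notifier = {
	.pm_domain = &foo_pm_domain,
	.con_ids = { "fck", "ick", NULL },
};

static int __init foo_pm_init(void)
{
	pm_clk_add_notifier(&platform_bus_type, &foo_clk_notifier);
	return 0;
}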
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 00000000000..be8714aa9dd
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1273 @@
+/*
+ * drivers/base/power/domain.c - Common code related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+
+static LIST_HEAD(gpd_list);
+static DEFINE_MUTEX(gpd_list_lock);
+
+#ifdef CONFIG_PM
+
+static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+{
+ if (IS_ERR_OR_NULL(dev->pm_domain))
+ return ERR_PTR(-EINVAL);
+
+ return pd_to_genpd(dev->pm_domain);
+}
+
+static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+{
+ if (!WARN_ON(genpd->sd_count == 0))
+ genpd->sd_count--;
+}
+
+static void genpd_acquire_lock(struct generic_pm_domain *genpd)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&genpd->lock);
+ /*
+ * Wait for the domain to transition into either the active
+ * or the power off state.
+ */
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (genpd->status == GPD_STATE_ACTIVE
+ || genpd->status == GPD_STATE_POWER_OFF)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+}
+
+static void genpd_release_lock(struct generic_pm_domain *genpd)
+{
+ mutex_unlock(&genpd->lock);
+}
+
+static void genpd_set_active(struct generic_pm_domain *genpd)
+{
+ if (genpd->resume_count == 0)
+ genpd->status = GPD_STATE_ACTIVE;
+}
+
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * @genpd: PM domain to power up.
+ *
+ * Restore power to @genpd and all of its parents so that it is possible to
+ * resume a device belonging to it.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *parent = genpd->parent;
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
+ start:
+ if (parent) {
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&genpd->lock);
+ }
+
+ if (genpd->status == GPD_STATE_ACTIVE
+ || (genpd->prepared_count > 0 && genpd->suspend_power_off))
+ goto out;
+
+ if (genpd->status != GPD_STATE_POWER_OFF) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (parent && parent->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&genpd->lock);
+ genpd_release_lock(parent);
+
+ ret = pm_genpd_poweron(parent);
+ if (ret)
+ return ret;
+
+ goto start;
+ }
+
+ if (genpd->power_on) {
+ ret = genpd->power_on(genpd);
+ if (ret)
+ goto out;
+ }
+
+ genpd_set_active(genpd);
+ if (parent)
+ parent->sd_count++;
+
+ out:
+ mutex_unlock(&genpd->lock);
+ if (parent)
+ genpd_release_lock(parent);
+
+ return ret;
+}
+
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_RUNTIME
+
+/**
+ * __pm_genpd_save_device - Save the pre-suspend state of a device.
+ * @dle: Device list entry of the device to save the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static int __pm_genpd_save_device(struct dev_list_entry *dle,
+ struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct device *dev = dle->dev;
+ struct device_driver *drv = dev->driver;
+ int ret = 0;
+
+ if (dle->need_restore)
+ return 0;
+
+ mutex_unlock(&genpd->lock);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend) {
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ ret = drv->pm->runtime_suspend(dev);
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+ }
+
+ mutex_lock(&genpd->lock);
+
+ if (!ret)
+ dle->need_restore = true;
+
+ return ret;
+}
+
+/**
+ * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
+ * @dle: Device list entry of the device to restore the state of.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+ struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct device *dev = dle->dev;
+ struct device_driver *drv = dev->driver;
+
+ if (!dle->need_restore)
+ return;
+
+ mutex_unlock(&genpd->lock);
+
+ if (drv && drv->pm && drv->pm->runtime_resume) {
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ drv->pm->runtime_resume(dev);
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+ }
+
+ mutex_lock(&genpd->lock);
+
+ dle->need_restore = false;
+}
+
+/**
+ * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
+ * @genpd: PM domain to check.
+ *
+ * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
+ * a "power off" operation, which means that a "power on" has occured in the
+ * meantime, or if its resume_count field is different from zero, which means
+ * that one of its devices has been resumed in the meantime.
+ */
+static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
+{
+ return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+}
+
+/**
+ * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * @genpd: PM domain to power off.
+ *
+ * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * before.
+ */
+void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+{
+ if (!work_pending(&genpd->power_off_work))
+ queue_work(pm_wq, &genpd->power_off_work);
+}
+
+/**
+ * pm_genpd_poweroff - Remove power from a given PM domain.
+ * @genpd: PM domain to power down.
+ *
+ * If all of the @genpd's devices have been suspended and all of its subdomains
+ * have been powered down, run the runtime suspend callbacks provided by all of
+ * the @genpd's devices' drivers and remove power from @genpd.
+ */
+static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+ __releases(&genpd->lock) __acquires(&genpd->lock)
+{
+ struct generic_pm_domain *parent;
+ struct dev_list_entry *dle;
+ unsigned int not_suspended;
+ int ret = 0;
+
+ start:
+ /*
+ * Do not try to power off the domain in the following situations:
+ * (1) The domain is already in the "power off" state.
+ * (2) System suspend is in progress.
+ * (3) One of the domain's devices is being resumed right now.
+ */
+ if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
+ || genpd->resume_count > 0)
+ return 0;
+
+ if (genpd->sd_count > 0)
+ return -EBUSY;
+
+ not_suspended = 0;
+ list_for_each_entry(dle, &genpd->dev_list, node)
+ if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+ not_suspended++;
+
+ if (not_suspended > genpd->in_progress)
+ return -EBUSY;
+
+ if (genpd->poweroff_task) {
+ /*
+ * Another instance of pm_genpd_poweroff() is executing
+ * callbacks, so tell it to start over and return.
+ */
+ genpd->status = GPD_STATE_REPEAT;
+ return 0;
+ }
+
+ if (genpd->gov && genpd->gov->power_down_ok) {
+ if (!genpd->gov->power_down_ok(&genpd->domain))
+ return -EAGAIN;
+ }
+
+ genpd->status = GPD_STATE_BUSY;
+ genpd->poweroff_task = current;
+
+ list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
+ ret = __pm_genpd_save_device(dle, genpd);
+ if (ret) {
+ genpd_set_active(genpd);
+ goto out;
+ }
+
+ if (genpd_abort_poweroff(genpd))
+ goto out;
+
+ if (genpd->status == GPD_STATE_REPEAT) {
+ genpd->poweroff_task = NULL;
+ goto start;
+ }
+ }
+
+ parent = genpd->parent;
+ if (parent) {
+ mutex_unlock(&genpd->lock);
+
+ genpd_acquire_lock(parent);
+ mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
+
+ if (genpd_abort_poweroff(genpd)) {
+ genpd_release_lock(parent);
+ goto out;
+ }
+ }
+
+ if (genpd->power_off) {
+ ret = genpd->power_off(genpd);
+ if (ret == -EBUSY) {
+ genpd_set_active(genpd);
+ if (parent)
+ genpd_release_lock(parent);
+
+ goto out;
+ }
+ }
+
+ genpd->status = GPD_STATE_POWER_OFF;
+
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ if (parent->sd_count == 0)
+ genpd_queue_power_off_work(parent);
+
+ genpd_release_lock(parent);
+ }
+
+ out:
+ genpd->poweroff_task = NULL;
+ wake_up_all(&genpd->status_wait_queue);
+ return ret;
+}
+
+/**
+ * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
+ * @work: Work structure used for scheduling the execution of this function.
+ */
+static void genpd_power_off_work_fn(struct work_struct *work)
+{
+ struct generic_pm_domain *genpd;
+
+ genpd = container_of(work, struct generic_pm_domain, power_off_work);
+
+ genpd_acquire_lock(genpd);
+ pm_genpd_poweroff(genpd);
+ genpd_release_lock(genpd);
+}
+
+/**
+ * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a runtime suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->stop_device) {
+ int ret = genpd->stop_device(dev);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&genpd->lock);
+ genpd->in_progress++;
+ pm_genpd_poweroff(genpd);
+ genpd->in_progress--;
+ mutex_unlock(&genpd->lock);
+
+ return 0;
+}
+
+/**
+ * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_runtime_resume(struct device *dev,
+ struct generic_pm_domain *genpd)
+{
+ struct dev_list_entry *dle;
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev == dev) {
+ __pm_genpd_restore_device(dle, genpd);
+ break;
+ }
+ }
+}
+
+/**
+ * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out a runtime resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_runtime_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ DEFINE_WAIT(wait);
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ ret = pm_genpd_poweron(genpd);
+ if (ret)
+ return ret;
+
+ mutex_lock(&genpd->lock);
+ genpd->status = GPD_STATE_BUSY;
+ genpd->resume_count++;
+ for (;;) {
+ prepare_to_wait(&genpd->status_wait_queue, &wait,
+ TASK_UNINTERRUPTIBLE);
+ /*
+ * If current is the powering off task, we have been called
+ * reentrantly from one of the device callbacks, so we should
+ * not wait.
+ */
+ if (!genpd->poweroff_task || genpd->poweroff_task == current)
+ break;
+ mutex_unlock(&genpd->lock);
+
+ schedule();
+
+ mutex_lock(&genpd->lock);
+ }
+ finish_wait(&genpd->status_wait_queue, &wait);
+ __pm_genpd_runtime_resume(dev, genpd);
+ genpd->resume_count--;
+ genpd_set_active(genpd);
+ wake_up_all(&genpd->status_wait_queue);
+ mutex_unlock(&genpd->lock);
+
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return 0;
+}
+
+#else
+
+static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+static inline void __pm_genpd_runtime_resume(struct device *dev,
+ struct generic_pm_domain *genpd) {}
+
+#define pm_genpd_runtime_suspend NULL
+#define pm_genpd_runtime_resume NULL
+
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so. Also, in that case propagate to its parent.
+ *
+ * This function is only called in "noirq" stages of system power transitions,
+ * so it need not acquire locks (all of the "noirq" callbacks are executed
+ * sequentially, so it is guaranteed that it will never run twice in parallel).
+ */
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+{
+ struct generic_pm_domain *parent = genpd->parent;
+
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ return;
+
+ if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+ return;
+
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (parent) {
+ genpd_sd_counter_dec(parent);
+ pm_genpd_sync_poweroff(parent);
+ }
+}
+
+/**
+ * resume_needed - Check whether to resume a device before system suspend.
+ * @dev: Device to check.
+ * @genpd: PM domain the device belongs to.
+ *
+ * There are two cases in which a device that can wake up the system from sleep
+ * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
+ * to wake up the system and it has to remain active for this purpose while the
+ * system is in the sleep state and (2) if the device is not enabled to wake up
+ * the system from sleep states and it generally doesn't generate wakeup signals
+ * by itself (those signals are generated on its behalf by other parts of the
+ * system). In the latter case it may be necessary to reconfigure the device's
+ * wakeup settings during system suspend, because it may have been set up to
+ * signal remote wakeup from the system's working state as needed by runtime PM.
+ * Return 'true' in either of the above cases.
+ */
+static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
+{
+ bool active_wakeup;
+
+ if (!device_can_wakeup(dev))
+ return false;
+
+ active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
+}
+
+/**
+ * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int pm_genpd_prepare(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * If a wakeup request is pending for the device, it should be woken up
+ * at this point and a system wakeup event should be reported if it's
+ * set up to wake up the system from sleep states.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ if (resume_needed(dev, genpd))
+ pm_runtime_resume(dev);
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->prepared_count++ == 0)
+ genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+
+ genpd_release_lock(genpd);
+
+ if (genpd->suspend_power_off) {
+ pm_runtime_put_noidle(dev);
+ return 0;
+ }
+
+ /*
+ * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
+ * so pm_genpd_poweron() will return immediately, but if the device
+ * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * to make it operational.
+ */
+ pm_runtime_resume(dev);
+ __pm_runtime_disable(dev, false);
+
+ ret = pm_generic_prepare(dev);
+ if (ret) {
+ mutex_lock(&genpd->lock);
+
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
+
+ mutex_unlock(&genpd->lock);
+ pm_runtime_enable(dev);
+ }
+
+ pm_runtime_put_sync(dev);
+ return ret;
+}
+
+/**
+ * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Suspend a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_suspend_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(dev)
+ && genpd->active_wakeup && genpd->active_wakeup(dev))
+ return 0;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ pm_genpd_poweron(genpd);
+ genpd->suspended_count--;
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Resume a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_resume(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Freeze a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_freeze(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_freeze_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Carry out an early thaw of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Thaw a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_thaw(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Power off a device under the assumption that its pm_domain field points to
+ * the domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late powering off of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ if (genpd->suspend_power_off)
+ return 0;
+
+ ret = pm_generic_poweroff_noirq(dev);
+ if (ret)
+ return ret;
+
+ if (device_may_wakeup(dev)
+ && genpd->active_wakeup && genpd->active_wakeup(dev))
+ return 0;
+
+ if (genpd->stop_device)
+ genpd->stop_device(dev);
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->suspended_count++;
+ pm_genpd_sync_poweroff(genpd);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early restore of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_restore_noirq(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ /*
+ * Since all of the "noirq" callbacks are executed sequentially, it is
+ * guaranteed that this function will never run twice in parallel for
+ * the same PM domain, so it is not necessary to use locking here.
+ */
+ genpd->status = GPD_STATE_POWER_OFF;
+ if (genpd->suspend_power_off) {
+ /*
+ * The boot kernel might put the domain into the power on state,
+ * so make sure it really is powered off.
+ */
+ if (genpd->power_off)
+ genpd->power_off(genpd);
+ return 0;
+ }
+
+ pm_genpd_poweron(genpd);
+ genpd->suspended_count--;
+ if (genpd->start_device)
+ genpd->start_device(dev);
+
+ return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * pm_genpd_restore - Restore a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Restore a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_restore(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return -EINVAL;
+
+ return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+}
+
+/**
+ * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void pm_genpd_complete(struct device *dev)
+{
+ struct generic_pm_domain *genpd;
+ bool run_complete;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ genpd = dev_to_genpd(dev);
+ if (IS_ERR(genpd))
+ return;
+
+ mutex_lock(&genpd->lock);
+
+ run_complete = !genpd->suspend_power_off;
+ if (--genpd->prepared_count == 0)
+ genpd->suspend_power_off = false;
+
+ mutex_unlock(&genpd->lock);
+
+ if (run_complete) {
+ pm_generic_complete(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_idle(dev);
+ }
+}
+
+#else
+
+#define pm_genpd_prepare NULL
+#define pm_genpd_suspend NULL
+#define pm_genpd_suspend_noirq NULL
+#define pm_genpd_resume_noirq NULL
+#define pm_genpd_resume NULL
+#define pm_genpd_freeze NULL
+#define pm_genpd_freeze_noirq NULL
+#define pm_genpd_thaw_noirq NULL
+#define pm_genpd_thaw NULL
+#define pm_genpd_dev_poweroff_noirq NULL
+#define pm_genpd_dev_poweroff NULL
+#define pm_genpd_restore_noirq NULL
+#define pm_genpd_restore NULL
+#define pm_genpd_complete NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * @genpd: PM domain to add the device to.
+ * @dev: Device to be added.
+ */
+int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = 0;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->status == GPD_STATE_POWER_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ list_for_each_entry(dle, &genpd->dev_list, node)
+ if (dle->dev == dev) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dle = kzalloc(sizeof(*dle), GFP_KERNEL);
+ if (!dle) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dle->dev = dev;
+ dle->need_restore = false;
+ list_add_tail(&dle->node, &genpd->dev_list);
+ genpd->device_count++;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = &genpd->domain;
+ spin_unlock_irq(&dev->power.lock);
+
+ out:
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_remove_device - Remove a device from an I/O PM domain.
+ * @genpd: PM domain to remove the device from.
+ * @dev: Device to be removed.
+ */
+int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ struct dev_list_entry *dle;
+ int ret = -EINVAL;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
+ return -EINVAL;
+
+ genpd_acquire_lock(genpd);
+
+ if (genpd->prepared_count > 0) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ list_for_each_entry(dle, &genpd->dev_list, node) {
+ if (dle->dev != dev)
+ continue;
+
+ spin_lock_irq(&dev->power.lock);
+ dev->pm_domain = NULL;
+ spin_unlock_irq(&dev->power.lock);
+
+ genpd->device_count--;
+ list_del(&dle->node);
+ kfree(dle);
+
+ ret = 0;
+ break;
+ }
+
+ out:
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
+ * @genpd: Master PM domain to add the subdomain to.
+ * @new_subdomain: Subdomain to be added.
+ */
+int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
+ return -EINVAL;
+
+ start:
+ genpd_acquire_lock(genpd);
+ mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (new_subdomain->status != GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
+ if (genpd->status == GPD_STATE_POWER_OFF
+ && new_subdomain->status != GPD_STATE_POWER_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain == new_subdomain) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
+ new_subdomain->parent = genpd;
+ if (new_subdomain->status != GPD_STATE_POWER_OFF)
+ genpd->sd_count++;
+
+ out:
+ mutex_unlock(&new_subdomain->lock);
+ genpd_release_lock(genpd);
+
+ return ret;
+}
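Domain hierarchies are built by nesting one initialized domain under another; a short sketch with hypothetical soc_pd and gpu_pd objects, both previously set up with pm_genpd_init():

	/* gpu_pd becomes a subdomain of soc_pd */
	ret = pm_genpd_add_subdomain(&soc_pd, &gpu_pd);
	if (ret)
		pr_err("failed to nest gpu_pd under soc_pd: %d\n", ret);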
+
+/**
+ * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
+ * @genpd: Master PM domain to remove the subdomain from.
+ * @target: Subdomain to be removed.
+ */
+int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ struct generic_pm_domain *subdomain;
+ int ret = -EINVAL;
+
+ if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
+ return -EINVAL;
+
+ start:
+ genpd_acquire_lock(genpd);
+
+ list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
+ if (subdomain != target)
+ continue;
+
+ mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+
+ if (subdomain->status != GPD_STATE_POWER_OFF
+ && subdomain->status != GPD_STATE_ACTIVE) {
+ mutex_unlock(&subdomain->lock);
+ genpd_release_lock(genpd);
+ goto start;
+ }
+
+ list_del(&subdomain->sd_node);
+ subdomain->parent = NULL;
+ if (subdomain->status != GPD_STATE_POWER_OFF)
+ genpd_sd_counter_dec(genpd);
+
+ mutex_unlock(&subdomain->lock);
+
+ ret = 0;
+ break;
+ }
+
+ genpd_release_lock(genpd);
+
+ return ret;
+}
+
+/**
+ * pm_genpd_init - Initialize a generic I/O PM domain object.
+ * @genpd: PM domain object to initialize.
+ * @gov: PM domain governor to associate with the domain (may be NULL).
+ * @is_off: Initial state of the domain (%true if it starts powered off).
+ */
+void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off)
+{
+ if (IS_ERR_OR_NULL(genpd))
+ return;
+
+ INIT_LIST_HEAD(&genpd->sd_node);
+ genpd->parent = NULL;
+ INIT_LIST_HEAD(&genpd->dev_list);
+ INIT_LIST_HEAD(&genpd->sd_list);
+ mutex_init(&genpd->lock);
+ genpd->gov = gov;
+ INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
+ genpd->in_progress = 0;
+ genpd->sd_count = 0;
+ genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
+ init_waitqueue_head(&genpd->status_wait_queue);
+ genpd->poweroff_task = NULL;
+ genpd->resume_count = 0;
+ genpd->device_count = 0;
+ genpd->suspended_count = 0;
+ genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
+ genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
+ genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+ genpd->domain.ops.prepare = pm_genpd_prepare;
+ genpd->domain.ops.suspend = pm_genpd_suspend;
+ genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+ genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+ genpd->domain.ops.resume = pm_genpd_resume;
+ genpd->domain.ops.freeze = pm_genpd_freeze;
+ genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+ genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+ genpd->domain.ops.thaw = pm_genpd_thaw;
+ genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+ genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.complete = pm_genpd_complete;
+ mutex_lock(&gpd_list_lock);
+ list_add(&genpd->gpd_list_node, &gpd_list);
+ mutex_unlock(&gpd_list_lock);
+}
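Putting the pieces together, a platform would typically wrap its power switch in the domain's power_on/power_off callbacks and then register its devices. A hedged sketch in which my_pd, the callbacks and the register pokes are all illustrative:

	static int my_pd_power_on(struct generic_pm_domain *domain)
	{
		/* platform-specific: ungate power/clocks here */
		return 0;
	}

	static int my_pd_power_off(struct generic_pm_domain *domain)
	{
		/* platform-specific: gate power/clocks here */
		return 0;
	}

	static struct generic_pm_domain my_pd = {
		.power_on	= my_pd_power_on,
		.power_off	= my_pd_power_off,
	};

	static void __init my_pd_setup(struct device *dev)
	{
		pm_genpd_init(&my_pd, NULL, false);	/* starts powered on */
		pm_genpd_add_device(&my_pd, dev);
	}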
+
+/**
+ * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ */
+void pm_genpd_poweroff_unused(void)
+{
+ struct generic_pm_domain *genpd;
+
+ mutex_lock(&gpd_list_lock);
+
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node)
+ genpd_queue_power_off_work(genpd);
+
+ mutex_unlock(&gpd_list_lock);
+}
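Once all drivers have had a chance to probe and claim their devices, a platform can flush the still-unused domains; a minimal sketch:

	static int __init my_platform_pm_late_init(void)
	{
		pm_genpd_poweroff_unused();
		return 0;
	}
	late_initcall(my_platform_pm_late_init);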
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index cb3bb368681..9508df71274 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
* __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage of the transition.
*
* If the device has not been suspended at run time, execute the
* suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
* return its error code. Otherwise, return zero.
*/
-static int __pm_generic_call(struct device *dev, int event)
+static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
switch (event) {
case PM_EVENT_SUSPEND:
- callback = pm->suspend;
+ callback = noirq ? pm->suspend_noirq : pm->suspend;
break;
case PM_EVENT_FREEZE:
- callback = pm->freeze;
+ callback = noirq ? pm->freeze_noirq : pm->freeze;
break;
case PM_EVENT_HIBERNATE:
- callback = pm->poweroff;
+ callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
case PM_EVENT_THAW:
- callback = pm->thaw;
+ callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
default:
callback = NULL;
@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
}
/**
+ * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
+
+/**
* pm_generic_suspend - Generic suspend callback for subsystems.
* @dev: Device to suspend.
*/
int pm_generic_suspend(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
}
EXPORT_SYMBOL_GPL(pm_generic_suspend);
/**
+ * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
+
+/**
* pm_generic_freeze - Generic freeze callback for subsystems.
* @dev: Device to freeze.
*/
int pm_generic_freeze(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_FREEZE);
+ return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_freeze);
/**
+ * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
+
+/**
* pm_generic_poweroff - Generic poweroff callback for subsystems.
* @dev: Device to handle.
*/
int pm_generic_poweroff(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_poweroff);
/**
+ * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw_noirq(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
+
+/**
* pm_generic_thaw - Generic thaw callback for subsystems.
* @dev: Device to thaw.
*/
int pm_generic_thaw(struct device *dev)
{
- return __pm_generic_call(dev, PM_EVENT_THAW);
+ return __pm_generic_call(dev, PM_EVENT_THAW, false);
}
EXPORT_SYMBOL_GPL(pm_generic_thaw);
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
* __pm_generic_resume - Generic resume/restore callback for subsystems.
* @dev: Device to handle.
* @event: PM transition of the system under way.
+ * @noirq: Whether or not this is the "noirq" stage of the transition.
*
* Execute the resume/restore callback provided by @dev's driver, if
* defined. If it returns 0, change the device's runtime PM status to 'active'.
* Return the callback's error code.
*/
-static int __pm_generic_resume(struct device *dev, int event)
+static int __pm_generic_resume(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
switch (event) {
case PM_EVENT_RESUME:
- callback = pm->resume;
+ callback = noirq ? pm->resume_noirq : pm->resume;
break;
case PM_EVENT_RESTORE:
- callback = pm->restore;
+ callback = noirq ? pm->restore_noirq : pm->restore;
break;
default:
callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
return 0;
ret = callback(dev);
- if (!ret && pm_runtime_enabled(dev)) {
+ if (!ret && !noirq && pm_runtime_enabled(dev)) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
}
/**
+ * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
+
+/**
* pm_generic_resume - Generic resume callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME);
+ return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
/**
+ * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore_noirq(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
+
+/**
* pm_generic_restore - Generic restore callback for subsystems.
* @dev: Device to restore.
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.prepare = pm_generic_prepare,
.suspend = pm_generic_suspend,
+ .suspend_noirq = pm_generic_suspend_noirq,
.resume = pm_generic_resume,
+ .resume_noirq = pm_generic_resume_noirq,
.freeze = pm_generic_freeze,
+ .freeze_noirq = pm_generic_freeze_noirq,
.thaw = pm_generic_thaw,
+ .thaw_noirq = pm_generic_thaw_noirq,
.poweroff = pm_generic_poweroff,
+ .poweroff_noirq = pm_generic_poweroff_noirq,
.restore = pm_generic_restore,
+ .restore_noirq = pm_generic_restore_noirq,
.complete = pm_generic_complete,
#endif
#ifdef CONFIG_PM_RUNTIME
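Subsystems that don't want the whole generic_subsys_pm_ops table can still pick the new noirq helpers piecemeal for their own dev_pm_ops; a short sketch with a hypothetical my_bus_pm_ops:

	static const struct dev_pm_ops my_bus_pm_ops = {
		.suspend	= pm_generic_suspend,
		.suspend_noirq	= pm_generic_suspend_noirq,
		.resume		= pm_generic_resume,
		.resume_noirq	= pm_generic_resume_noirq,
	};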
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 06f09bf89cb..a85459126bc 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -425,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
TRACE_DEVICE(dev);
TRACE_RESUME(0);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "EARLY type ");
error = pm_noirq_op(dev, dev->type->pm, state);
@@ -505,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ bool put = false;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
@@ -521,9 +522,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
if (!dev->power.is_suspended)
goto Unlock;
- if (dev->pwr_domain) {
+ pm_runtime_enable(dev);
+ put = true;
+
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_op(dev, &dev->pm_domain->ops, state);
goto End;
}
@@ -563,6 +567,10 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
complete_all(&dev->power.completion);
TRACE_RESUME(error);
+
+ if (put)
+ pm_runtime_put_sync(dev);
+
return error;
}
@@ -641,10 +649,10 @@ static void device_complete(struct device *dev, pm_message_t state)
{
device_lock(dev);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pwr_domain->ops.complete)
- dev->pwr_domain->ops.complete(dev);
+ if (dev->pm_domain->ops.complete)
+ dev->pm_domain->ops.complete(dev);
} else if (dev->type && dev->type->pm) {
pm_dev_dbg(dev, state, "completing type ");
if (dev->type->pm->complete)
@@ -744,9 +752,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error;
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
if (error)
return error;
} else if (dev->type && dev->type->pm) {
@@ -843,19 +851,25 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
int error = 0;
dpm_wait_for_children(dev, async);
- device_lock(dev);
if (async_error)
- goto Unlock;
+ return 0;
+
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
async_error = -EBUSY;
- goto Unlock;
+ return 0;
}
- if (dev->pwr_domain) {
+ device_lock(dev);
+
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pwr_domain->ops, state);
+ error = pm_op(dev, &dev->pm_domain->ops, state);
goto End;
}
@@ -890,12 +904,15 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
End:
dev->power.is_suspended = !error;
- Unlock:
device_unlock(dev);
complete_all(&dev->power.completion);
- if (error)
+ if (error) {
+ pm_runtime_put_sync(dev);
async_error = error;
+ } else if (dev->power.is_suspended) {
+ __pm_runtime_disable(dev, false);
+ }
return error;
}
@@ -982,11 +999,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
device_lock(dev);
- if (dev->pwr_domain) {
+ if (dev->pm_domain) {
pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pwr_domain->ops.prepare)
- error = dev->pwr_domain->ops.prepare(dev);
- suspend_report_result(dev->pwr_domain->ops.prepare, error);
+ if (dev->pm_domain->ops.prepare)
+ error = dev->pm_domain->ops.prepare(dev);
+ suspend_report_result(dev->pm_domain->ops.prepare, error);
if (error)
goto End;
} else if (dev->type && dev->type->pm) {
@@ -1035,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- pm_runtime_get_noresume(dev);
- if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
- pm_wakeup_event(dev, 0);
-
- pm_runtime_put_sync(dev);
- error = pm_wakeup_pending() ?
- -EBUSY : device_prepare(dev, state);
+ error = device_prepare(dev, state);
mutex_lock(&dpm_list_mtx);
if (error) {
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9..5cc12322ef3 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
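The new helper pairs with opp_init_cpufreq_table() in the obvious way; a hedged sketch of a cpufreq driver's init/exit paths (names are illustrative):

	static struct cpufreq_frequency_table *freq_table;

	static int my_cpufreq_init(struct device *cpu_dev)
	{
		int ret = opp_init_cpufreq_table(cpu_dev, &freq_table);

		if (ret)
			return ret;
		/* ... hand freq_table to the cpufreq core ... */
		return 0;
	}

	static void my_cpufreq_exit(struct device *cpu_dev)
	{
		opp_free_cpufreq_table(cpu_dev, &freq_table);
	}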
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0d4587b15c5..8dc247c974a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/power/runtime.c - Helper functions for device run-time PM
+ * drivers/base/power/runtime.c - Helper functions for device runtime PM
*
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
* Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
if (dev->power.runtime_error)
retval = -EINVAL;
- else if (atomic_read(&dev->power.usage_count) > 0
- || dev->power.disable_depth > 0)
+ else if (dev->power.disable_depth > 0)
+ retval = -EACCES;
+ else if (atomic_read(&dev->power.usage_count) > 0)
retval = -EAGAIN;
else if (!pm_children_suspended(dev))
retval = -EBUSY;
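With this split a caller can finally tell "runtime PM disabled" apart from "device still in use"; a hedged sketch of a driver reacting to the new codes:

	ret = pm_runtime_suspend(dev);	/* ret > 0 means already suspended */
	switch (ret) {
	case -EACCES:
		/* runtime PM is disabled for this device */
		break;
	case -EAGAIN:
	case -EBUSY:
		/* usage count held or children active; retry later */
		break;
	default:
		if (ret < 0)
			dev_err(dev, "runtime suspend failed: %d\n", ret);
	}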
@@ -158,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
* @dev: Device to notify the bus type about.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another idle notification has been started earlier, return immediately. If
* the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
* run the ->runtime_idle() callback directly.
@@ -213,8 +214,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_idle;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_idle;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_idle;
else if (dev->class && dev->class->pm)
@@ -262,15 +263,15 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
spin_lock_irq(&dev->power.lock);
}
dev->power.runtime_error = retval;
- return retval;
+ return retval != -EACCES ? retval : -EIO;
}
/**
- * rpm_suspend - Carry out run-time suspend of given device.
+ * rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be suspended. If
+ * Check if the device's runtime PM status allows it to be suspended. If
* another suspend has been started earlier, either return immediately or wait
* for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
* pending idle notification. If the RPM_ASYNC flag is set then queue a
@@ -374,8 +375,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_suspend;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
@@ -388,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = 0;
+ dev->power.deferred_resume = false;
if (retval == -EAGAIN || retval == -EBUSY)
dev->power.runtime_error = 0;
else
@@ -429,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
}
/**
- * rpm_resume - Carry out run-time resume of given device.
+ * rpm_resume - Carry out runtime resume of given device.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
- * Check if the device's run-time PM status allows it to be resumed. Cancel
+ * Check if the device's runtime PM status allows it to be resumed. Cancel
* any scheduled or pending requests. If another resume has been started
* earlier, either return immediately or wait for it to finish, depending on the
* RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
@@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.runtime_error)
retval = -EINVAL;
else if (dev->power.disable_depth > 0)
- retval = -EAGAIN;
+ retval = -EACCES;
if (retval)
goto out;
@@ -550,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
spin_lock(&parent->power.lock);
/*
- * We can resume if the parent's run-time PM is disabled or it
+ * We can resume if the parent's runtime PM is disabled or it
* is set to ignore children.
*/
if (!parent->power.disable_depth
@@ -573,8 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pwr_domain)
- callback = dev->pwr_domain->ops.runtime_resume;
+ if (dev->pm_domain)
+ callback = dev->pm_domain->ops.runtime_resume;
else if (dev->type && dev->type->pm)
callback = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
@@ -614,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
}
/**
- * pm_runtime_work - Universal run-time PM work function.
+ * pm_runtime_work - Universal runtime PM work function.
* @work: Work structure used for scheduling the execution of this function.
*
* Use @work to get the device object the work is to be done for, determine what
- * is to be done and execute the appropriate run-time PM function.
+ * is to be done and execute the appropriate runtime PM function.
*/
static void pm_runtime_work(struct work_struct *work)
{
@@ -717,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
EXPORT_SYMBOL_GPL(pm_schedule_suspend);
/**
- * __pm_runtime_idle - Entry point for run-time idle operations.
+ * __pm_runtime_idle - Entry point for runtime idle operations.
* @dev: Device to send idle notification for.
* @rpmflags: Flag bits.
*
@@ -746,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_idle);
/**
- * __pm_runtime_suspend - Entry point for run-time put/suspend operations.
+ * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
* @dev: Device to suspend.
* @rpmflags: Flag bits.
*
@@ -775,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
/**
- * __pm_runtime_resume - Entry point for run-time resume operations.
+ * __pm_runtime_resume - Entry point for runtime resume operations.
* @dev: Device to resume.
* @rpmflags: Flag bits.
*
@@ -801,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
EXPORT_SYMBOL_GPL(__pm_runtime_resume);
/**
- * __pm_runtime_set_status - Set run-time PM status of a device.
+ * __pm_runtime_set_status - Set runtime PM status of a device.
* @dev: Device to handle.
- * @status: New run-time PM status of the device.
+ * @status: New runtime PM status of the device.
*
- * If run-time PM of the device is disabled or its power.runtime_error field is
+ * If runtime PM of the device is disabled or its power.runtime_error field is
* different from zero, the status may be changed either to RPM_ACTIVE, or to
* RPM_SUSPENDED, as long as that reflects the actual state of the device.
* However, if the device has a parent and the parent is not active, and the
@@ -851,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
/*
* It is invalid to put an active child under a parent that is
- * not active, has run-time PM enabled and the
+ * not active, has runtime PM enabled and the
* 'power.ignore_children' flag unset.
*/
if (!parent->power.disable_depth
@@ -885,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
* @dev: Device to handle.
*
* Flush all pending requests for the device from pm_wq and wait for all
- * run-time PM operations involving the device in progress to complete.
+ * runtime PM operations involving the device in progress to complete.
*
* Should be called under dev->power.lock with interrupts disabled.
*/
@@ -933,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev)
* Prevent the device from being suspended by incrementing its usage counter and
* if there's a pending resume request for the device, wake the device up.
* Next, make sure that all pending requests for the device have been flushed
- * from pm_wq and wait for all run-time PM operations involving the device in
+ * from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
*
* Return value:
@@ -963,18 +964,18 @@ int pm_runtime_barrier(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
/**
- * __pm_runtime_disable - Disable run-time PM of a device.
+ * __pm_runtime_disable - Disable runtime PM of a device.
* @dev: Device to handle.
* @check_resume: If set, check if there's a resume request for the device.
*
* Increment power.disable_depth for the device and if was zero previously,
- * cancel all pending run-time PM requests for the device and wait for all
+ * cancel all pending runtime PM requests for the device and wait for all
* operations in progress to complete. The device can be either active or
- * suspended after its run-time PM has been disabled.
+ * suspended after its runtime PM has been disabled.
*
* If @check_resume is set and there's a resume request pending when
* __pm_runtime_disable() is called and power.disable_depth is zero, the
- * function will wake up the device before disabling its run-time PM.
+ * function will wake up the device before disabling its runtime PM.
*/
void __pm_runtime_disable(struct device *dev, bool check_resume)
{
@@ -987,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
/*
* Wake up the device if there's a resume request pending, because that
- * means there probably is some I/O to process and disabling run-time PM
+ * means there probably is some I/O to process and disabling runtime PM
* shouldn't prevent the device from processing the I/O.
*/
if (check_resume && dev->power.request_pending
@@ -1012,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
EXPORT_SYMBOL_GPL(__pm_runtime_disable);
/**
- * pm_runtime_enable - Enable run-time PM of a device.
+ * pm_runtime_enable - Enable runtime PM of a device.
* @dev: Device to handle.
*/
void pm_runtime_enable(struct device *dev)
@@ -1031,7 +1032,7 @@ void pm_runtime_enable(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_enable);
/**
- * pm_runtime_forbid - Block run-time PM of a device.
+ * pm_runtime_forbid - Block runtime PM of a device.
* @dev: Device to handle.
*
* Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1054,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_forbid);
/**
- * pm_runtime_allow - Unblock run-time PM of a device.
+ * pm_runtime_allow - Unblock runtime PM of a device.
* @dev: Device to handle.
*
* Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1075,12 +1076,12 @@ void pm_runtime_allow(struct device *dev)
EXPORT_SYMBOL_GPL(pm_runtime_allow);
/**
- * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device.
+ * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
* @dev: Device to handle.
*
* Set the power.no_callbacks flag, which tells the PM core that this
- * device is power-managed through its parent and has no run-time PM
- * callbacks of its own. The run-time sysfs attributes will be removed.
+ * device is power-managed through its parent and has no runtime PM
+ * callbacks of its own. The runtime sysfs attributes will be removed.
*/
void pm_runtime_no_callbacks(struct device *dev)
{
@@ -1156,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
* @delay: Value of the new delay in milliseconds.
*
* Set the device's power.autosuspend_delay value. If it changes to negative
- * and the power.use_autosuspend flag is set, prevent run-time suspends. If it
- * changes the other way, allow run-time suspends.
+ * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
+ * changes the other way, allow runtime suspends.
*/
void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
{
@@ -1177,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
* @dev: Device to handle.
* @use: New value for use_autosuspend.
*
- * Set the device's power.use_autosuspend flag, and allow or prevent run-time
+ * Set the device's power.use_autosuspend flag, and allow or prevent runtime
* suspends as needed.
*/
void __pm_runtime_use_autosuspend(struct device *dev, bool use)
@@ -1194,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
/**
- * pm_runtime_init - Initialize run-time PM fields in given device object.
+ * pm_runtime_init - Initialize runtime PM fields in given device object.
* @dev: Device object to initialize.
*/
void pm_runtime_init(struct device *dev)
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9f5b897961..942d6a7c9ae 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
+ device_lock(dev);
if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
pm_runtime_allow(dev);
else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
pm_runtime_forbid(dev);
else
- return -EINVAL;
+ n = -EINVAL;
+ device_unlock(dev);
return n;
}
@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
+ device_lock(dev);
pm_runtime_set_autosuspend_delay(dev, delay);
+ device_unlock(dev);
return n;
}
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62f..af10abecb99 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
unsigned int val;
get_rtc_time(&time);
- pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n",
+ pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
time.tm_hour, time.tm_min, time.tm_sec,
time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
val = time.tm_year; /* 100 years */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 6cc0db1bf52..3f129b45451 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -684,7 +684,7 @@ again:
err = xenbus_switch_state(dev, XenbusStateConnected);
if (err)
- xenbus_dev_fatal(dev, err, "switching to Connected state",
+ xenbus_dev_fatal(dev, err, "%s: switching to Connected state",
dev->nodename);
return;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 6c7fd7db6df..fb1975d82a7 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1155,12 +1155,19 @@ static int __devinit ace_probe(struct platform_device *dev)
{
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
- int id = dev->id;
+ u32 id = dev->id;
int irq = NO_IRQ;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
+ /* device id and bus width */
+ of_property_read_u32(dev->dev.of_node, "port-number", &id);
+ if ((int)id < 0)
+ id = 0; /* dev->id may be -1 for auto-assigned ids */
+ if (of_find_property(dev->dev.of_node, "8-bit", NULL))
+ bus_width = ACE_BUS_WIDTH_8;
+
for (i = 0; i < dev->num_resources; i++) {
if (dev->resource[i].flags & IORESOURCE_MEM)
physaddr = dev->resource[i].start;
@@ -1181,57 +1188,7 @@ static int __devexit ace_remove(struct platform_device *dev)
return 0;
}
-static struct platform_driver ace_platform_driver = {
- .probe = ace_probe,
- .remove = __devexit_p(ace_remove),
- .driver = {
- .owner = THIS_MODULE,
- .name = "xsysace",
- },
-};
-
-/* ---------------------------------------------------------------------
- * OF_Platform Bus Support
- */
-
#if defined(CONFIG_OF)
-static int __devinit ace_of_probe(struct platform_device *op)
-{
- struct resource res;
- resource_size_t physaddr;
- const u32 *id;
- int irq, bus_width, rc;
-
- /* device id */
- id = of_get_property(op->dev.of_node, "port-number", NULL);
-
- /* physaddr */
- rc = of_address_to_resource(op->dev.of_node, 0, &res);
- if (rc) {
- dev_err(&op->dev, "invalid address\n");
- return rc;
- }
- physaddr = res.start;
-
- /* irq */
- irq = irq_of_parse_and_map(op->dev.of_node, 0);
-
- /* bus width */
- bus_width = ACE_BUS_WIDTH_16;
- if (of_find_property(op->dev.of_node, "8-bit", NULL))
- bus_width = ACE_BUS_WIDTH_8;
-
- /* Call the bus-independent setup code */
- return ace_alloc(&op->dev, id ? be32_to_cpup(id) : 0,
- physaddr, irq, bus_width);
-}
-
-static int __devexit ace_of_remove(struct platform_device *op)
-{
- ace_free(&op->dev);
- return 0;
-}
-
/* Match table for of_platform binding */
static const struct of_device_id ace_of_match[] __devinitconst = {
{ .compatible = "xlnx,opb-sysace-1.00.b", },
@@ -1241,34 +1198,20 @@ static const struct of_device_id ace_of_match[] __devinitconst = {
{},
};
MODULE_DEVICE_TABLE(of, ace_of_match);
+#else /* CONFIG_OF */
+#define ace_of_match NULL
+#endif /* CONFIG_OF */
-static struct platform_driver ace_of_driver = {
- .probe = ace_of_probe,
- .remove = __devexit_p(ace_of_remove),
+static struct platform_driver ace_platform_driver = {
+ .probe = ace_probe,
+ .remove = __devexit_p(ace_remove),
.driver = {
- .name = "xsysace",
.owner = THIS_MODULE,
+ .name = "xsysace",
.of_match_table = ace_of_match,
},
};
-/* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __init ace_of_register(void)
-{
- pr_debug("xsysace: registering OF binding\n");
- return platform_driver_register(&ace_of_driver);
-}
-
-static inline void __exit ace_of_unregister(void)
-{
- platform_driver_unregister(&ace_of_driver);
-}
-#else /* CONFIG_OF */
-/* CONFIG_OF not enabled; do nothing helpers */
-static inline int __init ace_of_register(void) { return 0; }
-static inline void __exit ace_of_unregister(void) { }
-#endif /* CONFIG_OF */
-
/* ---------------------------------------------------------------------
* Module init/exit routines
*/
@@ -1282,11 +1225,6 @@ static int __init ace_init(void)
goto err_blk;
}
- rc = ace_of_register();
- if (rc)
- goto err_of;
-
- pr_debug("xsysace: registering platform binding\n");
rc = platform_driver_register(&ace_platform_driver);
if (rc)
goto err_plat;
@@ -1295,21 +1233,17 @@ static int __init ace_init(void)
return 0;
err_plat:
- ace_of_unregister();
-err_of:
unregister_blkdev(ace_major, "xsysace");
err_blk:
printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc);
return rc;
}
+module_init(ace_init);
static void __exit ace_exit(void)
{
pr_debug("Unregistering Xilinx SystemACE driver\n");
platform_driver_unregister(&ace_platform_driver);
- ace_of_unregister();
unregister_blkdev(ace_major, "xsysace");
}
-
-module_init(ace_init);
module_exit(ace_exit);
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index 548708c4b2b..a7346ab97a3 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -606,7 +606,7 @@ static int apm_suspend_notifier(struct notifier_block *nb,
return NOTIFY_OK;
/* interrupted by signal */
- return NOTIFY_BAD;
+ return notifier_from_errno(err);
case PM_POST_SUSPEND:
/*
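notifier_from_errno() encodes the errno into a NOTIFY_STOP_MASK reply so that notifier_to_errno() can recover it at the call site; a minimal sketch with a hypothetical setup helper:

	static int my_pm_notifier(struct notifier_block *nb,
				  unsigned long event, void *data)
	{
		int err = my_setup();	/* hypothetical, returns 0 or -errno */

		return err ? notifier_from_errno(err) : NOTIFY_OK;
	}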
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 2b46a7efa0a..281902d3f7e 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -28,6 +28,7 @@
#include <linux/init.h>
#include <linux/connector.h>
#include <linux/gfp.h>
+#include <linux/ptrace.h>
#include <asm/atomic.h>
#include <asm/unaligned.h>
@@ -166,6 +167,40 @@ void proc_sid_connector(struct task_struct *task)
cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}
+void proc_ptrace_connector(struct task_struct *task, int ptrace_id)
+{
+ struct cn_msg *msg;
+ struct proc_event *ev;
+ struct timespec ts;
+ __u8 buffer[CN_PROC_MSG_SIZE];
+ struct task_struct *tracer;
+
+ if (atomic_read(&proc_event_num_listeners) < 1)
+ return;
+
+ msg = (struct cn_msg *)buffer;
+ ev = (struct proc_event *)msg->data;
+ get_seq(&msg->seq, &ev->cpu);
+ ktime_get_ts(&ts); /* get high res monotonic timestamp */
+ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+ ev->what = PROC_EVENT_PTRACE;
+ ev->event_data.ptrace.process_pid = task->pid;
+ ev->event_data.ptrace.process_tgid = task->tgid;
+ if (ptrace_id == PTRACE_ATTACH) {
+ ev->event_data.ptrace.tracer_pid = current->pid;
+ ev->event_data.ptrace.tracer_tgid = current->tgid;
+ } else if (ptrace_id == PTRACE_DETACH) {
+ ev->event_data.ptrace.tracer_pid = 0;
+ ev->event_data.ptrace.tracer_tgid = 0;
+ } else
+ return;
+
+ memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+ msg->ack = 0; /* not used */
+ msg->len = sizeof(*ev);
+ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
void proc_exit_connector(struct task_struct *task)
{
struct cn_msg *msg;
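On the receiving end, a proc-connector listener can decode the new event; a hedged userspace sketch that assumes the matching linux/cn_proc.h additions from this series and the usual connector boilerplate (netlink socket bound to CN_IDX_PROC, PROC_CN_MCAST_LISTEN already sent):

	#include <linux/cn_proc.h>
	#include <stdio.h>

	static void handle_proc_event(struct proc_event *ev)
	{
		if (ev->what != PROC_EVENT_PTRACE)
			return;

		if (ev->event_data.ptrace.tracer_pid)
			printf("pid %d is now traced by pid %d\n",
			       ev->event_data.ptrace.process_pid,
			       ev->event_data.ptrace.tracer_pid);
		else
			printf("pid %d is no longer traced\n",
			       ev->event_data.ptrace.process_pid);
	}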
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 25cf327cd1c..2e3b3d38c46 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -237,6 +237,13 @@ config MXS_DMA
Support the MXS DMA engine. This engine including APBH-DMA
and APBX-DMA is integrated into Freescale i.MX23/28 chips.
+config EP93XX_DMA
+ bool "Cirrus Logic EP93xx DMA support"
+ depends on ARCH_EP93XX
+ select DMA_ENGINE
+ help
+ Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 836095ab3c5..30cf3b1f0c5 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
new file mode 100644
index 00000000000..0766c1e53b1
--- /dev/null
+++ b/drivers/dma/ep93xx_dma.c
@@ -0,0 +1,1355 @@
+/*
+ * Driver for the Cirrus Logic EP93xx DMA Controller
+ *
+ * Copyright (C) 2011 Mika Westerberg
+ *
+ * DMA M2P implementation is based on the original
+ * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
+ *
+ * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
+ * Copyright (C) 2006 Applied Data Systems
+ * Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
+ *
+ * This driver is based on dw_dmac and amba-pl08x drivers.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <mach/dma.h>
+
+/* M2P registers */
+#define M2P_CONTROL 0x0000
+#define M2P_CONTROL_STALLINT BIT(0)
+#define M2P_CONTROL_NFBINT BIT(1)
+#define M2P_CONTROL_CH_ERROR_INT BIT(3)
+#define M2P_CONTROL_ENABLE BIT(4)
+#define M2P_CONTROL_ICE BIT(6)
+
+#define M2P_INTERRUPT 0x0004
+#define M2P_INTERRUPT_STALL BIT(0)
+#define M2P_INTERRUPT_NFB BIT(1)
+#define M2P_INTERRUPT_ERROR BIT(3)
+
+#define M2P_PPALLOC 0x0008
+#define M2P_STATUS 0x000c
+
+#define M2P_MAXCNT0 0x0020
+#define M2P_BASE0 0x0024
+#define M2P_MAXCNT1 0x0030
+#define M2P_BASE1 0x0034
+
+#define M2P_STATE_IDLE 0
+#define M2P_STATE_STALL 1
+#define M2P_STATE_ON 2
+#define M2P_STATE_NEXT 3
+
+/* M2M registers */
+#define M2M_CONTROL 0x0000
+#define M2M_CONTROL_DONEINT BIT(2)
+#define M2M_CONTROL_ENABLE BIT(3)
+#define M2M_CONTROL_START BIT(4)
+#define M2M_CONTROL_DAH BIT(11)
+#define M2M_CONTROL_SAH BIT(12)
+#define M2M_CONTROL_PW_SHIFT 9
+#define M2M_CONTROL_PW_8 (0 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_16 (1 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_32 (2 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_PW_MASK (3 << M2M_CONTROL_PW_SHIFT)
+#define M2M_CONTROL_TM_SHIFT 13
+#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
+#define M2M_CONTROL_RSS_SHIFT 22
+#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_RSS_IDE (3 << M2M_CONTROL_RSS_SHIFT)
+#define M2M_CONTROL_NO_HDSK BIT(24)
+#define M2M_CONTROL_PWSC_SHIFT 25
+
+#define M2M_INTERRUPT 0x0004
+#define M2M_INTERRUPT_DONEINT BIT(1)
+
+#define M2M_BCR0 0x0010
+#define M2M_BCR1 0x0014
+#define M2M_SAR_BASE0 0x0018
+#define M2M_SAR_BASE1 0x001c
+#define M2M_DAR_BASE0 0x002c
+#define M2M_DAR_BASE1 0x0030
+
+#define DMA_MAX_CHAN_BYTES 0xffff
+#define DMA_MAX_CHAN_DESCRIPTORS 32
+
+struct ep93xx_dma_engine;
+
+/**
+ * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
+ * @src_addr: source address of the transaction
+ * @dst_addr: destination address of the transaction
+ * @size: size of the transaction (in bytes)
+ * @complete: this descriptor is completed
+ * @txd: dmaengine API descriptor
+ * @tx_list: list of linked descriptors
+ * @node: link used for putting this into a channel queue
+ */
+struct ep93xx_dma_desc {
+ u32 src_addr;
+ u32 dst_addr;
+ size_t size;
+ bool complete;
+ struct dma_async_tx_descriptor txd;
+ struct list_head tx_list;
+ struct list_head node;
+};
+
+/**
+ * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
+ * @chan: dmaengine API channel
+ * @edma: pointer to the engine device
+ * @regs: memory mapped registers
+ * @irq: interrupt number of the channel
+ * @clk: clock used by this channel
+ * @tasklet: channel specific tasklet used for callbacks
+ * @lock: lock protecting the fields following
+ * @flags: flags for the channel
+ * @buffer: which buffer to use next (0/1)
+ * @last_completed: last completed cookie value
+ * @active: flattened chain of descriptors currently being processed
+ * @queue: pending descriptors which are handled next
+ * @free_list: list of free descriptors which can be used
+ * @runtime_addr: physical address currently used as dest/src (M2M only). This
+ * is set via %DMA_SLAVE_CONFIG before the slave operation is
+ * prepared
+ * @runtime_ctrl: M2M runtime values for the control register.
+ *
+ * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
+ * we use a slightly different scheme here: @active points to the head of a
+ * flattened DMA descriptor chain.
+ *
+ * @queue holds pending transactions. These are linked through the first
+ * descriptor in the chain. When a descriptor is moved to the @active queue,
+ * the first and chained descriptors are flattened into a single list.
+ *
+ * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
+ * necessary channel configuration information. For memcpy channels this must
+ * be %NULL.
+ */
+struct ep93xx_dma_chan {
+ struct dma_chan chan;
+ const struct ep93xx_dma_engine *edma;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct tasklet_struct tasklet;
+ /* protects the fields following */
+ spinlock_t lock;
+ unsigned long flags;
+/* Channel is configured for cyclic transfers */
+#define EP93XX_DMA_IS_CYCLIC 0
+
+ int buffer;
+ dma_cookie_t last_completed;
+ struct list_head active;
+ struct list_head queue;
+ struct list_head free_list;
+ u32 runtime_addr;
+ u32 runtime_ctrl;
+};
+
+/**
+ * struct ep93xx_dma_engine - the EP93xx DMA engine instance
+ * @dma_dev: holds the dmaengine device
+ * @m2m: is this an M2M or M2P device
+ * @hw_setup: method which sets the channel up for operation
+ * @hw_shutdown: shuts the channel down and flushes whatever is left
+ * @hw_submit: pushes active descriptor(s) to the hardware
+ * @hw_interrupt: handle the interrupt
+ * @num_channels: number of channels for this instance
+ * @channels: array of channels
+ *
+ * There is one instance of this struct for the M2P channels and one for the
+ * M2M channels. hw_xxx() methods are used to perform operations which are
+ * different on M2M and M2P channels. These methods are called with the
+ * channel lock held and interrupts disabled, so they must not sleep.
+ */
+struct ep93xx_dma_engine {
+ struct dma_device dma_dev;
+ bool m2m;
+ int (*hw_setup)(struct ep93xx_dma_chan *);
+ void (*hw_shutdown)(struct ep93xx_dma_chan *);
+ void (*hw_submit)(struct ep93xx_dma_chan *);
+ int (*hw_interrupt)(struct ep93xx_dma_chan *);
+#define INTERRUPT_UNKNOWN 0
+#define INTERRUPT_DONE 1
+#define INTERRUPT_NEXT_BUFFER 2
+
+ size_t num_channels;
+ struct ep93xx_dma_chan channels[];
+};
+
+static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
+{
+ return &edmac->chan.dev->device;
+}
+
+static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct ep93xx_dma_chan, chan);
+}
+
+/**
+ * ep93xx_dma_set_active - set new active descriptor chain
+ * @edmac: channel
+ * @desc: head of the new active descriptor chain
+ *
+ * Sets @desc to be the head of the new active descriptor chain. This is the
+ * chain which is processed next. The active list must be empty before calling
+ * this function.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
+ struct ep93xx_dma_desc *desc)
+{
+ BUG_ON(!list_empty(&edmac->active));
+
+ list_add_tail(&desc->node, &edmac->active);
+
+ /* Flatten the @desc->tx_list chain into @edmac->active list */
+ while (!list_empty(&desc->tx_list)) {
+ struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
+ struct ep93xx_dma_desc, node);
+
+ /*
+ * We copy the callback parameters from the first descriptor
+ * to all the chained descriptors. This way we can call the
+ * callback without having to find out the first descriptor in
+ * the chain. Useful for cyclic transfers.
+ */
+ d->txd.callback = desc->txd.callback;
+ d->txd.callback_param = desc->txd.callback_param;
+
+ list_move_tail(&d->node, &edmac->active);
+ }
+}
+
+/* Called with @edmac->lock held and interrupts disabled */
+static struct ep93xx_dma_desc *
+ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
+{
+ return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
+}
+
+/**
+ * ep93xx_dma_advance_active - advances to the next active descriptor
+ * @edmac: channel
+ *
+ * Advances the active descriptor to the next one in @edmac->active and
+ * returns %true if there are still descriptors in the chain to process.
+ * Otherwise returns %false.
+ *
+ * When the channel is in cyclic mode always returns %true.
+ *
+ * Called with @edmac->lock held and interrupts disabled.
+ */
+static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
+{
+ list_rotate_left(&edmac->active);
+
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+ return true;
+
+ /*
+ * If txd.cookie is set it means that we are back in the first
+ * descriptor in the chain and hence done with it.
+ */
+ return !ep93xx_dma_get_active(edmac)->txd.cookie;
+}
+
+/*
+ * M2P DMA implementation
+ */
+
+static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
+{
+ writel(control, edmac->regs + M2P_CONTROL);
+ /*
+ * The EP93xx User's Guide states that we must perform a dummy read
+ * after a write to the control register.
+ */
+ readl(edmac->regs + M2P_CONTROL);
+}
+
+static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control;
+
+ writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);
+
+ control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
+ | M2P_CONTROL_ENABLE;
+ m2p_set_control(edmac, control);
+
+ return 0;
+}
+
+static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
+{
+ return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
+}
+
+static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+ u32 control;
+
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ while (m2p_channel_state(edmac) >= M2P_STATE_ON)
+ cpu_relax();
+
+ m2p_set_control(edmac, 0);
+
+ while (m2p_channel_state(edmac) == M2P_STATE_STALL)
+ cpu_relax();
+}
+
+static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ u32 bus_addr;
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ bus_addr = desc->src_addr;
+ else
+ bus_addr = desc->dst_addr;
+
+ if (edmac->buffer == 0) {
+ writel(desc->size, edmac->regs + M2P_MAXCNT0);
+ writel(bus_addr, edmac->regs + M2P_BASE0);
+ } else {
+ writel(desc->size, edmac->regs + M2P_MAXCNT1);
+ writel(bus_addr, edmac->regs + M2P_BASE1);
+ }
+
+ edmac->buffer ^= 1;
+}
+
+static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+ u32 control = readl(edmac->regs + M2P_CONTROL);
+
+ m2p_fill_desc(edmac);
+ control |= M2P_CONTROL_STALLINT;
+
+ if (ep93xx_dma_advance_active(edmac)) {
+ m2p_fill_desc(edmac);
+ control |= M2P_CONTROL_NFBINT;
+ }
+
+ m2p_set_control(edmac, control);
+}
+
+static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+ u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
+ u32 control;
+
+ if (irq_status & M2P_INTERRUPT_ERROR) {
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+ /* Clear the error interrupt */
+ writel(1, edmac->regs + M2P_INTERRUPT);
+
+ /*
+ * It seems that there is no easy way of reporting errors back
+ * to the client, so we just log the error here and continue as
+ * usual.
+ *
+ * Revisit this when there is a mechanism to report back the
+ * errors.
+ */
+ dev_err(chan2dev(edmac),
+ "DMA transfer failed! Details:\n"
+ "\tcookie : %d\n"
+ "\tsrc_addr : 0x%08x\n"
+ "\tdst_addr : 0x%08x\n"
+ "\tsize : %zu\n",
+ desc->txd.cookie, desc->src_addr, desc->dst_addr,
+ desc->size);
+ }
+
+ switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
+ case M2P_INTERRUPT_STALL:
+ /* Disable interrupts */
+ control = readl(edmac->regs + M2P_CONTROL);
+ control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+ m2p_set_control(edmac, control);
+
+ return INTERRUPT_DONE;
+
+ case M2P_INTERRUPT_NFB:
+ if (ep93xx_dma_advance_active(edmac))
+ m2p_fill_desc(edmac);
+
+ return INTERRUPT_NEXT_BUFFER;
+ }
+
+ return INTERRUPT_UNKNOWN;
+}
+
+/*
+ * M2M DMA implementation
+ *
+ * For M2M transfers we don't use NFB at all, because it simply doesn't work
+ * well with memcpy transfers. When both buffers are submitted it is extremely
+ * unlikely that an NFB interrupt is raised; instead a DONE interrupt arrives
+ * with both buffers already transferred, which means that we never got a
+ * chance to update the next buffer.
+ *
+ * So for now we "simulate" NFB by just submitting buffer after buffer
+ * without double buffering.
+ */
+
+static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
+{
+ const struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control = 0;
+
+ if (!data) {
+ /* This is memcpy channel, nothing to configure */
+ writel(control, edmac->regs + M2M_CONTROL);
+ return 0;
+ }
+
+ switch (data->port) {
+ case EP93XX_DMA_SSP:
+ /*
+ * This was found by experimenting - anything less than 5
+ * causes the channel to perform only a partial transfer,
+ * which leads to problems since the DONE interrupt is then
+ * never raised.
+ */
+ control = (5 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_NO_HDSK;
+
+ if (data->direction == DMA_TO_DEVICE) {
+ control |= M2M_CONTROL_DAH;
+ control |= M2M_CONTROL_TM_TX;
+ control |= M2M_CONTROL_RSS_SSPTX;
+ } else {
+ control |= M2M_CONTROL_SAH;
+ control |= M2M_CONTROL_TM_RX;
+ control |= M2M_CONTROL_RSS_SSPRX;
+ }
+ break;
+
+ case EP93XX_DMA_IDE:
+ /*
+ * This IDE part is totally untested. The values below are taken
+ * from the EP93xx User's Guide and might not be correct.
+ */
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
+
+ if (data->direction == DMA_TO_DEVICE) {
+ /* Worst case from the UG */
+ control = (3 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_DAH;
+ control |= M2M_CONTROL_TM_TX;
+ } else {
+ control = (2 << M2M_CONTROL_PWSC_SHIFT);
+ control |= M2M_CONTROL_SAH;
+ control |= M2M_CONTROL_TM_RX;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ writel(control, edmac->regs + M2M_CONTROL);
+ return 0;
+}
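A hedged sketch of how a client would describe the SSP slave channel to this code via chan->private; the struct fields follow the checks in ep93xx_dma_alloc_chan_resources() further down, while the filter function and names are illustrative:

	static struct ep93xx_dma_data ssp_tx_dma_data = {
		.port		= EP93XX_DMA_SSP,
		.direction	= DMA_TO_DEVICE,
		.name		= "ssp-tx",
	};

	static bool ssp_dma_filter(struct dma_chan *chan, void *filter_param)
	{
		chan->private = filter_param;	/* read back in m2m_hw_setup() */
		return true;
	}

	/* chan = dma_request_channel(mask, ssp_dma_filter, &ssp_tx_dma_data); */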
+
+static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
+{
+ /* Just disable the channel */
+ writel(0, edmac->regs + M2M_CONTROL);
+}
+
+static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+
+ if (edmac->buffer == 0) {
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
+ writel(desc->size, edmac->regs + M2M_BCR0);
+ } else {
+ writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
+ writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
+ writel(desc->size, edmac->regs + M2M_BCR1);
+ }
+
+ edmac->buffer ^= 1;
+}
+
+static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_data *data = edmac->chan.private;
+ u32 control = readl(edmac->regs + M2M_CONTROL);
+
+ /*
+ * Since we allow clients to configure PW (peripheral width) we always
+ * clear the PW bits here and then set them according to what is given
+ * in the runtime configuration.
+ */
+ control &= ~M2M_CONTROL_PW_MASK;
+ control |= edmac->runtime_ctrl;
+
+ m2m_fill_desc(edmac);
+ control |= M2M_CONTROL_DONEINT;
+
+ /*
+ * Now we can finally enable the channel. For M2M channel this must be
+ * done _after_ the BCRx registers are programmed.
+ */
+ control |= M2M_CONTROL_ENABLE;
+ writel(control, edmac->regs + M2M_CONTROL);
+
+ if (!data) {
+ /*
+ * For memcpy channels the software trigger must be asserted
+ * in order to start the memcpy operation.
+ */
+ control |= M2M_CONTROL_START;
+ writel(control, edmac->regs + M2M_CONTROL);
+ }
+}
+
+static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
+{
+ u32 control;
+
+ if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
+ return INTERRUPT_UNKNOWN;
+
+ /* Clear the DONE bit */
+ writel(0, edmac->regs + M2M_INTERRUPT);
+
+ /* Disable interrupts and the channel */
+ control = readl(edmac->regs + M2M_CONTROL);
+ control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
+ writel(control, edmac->regs + M2M_CONTROL);
+
+ /*
+ * Since we only get the DONE interrupt we have to find out ourselves
+ * whether there is still something to process. So we try to advance
+ * the chain and see whether it succeeds.
+ */
+ if (ep93xx_dma_advance_active(edmac)) {
+ edmac->edma->hw_submit(edmac);
+ return INTERRUPT_NEXT_BUFFER;
+ }
+
+ return INTERRUPT_DONE;
+}
+
+/*
+ * DMA engine API implementation
+ */
+
+static struct ep93xx_dma_desc *
+ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc, *_desc;
+ struct ep93xx_dma_desc *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
+ if (async_tx_test_ack(&desc->txd)) {
+ list_del_init(&desc->node);
+
+ /* Re-initialize the descriptor */
+ desc->src_addr = 0;
+ desc->dst_addr = 0;
+ desc->size = 0;
+ desc->complete = false;
+ desc->txd.cookie = 0;
+ desc->txd.callback = NULL;
+ desc->txd.callback_param = NULL;
+
+ ret = desc;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return ret;
+}
+
+static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
+ struct ep93xx_dma_desc *desc)
+{
+ if (desc) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ list_splice_init(&desc->tx_list, &edmac->free_list);
+ list_add(&desc->node, &edmac->free_list);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ }
+}
+
+/**
+ * ep93xx_dma_advance_work - start processing the next pending transaction
+ * @edmac: channel
+ *
+ * If we have pending transactions queued and we are currently idling, this
+ * function takes the next queued transaction from @edmac->queue and
+ * pushes it to the hardware for execution.
+ */
+static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *new;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return;
+ }
+
+ /* Take the next descriptor from the pending queue */
+ new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
+ list_del_init(&new->node);
+
+ ep93xx_dma_set_active(edmac, new);
+
+ /* Push it to the hardware */
+ edmac->edma->hw_submit(edmac);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+}
+
+static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
+{
+ struct device *dev = desc->txd.chan->device->dev;
+
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+ if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+ dma_unmap_single(dev, desc->src_addr, desc->size,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, desc->src_addr, desc->size,
+ DMA_TO_DEVICE);
+ }
+ if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+ if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+ dma_unmap_single(dev, desc->dst_addr, desc->size,
+ DMA_FROM_DEVICE);
+ else
+ dma_unmap_page(dev, desc->dst_addr, desc->size,
+ DMA_FROM_DEVICE);
+ }
+}
+
+static void ep93xx_dma_tasklet(unsigned long data)
+{
+ struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
+ struct ep93xx_dma_desc *desc, *d;
+ dma_async_tx_callback callback;
+ void *callback_param;
+ LIST_HEAD(list);
+
+ spin_lock_irq(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ spin_unlock_irq(&edmac->lock);
+
+ /* Pick up the next descriptor from the queue */
+ ep93xx_dma_advance_work(edmac);
+
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
+
+ /* Now we can release all the chained descriptors */
+ list_for_each_entry_safe(desc, d, &list, node) {
+ /*
+ * For the memcpy channels the API requires us to unmap the
+ * buffers unless requested otherwise.
+ */
+ if (!edmac->chan.private)
+ ep93xx_dma_unmap_buffers(desc);
+
+ ep93xx_dma_desc_put(edmac, desc);
+ }
+
+ if (callback)
+ callback(callback_param);
+}
+
+static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
+{
+ struct ep93xx_dma_chan *edmac = dev_id;
+ irqreturn_t ret = IRQ_HANDLED;
+
+ spin_lock(&edmac->lock);
+
+ switch (edmac->edma->hw_interrupt(edmac)) {
+ case INTERRUPT_DONE:
+ ep93xx_dma_get_active(edmac)->complete = true;
+ tasklet_schedule(&edmac->tasklet);
+ break;
+
+ case INTERRUPT_NEXT_BUFFER:
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
+ tasklet_schedule(&edmac->tasklet);
+ break;
+
+ default:
+ dev_warn(chan2dev(edmac), "unknown interrupt!\n");
+ ret = IRQ_NONE;
+ break;
+ }
+
+ spin_unlock(&edmac->lock);
+ return ret;
+}
+
+/**
+ * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
+ * @tx: descriptor to be executed
+ *
+ * Function will execute the given descriptor on the hardware or, if the
+ * hardware is busy, queue the descriptor to be executed later on. Returns
+ * a cookie which can be used to poll the status of the descriptor.
+ */
+static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
+ struct ep93xx_dma_desc *desc;
+ dma_cookie_t cookie;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+
+ cookie = edmac->chan.cookie;
+
+ if (++cookie < 0)
+ cookie = 1;
+
+ desc = container_of(tx, struct ep93xx_dma_desc, txd);
+
+ edmac->chan.cookie = cookie;
+ desc->txd.cookie = cookie;
+
+ /*
+ * If nothing is currently being processed, we push this descriptor
+ * directly to the hardware. Otherwise we put the descriptor
+ * to the pending queue.
+ */
+ if (list_empty(&edmac->active)) {
+ ep93xx_dma_set_active(edmac, desc);
+ edmac->edma->hw_submit(edmac);
+ } else {
+ list_add_tail(&desc->node, &edmac->queue);
+ }
+
+ spin_unlock_irqrestore(&edmac->lock, flags);
+ return cookie;
+}
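+
+/*
+ * Example (sketch, not part of the driver): a client that obtained a
+ * channel from dma_request_channel() would go through the usual
+ * prep/submit/issue steps roughly like this; dst, src and len are
+ * assumed to be DMA mapped already:
+ *
+ *	struct dma_async_tx_descriptor *txd;
+ *	dma_cookie_t cookie;
+ *
+ *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
+ *						    DMA_CTRL_ACK);
+ *	cookie = txd->tx_submit(txd);
+ *	chan->device->device_issue_pending(chan);
+ */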
+
+/**
+ * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
+ * @chan: channel to allocate resources
+ *
+ * Function allocates necessary resources for the given DMA channel and
+ * returns number of allocated descriptors for the channel. Negative errno
+ * is returned in case of failure.
+ */
+static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_data *data = chan->private;
+ const char *name = dma_chan_name(chan);
+ int ret, i;
+
+ /* Sanity check the channel parameters */
+ if (!edmac->edma->m2m) {
+ if (!data)
+ return -EINVAL;
+ if (data->port < EP93XX_DMA_I2S1 ||
+ data->port > EP93XX_DMA_IRDA)
+ return -EINVAL;
+ if (data->direction != ep93xx_dma_chan_direction(chan))
+ return -EINVAL;
+ } else {
+ if (data) {
+ switch (data->port) {
+ case EP93XX_DMA_SSP:
+ case EP93XX_DMA_IDE:
+ if (data->direction != DMA_TO_DEVICE &&
+ data->direction != DMA_FROM_DEVICE)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (data && data->name)
+ name = data->name;
+
+ ret = clk_enable(edmac->clk);
+ if (ret)
+ return ret;
+
+ ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
+ if (ret)
+ goto fail_clk_disable;
+
+ spin_lock_irq(&edmac->lock);
+ edmac->last_completed = 1;
+ edmac->chan.cookie = 1;
+ ret = edmac->edma->hw_setup(edmac);
+ spin_unlock_irq(&edmac->lock);
+
+ if (ret)
+ goto fail_free_irq;
+
+ for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
+ struct ep93xx_dma_desc *desc;
+
+ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "not enough descriptors\n");
+ break;
+ }
+
+ INIT_LIST_HEAD(&desc->tx_list);
+
+ dma_async_tx_descriptor_init(&desc->txd, chan);
+ desc->txd.flags = DMA_CTRL_ACK;
+ desc->txd.tx_submit = ep93xx_dma_tx_submit;
+
+ ep93xx_dma_desc_put(edmac, desc);
+ }
+
+ return i;
+
+fail_free_irq:
+ free_irq(edmac->irq, edmac);
+fail_clk_disable:
+ clk_disable(edmac->clk);
+
+ return ret;
+}
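+
+/*
+ * Example (sketch): for M2P channels the client supplies the channel
+ * parameters via @chan->private before the channel is used, typically
+ * from a dma_request_channel() filter function. The filter below is a
+ * hypothetical client-side helper, not part of this driver:
+ *
+ *	static bool my_filter(struct dma_chan *chan, void *filter_param)
+ *	{
+ *		chan->private = filter_param;	-- a struct ep93xx_dma_data
+ *		return true;
+ *	}
+ */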
+
+/**
+ * ep93xx_dma_free_chan_resources - release resources for the channel
+ * @chan: channel
+ *
+ * Function releases all the resources allocated for the given channel.
+ * The channel must be idle when this is called.
+ */
+static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *d;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ BUG_ON(!list_empty(&edmac->active));
+ BUG_ON(!list_empty(&edmac->queue));
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ edmac->edma->hw_shutdown(edmac);
+ edmac->runtime_addr = 0;
+ edmac->runtime_ctrl = 0;
+ edmac->buffer = 0;
+ list_splice_init(&edmac->free_list, &list);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ list_for_each_entry_safe(desc, d, &list, node)
+ kfree(desc);
+
+ clk_disable(edmac->clk);
+ free_irq(edmac->irq, edmac);
+}
+
+/**
+ * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
+ * @chan: channel
+ * @dest: destination bus address
+ * @src: source bus address
+ * @len: size of the transaction
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
+ dma_addr_t src, size_t len, unsigned long flags)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ size_t bytes, offset;
+
+ first = NULL;
+ for (offset = 0; offset < len; offset += bytes) {
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couln't get descriptor\n");
+ goto fail;
+ }
+
+ bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
+
+ desc->src_addr = src + offset;
+ desc->dst_addr = dest + offset;
+ desc->size = bytes;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ first->txd.flags = flags;
+
+ return &first->txd;
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
+ * @chan: channel
+ * @sgl: list of buffers to transfer
+ * @sg_len: number of entries in @sgl
+ * @dir: direction of the DMA transfer
+ * @flags: flags for the descriptor
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_data_direction dir,
+ unsigned long flags)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ struct scatterlist *sg;
+ int i;
+
+ if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+ dev_warn(chan2dev(edmac),
+ "channel was configured with different direction\n");
+ return NULL;
+ }
+
+ if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+ dev_warn(chan2dev(edmac),
+ "channel is already used for cyclic transfers\n");
+ return NULL;
+ }
+
+ first = NULL;
+ for_each_sg(sgl, sg, sg_len, i) {
+ size_t sg_len = sg_dma_len(sg);
+
+ if (sg_len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big transfer size %d\n",
+ sg_len);
+ goto fail;
+ }
+
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couln't get descriptor\n");
+ goto fail;
+ }
+
+ if (dir == DMA_TO_DEVICE) {
+ desc->src_addr = sg_dma_address(sg);
+ desc->dst_addr = edmac->runtime_addr;
+ } else {
+ desc->src_addr = edmac->runtime_addr;
+ desc->dst_addr = sg_dma_address(sg);
+ }
+ desc->size = sg_len;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+ first->txd.flags = flags;
+
+ return &first->txd;
+
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
+
+/**
+ * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
+ * @chan: channel
+ * @dma_addr: DMA mapped address of the buffer
+ * @buf_len: length of the buffer (in bytes)
+ * @period_len: length of a single period
+ * @dir: direction of the operation
+ *
+ * Prepares a descriptor for cyclic DMA operation. This means that once the
+ * descriptor is submitted, we will be submitting @period_len sized chunks
+ * and calling the callback once each period has elapsed. The transfer
+ * terminates only when the client calls dmaengine_terminate_all() for this
+ * channel.
+ *
+ * Returns a valid DMA descriptor or %NULL in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
+ size_t buf_len, size_t period_len,
+ enum dma_data_direction dir)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct ep93xx_dma_desc *desc, *first;
+ size_t offset = 0;
+
+ if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
+ dev_warn(chan2dev(edmac),
+ "channel was configured with different direction\n");
+ return NULL;
+ }
+
+ if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
+ dev_warn(chan2dev(edmac),
+ "channel is already used for cyclic transfers\n");
+ return NULL;
+ }
+
+ if (period_len > DMA_MAX_CHAN_BYTES) {
+ dev_warn(chan2dev(edmac), "too big period length %d\n",
+ period_len);
+ return NULL;
+ }
+
+ /* Split the buffer into period size chunks */
+ first = NULL;
+ for (offset = 0; offset < buf_len; offset += period_len) {
+ desc = ep93xx_dma_desc_get(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "couln't get descriptor\n");
+ goto fail;
+ }
+
+ if (dir == DMA_TO_DEVICE) {
+ desc->src_addr = dma_addr + offset;
+ desc->dst_addr = edmac->runtime_addr;
+ } else {
+ desc->src_addr = edmac->runtime_addr;
+ desc->dst_addr = dma_addr + offset;
+ }
+
+ desc->size = period_len;
+
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->tx_list);
+ }
+
+ first->txd.cookie = -EBUSY;
+
+ return &first->txd;
+
+fail:
+ ep93xx_dma_desc_put(edmac, first);
+ return NULL;
+}
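+
+/*
+ * Example (sketch): a typical cyclic client, such as an audio driver,
+ * might set up a two-period ring over an already mapped buffer roughly
+ * like this; all names are illustrative:
+ *
+ *	txd = chan->device->device_prep_dma_cyclic(chan, buf_phys,
+ *						    2 * period, period,
+ *						    DMA_TO_DEVICE);
+ *	txd->callback = my_period_elapsed;
+ *	txd->callback_param = my_ctx;
+ *	txd->tx_submit(txd);
+ */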
+
+/**
+ * ep93xx_dma_terminate_all - terminate all transactions
+ * @edmac: channel
+ *
+ * Stops all DMA transactions. All descriptors are put back to the
+ * @edmac->free_list and callbacks are _not_ called.
+ */
+static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
+{
+ struct ep93xx_dma_desc *desc, *_d;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ /* First we disable and flush the DMA channel */
+ edmac->edma->hw_shutdown(edmac);
+ clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
+ list_splice_init(&edmac->active, &list);
+ list_splice_init(&edmac->queue, &list);
+ /*
+ * We then re-enable the channel. This way we can continue submitting
+ * the descriptors by just calling ->hw_submit() again.
+ */
+ edmac->edma->hw_setup(edmac);
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ list_for_each_entry_safe(desc, _d, &list, node)
+ ep93xx_dma_desc_put(edmac, desc);
+
+ return 0;
+}
+
+static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
+ struct dma_slave_config *config)
+{
+ enum dma_slave_buswidth width;
+ unsigned long flags;
+ u32 addr, ctrl;
+
+ if (!edmac->edma->m2m)
+ return -EINVAL;
+
+ switch (config->direction) {
+ case DMA_FROM_DEVICE:
+ width = config->src_addr_width;
+ addr = config->src_addr;
+ break;
+
+ case DMA_TO_DEVICE:
+ width = config->dst_addr_width;
+ addr = config->dst_addr;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ switch (width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ ctrl = 0;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ ctrl = M2M_CONTROL_PW_16;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ ctrl = M2M_CONTROL_PW_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ edmac->runtime_addr = addr;
+ edmac->runtime_ctrl = ctrl;
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ return 0;
+}
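+
+/*
+ * Example (sketch): an M2M client would configure the channel before
+ * preparing any slave transfers; the register address and width below
+ * are purely illustrative:
+ *
+ *	struct dma_slave_config conf = {
+ *		.direction	= DMA_TO_DEVICE,
+ *		.dst_addr	= my_dev_phys_base + my_data_reg,
+ *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
+ *	};
+ *
+ *	dmaengine_slave_config(chan, &conf);
+ */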
+
+/**
+ * ep93xx_dma_control - manipulate all pending operations on a channel
+ * @chan: channel
+ * @cmd: control command to perform
+ * @arg: optional argument
+ *
+ * Controls the channel. Function returns %0 in case of success or negative
+ * error in case of failure.
+ */
+static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ struct dma_slave_config *config;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return ep93xx_dma_terminate_all(edmac);
+
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return ep93xx_dma_slave_config(edmac, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/**
+ * ep93xx_dma_tx_status - check if a transaction is completed
+ * @chan: channel
+ * @cookie: transaction specific cookie
+ * @state: state of the transaction is stored here if given
+ *
+ * This function can be used to query state of a given transaction.
+ */
+static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *state)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+ dma_cookie_t last_used, last_completed;
+ enum dma_status ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&edmac->lock, flags);
+ last_used = chan->cookie;
+ last_completed = edmac->last_completed;
+ spin_unlock_irqrestore(&edmac->lock, flags);
+
+ ret = dma_async_is_complete(cookie, last_completed, last_used);
+ dma_set_tx_state(state, last_completed, last_used, 0);
+
+ return ret;
+}
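+
+/*
+ * Example (sketch): the cookie returned by tx_submit() can be polled
+ * through the generic dmaengine helper, which ends up in this function:
+ *
+ *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
+ *	if (status == DMA_SUCCESS)
+ *		the transaction has completed
+ */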
+
+/**
+ * ep93xx_dma_issue_pending - push pending transactions to the hardware
+ * @chan: channel
+ *
+ * When this function is called, all pending transactions are pushed to the
+ * hardware and executed.
+ */
+static void ep93xx_dma_issue_pending(struct dma_chan *chan)
+{
+ ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
+}
+
+static int __init ep93xx_dma_probe(struct platform_device *pdev)
+{
+ struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct ep93xx_dma_engine *edma;
+ struct dma_device *dma_dev;
+ size_t edma_size;
+ int ret, i;
+
+ edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
+ edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
+ if (!edma)
+ return -ENOMEM;
+
+ dma_dev = &edma->dma_dev;
+ edma->m2m = platform_get_device_id(pdev)->driver_data;
+ edma->num_channels = pdata->num_channels;
+
+ INIT_LIST_HEAD(&dma_dev->channels);
+ for (i = 0; i < pdata->num_channels; i++) {
+ const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
+ struct ep93xx_dma_chan *edmac = &edma->channels[i];
+
+ edmac->chan.device = dma_dev;
+ edmac->regs = cdata->base;
+ edmac->irq = cdata->irq;
+ edmac->edma = edma;
+
+ edmac->clk = clk_get(NULL, cdata->name);
+ if (IS_ERR(edmac->clk)) {
+ dev_warn(&pdev->dev, "failed to get clock for %s\n",
+ cdata->name);
+ continue;
+ }
+
+ spin_lock_init(&edmac->lock);
+ INIT_LIST_HEAD(&edmac->active);
+ INIT_LIST_HEAD(&edmac->queue);
+ INIT_LIST_HEAD(&edmac->free_list);
+ tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
+ (unsigned long)edmac);
+
+ list_add_tail(&edmac->chan.device_node,
+ &dma_dev->channels);
+ }
+
+ dma_cap_zero(dma_dev->cap_mask);
+ dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+
+ dma_dev->dev = &pdev->dev;
+ dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
+ dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
+ dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
+ dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
+ dma_dev->device_control = ep93xx_dma_control;
+ dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
+ dma_dev->device_tx_status = ep93xx_dma_tx_status;
+
+ dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
+
+ if (edma->m2m) {
+ dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+ dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
+
+ edma->hw_setup = m2m_hw_setup;
+ edma->hw_shutdown = m2m_hw_shutdown;
+ edma->hw_submit = m2m_hw_submit;
+ edma->hw_interrupt = m2m_hw_interrupt;
+ } else {
+ dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+
+ edma->hw_setup = m2p_hw_setup;
+ edma->hw_shutdown = m2p_hw_shutdown;
+ edma->hw_submit = m2p_hw_submit;
+ edma->hw_interrupt = m2p_hw_interrupt;
+ }
+
+ ret = dma_async_device_register(dma_dev);
+ if (unlikely(ret)) {
+ for (i = 0; i < edma->num_channels; i++) {
+ struct ep93xx_dma_chan *edmac = &edma->channels[i];
+ if (!IS_ERR_OR_NULL(edmac->clk))
+ clk_put(edmac->clk);
+ }
+ kfree(edma);
+ } else {
+ dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
+ edma->m2m ? "M" : "P");
+ }
+
+ return ret;
+}
+
+static struct platform_device_id ep93xx_dma_driver_ids[] = {
+ { "ep93xx-dma-m2p", 0 },
+ { "ep93xx-dma-m2m", 1 },
+ { },
+};
+
+static struct platform_driver ep93xx_dma_driver = {
+ .driver = {
+ .name = "ep93xx-dma",
+ },
+ .id_table = ep93xx_dma_driver_ids,
+};
+
+static int __init ep93xx_dma_module_init(void)
+{
+ return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
+}
+subsys_initcall(ep93xx_dma_module_init);
+
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_DESCRIPTION("EP93xx DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index b1c11775839..e6ad3bb6c1a 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -253,14 +253,11 @@ static int fw_device_op_open(struct inode *inode, struct file *file)
init_waitqueue_head(&client->wait);
init_waitqueue_head(&client->tx_flush_wait);
INIT_LIST_HEAD(&client->phy_receiver_link);
+ INIT_LIST_HEAD(&client->link);
kref_init(&client->kref);
file->private_data = client;
- mutex_lock(&device->client_list_mutex);
- list_add_tail(&client->link, &device->client_list);
- mutex_unlock(&device->client_list_mutex);
-
return nonseekable_open(inode, file);
}
@@ -451,15 +448,20 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
if (ret != 0)
return -EFAULT;
+ mutex_lock(&client->device->client_list_mutex);
+
client->bus_reset_closure = a->bus_reset_closure;
if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
- if (copy_to_user(u64_to_uptr(a->bus_reset),
- &bus_reset, sizeof(bus_reset)))
- return -EFAULT;
+ ret = copy_to_user(u64_to_uptr(a->bus_reset),
+ &bus_reset, sizeof(bus_reset));
}
+ if (ret == 0 && list_empty(&client->link))
+ list_add_tail(&client->link, &client->device->client_list);
- return 0;
+ mutex_unlock(&client->device->client_list_mutex);
+
+ return ret ? -EFAULT : 0;
}
static int add_client_resource(struct client *client,
@@ -1583,7 +1585,7 @@ static int dispatch_ioctl(struct client *client,
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
_IOC_SIZE(cmd) > sizeof(buffer))
- return -EINVAL;
+ return -ENOTTY;
if (_IOC_DIR(cmd) == _IOC_READ)
memset(&buffer, 0, _IOC_SIZE(cmd));
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index eced1c25bf5..03a7a85d042 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -7,6 +7,7 @@
*/
#include <linux/bug.h>
+#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ethtool.h>
@@ -73,7 +74,7 @@ struct rfc2734_arp {
__be32 fifo_lo; /* lo 32bits of sender's FIFO addr */
__be32 sip; /* Sender's IP Address */
__be32 tip; /* IP Address of requested hw addr */
-} __attribute__((packed));
+} __packed;
/* This header format is specific to this driver implementation. */
#define FWNET_ALEN 8
@@ -81,7 +82,7 @@ struct rfc2734_arp {
struct fwnet_header {
u8 h_dest[FWNET_ALEN]; /* destination address */
__be16 h_proto; /* packet type ID field */
-} __attribute__((packed));
+} __packed;
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ebb897329c1..bcf792fac44 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -253,7 +253,6 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
#define OHCI1394_REGISTER_SIZE 0x800
-#define OHCI_LOOP_COUNT 500
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
#define OHCI_TCODE_PHY_PACKET 0x0e
@@ -514,6 +513,12 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
+/*
+ * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
+ * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
+ * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
+ * directly. Exceptions are intrinsically serialized contexts like pci_probe.
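+ *
+ * A sketch of a correctly serialized caller (illustrative only):
+ *
+ *	mutex_lock(&ohci->phy_reg_mutex);
+ *	ret = read_phy_reg(ohci, addr);
+ *	mutex_unlock(&ohci->phy_reg_mutex);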
+ */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
u32 val;
@@ -522,6 +527,9 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr)
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
+
if (val & OHCI1394_PhyControl_ReadDone)
return OHCI1394_PhyControl_ReadData(val);
@@ -545,6 +553,9 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
OHCI1394_PhyControl_Write(addr, val));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
+
if (!(val & OHCI1394_PhyControl_WritePending))
return 0;
@@ -630,7 +641,6 @@ static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
ctx->last_buffer_index = index;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ctx->ohci);
}
static void ar_context_release(struct ar_context *ctx)
@@ -1002,7 +1012,6 @@ static void ar_context_run(struct ar_context *ctx)
reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
- flush_writes(ctx->ohci);
}
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
@@ -1202,14 +1211,14 @@ static void context_stop(struct context *ctx)
reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
ctx->running = false;
- flush_writes(ctx->ohci);
- for (i = 0; i < 10; i++) {
+ for (i = 0; i < 1000; i++) {
reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
if ((reg & CONTEXT_ACTIVE) == 0)
return;
- mdelay(1);
+ if (i)
+ udelay(10);
}
fw_error("Error: DMA context still active (0x%08x)\n", reg);
}
@@ -1346,12 +1355,10 @@ static int at_context_queue_packet(struct context *ctx,
context_append(ctx, d, z, 4 - z);
- if (ctx->running) {
+ if (ctx->running)
reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ohci);
- } else {
+ else
context_run(ctx, 0);
- }
return 0;
}
@@ -1960,14 +1967,18 @@ static irqreturn_t irq_handler(int irq, void *data)
static int software_reset(struct fw_ohci *ohci)
{
+ u32 val;
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
+ for (i = 0; i < 500; i++) {
+ val = reg_read(ohci, OHCI1394_HCControlSet);
+ if (!~val)
+ return -ENODEV; /* Card was ejected. */
- for (i = 0; i < OHCI_LOOP_COUNT; i++) {
- if ((reg_read(ohci, OHCI1394_HCControlSet) &
- OHCI1394_HCControl_softReset) == 0)
+ if (!(val & OHCI1394_HCControl_softReset))
return 0;
+
msleep(1);
}
@@ -2197,7 +2208,9 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_LinkControl_rcvPhyPkt);
ar_context_run(&ohci->ar_request_ctx);
- ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */
+ ar_context_run(&ohci->ar_response_ctx);
+
+ flush_writes(ohci);
/* We are ready to go, reset bus to finish initialization. */
fw_schedule_bus_reset(&ohci->card, false, true);
@@ -3129,7 +3142,6 @@ static void ohci_flush_queue_iso(struct fw_iso_context *base)
&container_of(base, struct iso_context, base)->context;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
- flush_writes(ctx->ohci);
}
static const struct fw_card_driver ohci_driver = {
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 2967002a9f8..363498697c2 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -63,33 +63,58 @@ config GPIO_SYSFS
Kernel drivers may also request that a particular GPIO be
exported to userspace; this can be useful when debugging.
+config GPIO_GENERIC
+ tristate
+
# put drivers in the right section, in alphabetical order
+config GPIO_DA9052
+ tristate "Dialog DA9052 GPIO"
+ depends on PMIC_DA9052
+ help
+ Say yes here to enable the GPIO driver for the DA9052 chip.
+
config GPIO_MAX730X
tristate
comment "Memory mapped GPIO drivers:"
-config GPIO_BASIC_MMIO_CORE
- tristate
- help
- Provides core functionality for basic memory-mapped GPIO controllers.
-
-config GPIO_BASIC_MMIO
- tristate "Basic memory-mapped GPIO controllers support"
- select GPIO_BASIC_MMIO_CORE
+config GPIO_GENERIC_PLATFORM
+ tristate "Generic memory-mapped GPIO controller support (MMIO platform device)"
+ select GPIO_GENERIC
help
- Say yes here to support basic memory-mapped GPIO controllers.
+ Say yes here to support basic platform_device memory-mapped GPIO controllers.
config GPIO_IT8761E
tristate "IT8761E GPIO support"
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
+config GPIO_EP93XX
+ def_bool y
+ depends on ARCH_EP93XX
+ select GPIO_GENERIC
+
config GPIO_EXYNOS4
def_bool y
depends on CPU_EXYNOS4210
+config GPIO_MPC5200
+ def_bool y
+ depends on PPC_MPC52xx
+
+config GPIO_MXC
+ def_bool y
+ depends on ARCH_MXC
+ select GPIO_GENERIC
+ select GENERIC_IRQ_CHIP
+
+config GPIO_MXS
+ def_bool y
+ depends on ARCH_MXS
+ select GPIO_GENERIC
+ select GENERIC_IRQ_CHIP
+
config GPIO_PLAT_SAMSUNG
def_bool y
depends on SAMSUNG_GPIOLIB_4BIT
@@ -137,9 +162,6 @@ config GPIO_SCH
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
- This driver can also be built as a module. If so, the module
- will be called sch-gpio.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on MFD_SUPPORT && PCI
@@ -202,9 +224,6 @@ config GPIO_PCA953X
16 bits: pca9535, pca9539, pca9555, tca6416
- This driver can also be built as a module. If so, the module
- will be called pca953x.
-
config GPIO_PCA953X_IRQ
bool "Interrupt controller support for PCA953x"
depends on GPIO_PCA953X=y
@@ -296,17 +315,12 @@ config GPIO_ADP5520
This option enables support for on-chip GPIO found
on Analog Devices ADP5520 PMICs.
- To compile this driver as a module, choose M here: the module will
- be called adp5520-gpio.
-
config GPIO_ADP5588
tristate "ADP5588 I2C GPIO expander"
depends on I2C
help
This option enables support for 18 GPIOs found
on Analog Devices ADP5588 GPIO Expanders.
- To compile this driver as a module, choose M here: the module will be
- called adp5588-gpio.
config GPIO_ADP5588_IRQ
bool "Interrupt controller support for ADP5588"
@@ -398,10 +412,11 @@ config GPIO_MAX7301
GPIO driver for Maxim MAX7301 SPI-based GPIO expander.
config GPIO_MCP23S08
- tristate "Microchip MCP23Sxx I/O expander"
- depends on SPI_MASTER
+ tristate "Microchip MCP23xxx I/O expander"
+ depends on SPI_MASTER || I2C
help
- SPI driver for Microchip MCP23S08/MPC23S17 I/O expanders.
+ SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
+ I/O expanders.
This provides a GPIO interface supporting inputs and outputs.
config GPIO_MC33880
@@ -428,9 +443,6 @@ config GPIO_UCB1400
This enables support for the Philips UCB1400 GPIO pins.
The UCB1400 is an AC97 audio codec.
- To compile this driver as a module, choose M here: the
- module will be called ucb1400_gpio.
-
comment "MODULbus GPIO expanders:"
config GPIO_JANZ_TTL
@@ -441,7 +453,7 @@ config GPIO_JANZ_TTL
This driver provides support for driving the pins in output
mode only. Input mode is not supported.
-config AB8500_GPIO
+config GPIO_AB8500
bool "ST-Ericsson AB8500 Mixed Signal Circuit gpio functions"
depends on AB8500_CORE && BROKEN
help
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index b605f8ec6fb..72071125139 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -4,47 +4,56 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
-obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
-obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
-obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o
-obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o
+# Device drivers. Generally keep list sorted alphabetically
+obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
+
+obj-$(CONFIG_GPIO_74X164) += gpio-74x164.o
+obj-$(CONFIG_GPIO_AB8500) += gpio-ab8500.o
+obj-$(CONFIG_GPIO_ADP5520) += gpio-adp5520.o
+obj-$(CONFIG_GPIO_ADP5588) += gpio-adp5588.o
+obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o
+obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o
+obj-$(CONFIG_GPIO_DA9052) += gpio-da9052.o
+obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_EXYNOS4) += gpio-exynos4.o
+obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
+obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
+obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
+obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
+obj-$(CONFIG_GPIO_MAX7300) += gpio-max7300.o
+obj-$(CONFIG_GPIO_MAX7301) += gpio-max7301.o
+obj-$(CONFIG_GPIO_MAX732X) += gpio-max732x.o
+obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
+obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
+obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
+obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
+obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o
+obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o
+obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
+obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
+obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
+obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
+obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
+obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
+
obj-$(CONFIG_GPIO_PLAT_SAMSUNG) += gpio-plat-samsung.o
obj-$(CONFIG_GPIO_S5PC100) += gpio-s5pc100.o
obj-$(CONFIG_GPIO_S5PV210) += gpio-s5pv210.o
-obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
-obj-$(CONFIG_GPIO_MAX730X) += max730x.o
-obj-$(CONFIG_GPIO_MAX7300) += max7300.o
-obj-$(CONFIG_GPIO_MAX7301) += max7301.o
-obj-$(CONFIG_GPIO_MAX732X) += max732x.o
-obj-$(CONFIG_GPIO_MC33880) += mc33880.o
-obj-$(CONFIG_GPIO_MCP23S08) += mcp23s08.o
-obj-$(CONFIG_GPIO_74X164) += 74x164.o
-obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o
-obj-$(CONFIG_GPIO_PCA953X) += pca953x.o
-obj-$(CONFIG_GPIO_PCF857X) += pcf857x.o
-obj-$(CONFIG_GPIO_PCH) += pch_gpio.o
-obj-$(CONFIG_GPIO_PL061) += pl061.o
-obj-$(CONFIG_GPIO_STMPE) += stmpe-gpio.o
-obj-$(CONFIG_GPIO_TC3589X) += tc3589x-gpio.o
-obj-$(CONFIG_GPIO_TIMBERDALE) += timbgpio.o
-obj-$(CONFIG_GPIO_TWL4030) += twl4030-gpio.o
-obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
-obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
-obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
-obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
-obj-$(CONFIG_GPIO_IT8761E) += it8761e_gpio.o
-obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
-obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
-obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
-obj-$(CONFIG_GPIO_WM8994) += wm8994-gpio.o
-obj-$(CONFIG_GPIO_SCH) += sch_gpio.o
+
+obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
+obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
+obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
+obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
+obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
+obj-$(CONFIG_GPIO_TIMBERDALE) += gpio-timberdale.o
+obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
+obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
obj-$(CONFIG_MACH_U300) += gpio-u300.o
-obj-$(CONFIG_PLAT_NOMADIK) += gpio-nomadik.o
-obj-$(CONFIG_GPIO_RDC321X) += rdc321x-gpio.o
-obj-$(CONFIG_GPIO_JANZ_TTL) += janz-ttl.o
-obj-$(CONFIG_GPIO_SX150X) += sx150x.o
-obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o
-obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o
-obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o
-obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o
+obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
+obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
+obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
+obj-$(CONFIG_GPIO_WM831X) += gpio-wm831x.o
+obj-$(CONFIG_GPIO_WM8350) += gpio-wm8350.o
+obj-$(CONFIG_GPIO_WM8994) += gpio-wm8994.o
+obj-$(CONFIG_GPIO_XILINX) += gpio-xilinx.o
diff --git a/drivers/gpio/74x164.c b/drivers/gpio/gpio-74x164.c
index 84e07021983..ff525c0958d 100644
--- a/drivers/gpio/74x164.c
+++ b/drivers/gpio/gpio-74x164.c
@@ -16,9 +16,6 @@
#include <linux/gpio.h>
#include <linux/slab.h>
-#define GEN_74X164_GPIO_COUNT 8
-
-
struct gen_74x164_chip {
struct spi_device *spi;
struct gpio_chip gpio_chip;
@@ -26,9 +23,7 @@ struct gen_74x164_chip {
u8 port_config;
};
-static void gen_74x164_set_value(struct gpio_chip *, unsigned, int);
-
-static struct gen_74x164_chip *gpio_to_chip(struct gpio_chip *gc)
+static struct gen_74x164_chip *gpio_to_74x164_chip(struct gpio_chip *gc)
{
return container_of(gc, struct gen_74x164_chip, gpio_chip);
}
@@ -39,16 +34,9 @@ static int __gen_74x164_write_config(struct gen_74x164_chip *chip)
&chip->port_config, sizeof(chip->port_config));
}
-static int gen_74x164_direction_output(struct gpio_chip *gc,
- unsigned offset, int val)
-{
- gen_74x164_set_value(gc, offset, val);
- return 0;
-}
-
static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
{
- struct gen_74x164_chip *chip = gpio_to_chip(gc);
+ struct gen_74x164_chip *chip = gpio_to_74x164_chip(gc);
int ret;
mutex_lock(&chip->lock);
@@ -61,7 +49,7 @@ static int gen_74x164_get_value(struct gpio_chip *gc, unsigned offset)
static void gen_74x164_set_value(struct gpio_chip *gc,
unsigned offset, int val)
{
- struct gen_74x164_chip *chip = gpio_to_chip(gc);
+ struct gen_74x164_chip *chip = gpio_to_74x164_chip(gc);
mutex_lock(&chip->lock);
if (val)
@@ -73,6 +61,13 @@ static void gen_74x164_set_value(struct gpio_chip *gc,
mutex_unlock(&chip->lock);
}
+static int gen_74x164_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ gen_74x164_set_value(gc, offset, val);
+ return 0;
+}
+
static int __devinit gen_74x164_probe(struct spi_device *spi)
{
struct gen_74x164_chip *chip;
@@ -104,12 +99,12 @@ static int __devinit gen_74x164_probe(struct spi_device *spi)
chip->spi = spi;
- chip->gpio_chip.label = GEN_74X164_DRIVER_NAME,
- chip->gpio_chip.direction_output = gen_74x164_direction_output;
+ chip->gpio_chip.label = spi->modalias;
+ chip->gpio_chip.direction_output = gen_74x164_direction_output;
chip->gpio_chip.get = gen_74x164_get_value;
chip->gpio_chip.set = gen_74x164_set_value;
chip->gpio_chip.base = pdata->base;
- chip->gpio_chip.ngpio = GEN_74X164_GPIO_COUNT;
+ chip->gpio_chip.ngpio = 8;
chip->gpio_chip.can_sleep = 1;
chip->gpio_chip.dev = &spi->dev;
chip->gpio_chip.owner = THIS_MODULE;
@@ -157,7 +152,7 @@ static int __devexit gen_74x164_remove(struct spi_device *spi)
static struct spi_driver gen_74x164_driver = {
.driver = {
- .name = GEN_74X164_DRIVER_NAME,
+ .name = "74x164",
.owner = THIS_MODULE,
},
.probe = gen_74x164_probe,
diff --git a/drivers/gpio/ab8500-gpio.c b/drivers/gpio/gpio-ab8500.c
index 970053c89ff..970053c89ff 100644
--- a/drivers/gpio/ab8500-gpio.c
+++ b/drivers/gpio/gpio-ab8500.c
diff --git a/drivers/gpio/adp5520-gpio.c b/drivers/gpio/gpio-adp5520.c
index 9f278153700..9f278153700 100644
--- a/drivers/gpio/adp5520-gpio.c
+++ b/drivers/gpio/gpio-adp5520.c
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/gpio-adp5588.c
index 3525ad91877..3525ad91877 100644
--- a/drivers/gpio/adp5588-gpio.c
+++ b/drivers/gpio/gpio-adp5588.c
diff --git a/drivers/gpio/bt8xxgpio.c b/drivers/gpio/gpio-bt8xx.c
index aa4f09ad3ce..aa4f09ad3ce 100644
--- a/drivers/gpio/bt8xxgpio.c
+++ b/drivers/gpio/gpio-bt8xx.c
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/gpio-cs5535.c
index 6e16cba56ad..6e16cba56ad 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/gpio-cs5535.c
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
new file mode 100644
index 00000000000..038f5eb8b13
--- /dev/null
+++ b/drivers/gpio/gpio-da9052.c
@@ -0,0 +1,277 @@
+/*
+ * GPIO Driver for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/syscalls.h>
+#include <linux/seq_file.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/gpio.h>
+
+#define DA9052_INPUT 1
+#define DA9052_OUTPUT_OPENDRAIN 2
+#define DA9052_OUTPUT_PUSHPULL 3
+
+#define DA9052_SUPPLY_VDD_IO1 0
+
+#define DA9052_DEBOUNCING_OFF 0
+#define DA9052_DEBOUNCING_ON 1
+
+#define DA9052_OUTPUT_LOWLEVEL 0
+
+#define DA9052_ACTIVE_LOW 0
+#define DA9052_ACTIVE_HIGH 1
+
+#define DA9052_GPIO_MAX_PORTS_PER_REGISTER 8
+#define DA9052_GPIO_SHIFT_COUNT(no) ((no) % 8)
+#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
+#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
+#define DA9052_GPIO_NIBBLE_SHIFT 4
+
+struct da9052_gpio {
+ struct da9052 *da9052;
+ struct gpio_chip gp;
+};
+
+static inline struct da9052_gpio *to_da9052_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct da9052_gpio, gp);
+}
+
+static unsigned char da9052_gpio_port_odd(unsigned offset)
+{
+ return offset % 2;
+}
+
+static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ int da9052_port_direction = 0;
+ int ret;
+
+ ret = da9052_reg_read(gpio->da9052,
+ DA9052_GPIO_0_1_REG + (offset >> 1));
+ if (ret < 0)
+ return ret;
+
+ if (da9052_gpio_port_odd(offset)) {
+ da9052_port_direction = ret & DA9052_GPIO_ODD_PORT_PIN;
+ da9052_port_direction >>= 4;
+ } else {
+ da9052_port_direction = ret & DA9052_GPIO_EVEN_PORT_PIN;
+ }
+
+ switch (da9052_port_direction) {
+ case DA9052_INPUT:
+ if (offset < DA9052_GPIO_MAX_PORTS_PER_REGISTER)
+ ret = da9052_reg_read(gpio->da9052,
+ DA9052_STATUS_C_REG);
+ else
+ ret = da9052_reg_read(gpio->da9052,
+ DA9052_STATUS_D_REG);
+ if (ret < 0)
+ return ret;
+ if (ret & (1 << DA9052_GPIO_SHIFT_COUNT(offset)))
+ return 1;
+ else
+ return 0;
+ case DA9052_OUTPUT_PUSHPULL:
+ if (da9052_gpio_port_odd(offset))
+ return ret & DA9052_GPIO_ODD_PORT_MODE;
+ else
+ return ret & DA9052_GPIO_EVEN_PORT_MODE;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ unsigned char register_value = 0;
+ int ret;
+
+ if (da9052_gpio_port_odd(offset)) {
+ if (value) {
+ register_value = DA9052_GPIO_ODD_PORT_MODE;
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_ODD_PORT_MODE,
+ register_value);
+ if (ret != 0)
+ dev_err(gpio->da9052->dev,
+ "Failed to updated gpio odd reg,%d",
+ ret);
+ }
+ } else {
+ if (value) {
+ register_value = DA9052_GPIO_EVEN_PORT_MODE;
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_EVEN_PORT_MODE,
+ register_value);
+ if (ret != 0)
+ dev_err(gpio->da9052->dev,
+ "Failed to updated gpio even reg,%d",
+ ret);
+ }
+ }
+}
+
+static int da9052_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ unsigned char register_value;
+ int ret;
+
+ /* Format: function (2 bits), type (1 bit), mode (1 bit) */
+ register_value = DA9052_INPUT | DA9052_ACTIVE_LOW << 2 |
+ DA9052_DEBOUNCING_ON << 3;
+
+ if (da9052_gpio_port_odd(offset))
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_UPPER_NIBBLE,
+ (register_value <<
+ DA9052_GPIO_NIBBLE_SHIFT));
+ else
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_LOWER_NIBBLE,
+ register_value);
+
+ return ret;
+}
+
+static int da9052_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int value)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ unsigned char register_value;
+ int ret;
+
+ /* Format: function (2 bits), type (1 bit), mode (1 bit) */
+ register_value = DA9052_OUTPUT_PUSHPULL | DA9052_SUPPLY_VDD_IO1 << 2 |
+ value << 3;
+
+ if (da9052_gpio_port_odd(offset))
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_UPPER_NIBBLE,
+ (register_value <<
+ DA9052_GPIO_NIBBLE_SHIFT));
+ else
+ ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
+ DA9052_GPIO_0_1_REG,
+ DA9052_GPIO_MASK_LOWER_NIBBLE,
+ register_value);
+
+ return ret;
+}
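+
+/*
+ * Example (sketch): with the encoding above, configuring a GPIO as a
+ * push-pull output driven high packs to 0x3 | 0x0 << 2 | 0x1 << 3 = 0xb;
+ * for an odd numbered GPIO this lands in the upper nibble of the shared
+ * register, i.e. 0xb0 after the DA9052_GPIO_NIBBLE_SHIFT.
+ */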
+
+static int da9052_gpio_to_irq(struct gpio_chip *gc, u32 offset)
+{
+ struct da9052_gpio *gpio = to_da9052_gpio(gc);
+ struct da9052 *da9052 = gpio->da9052;
+
+ return da9052->irq_base + DA9052_IRQ_GPI0 + offset;
+}
+
+static struct gpio_chip reference_gp __devinitdata = {
+ .label = "da9052-gpio",
+ .owner = THIS_MODULE,
+ .get = da9052_gpio_get,
+ .set = da9052_gpio_set,
+ .direction_input = da9052_gpio_direction_input,
+ .direction_output = da9052_gpio_direction_output,
+ .to_irq = da9052_gpio_to_irq,
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
+};
+
+static int __devinit da9052_gpio_probe(struct platform_device *pdev)
+{
+ struct da9052_gpio *gpio;
+ struct da9052_pdata *pdata;
+ int ret;
+
+ gpio = kzalloc(sizeof(*gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return -ENOMEM;
+
+ gpio->da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = gpio->da9052->dev->platform_data;
+
+ gpio->gp = reference_gp;
+ if (pdata && pdata->gpio_base)
+ gpio->gp.base = pdata->gpio_base;
+
+ ret = gpiochip_add(&gpio->gp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
+ goto err_mem;
+ }
+
+ platform_set_drvdata(pdev, gpio);
+
+ return 0;
+
+err_mem:
+ kfree(gpio);
+ return ret;
+}
+
+static int __devexit da9052_gpio_remove(struct platform_device *pdev)
+{
+ struct da9052_gpio *gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&gpio->gp);
+ if (ret == 0)
+ kfree(gpio);
+
+ return ret;
+}
+
+static struct platform_driver da9052_gpio_driver = {
+ .probe = da9052_gpio_probe,
+ .remove = __devexit_p(da9052_gpio_remove),
+ .driver = {
+ .name = "da9052-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_gpio_init(void)
+{
+ return platform_driver_register(&da9052_gpio_driver);
+}
+module_init(da9052_gpio_init);
+
+static void __exit da9052_gpio_exit(void)
+{
+ platform_driver_unregister(&da9052_gpio_driver);
+}
+module_exit(da9052_gpio_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 GPIO Device Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-gpio");
diff --git a/arch/arm/mach-ep93xx/gpio.c b/drivers/gpio/gpio-ep93xx.c
index 415dce37b88..3bfd3417ab1 100644
--- a/arch/arm/mach-ep93xx/gpio.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -1,9 +1,8 @@
/*
- * linux/arch/arm/mach-ep93xx/gpio.c
- *
* Generic EP93xx GPIO handling
*
* Copyright (c) 2008 Ryan Mallon <ryan@bluewatersys.com>
+ * Copyright (c) 2011 H Hartley Sweeten <hsweeten@visionengravers.com>
*
* Based on code originally from:
* linux/arch/arm/mach-ep93xx/core.c
@@ -13,17 +12,23 @@
* published by the Free Software Foundation.
*/
-#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
+#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/basic_mmio_gpio.h>
#include <mach/hardware.h>
+struct ep93xx_gpio {
+ void __iomem *mmio_base;
+ struct bgpio_chip bgc[8];
+};
+
/*************************************************************************
* Interrupt handling for EP93xx on-chip GPIOs
*************************************************************************/
@@ -225,7 +230,7 @@ static struct irq_chip ep93xx_gpio_irq_chip = {
.irq_set_type = ep93xx_gpio_irq_type,
};
-void __init ep93xx_gpio_init_irq(void)
+static void ep93xx_gpio_init_irq(void)
{
int gpio_irq;
@@ -260,151 +265,141 @@ void __init ep93xx_gpio_init_irq(void)
/*************************************************************************
* gpiolib interface for EP93xx on-chip GPIOs
*************************************************************************/
-struct ep93xx_gpio_chip {
- struct gpio_chip chip;
-
- void __iomem *data_reg;
- void __iomem *data_dir_reg;
+struct ep93xx_gpio_bank {
+ const char *label;
+ int data;
+ int dir;
+ int base;
+ bool has_debounce;
};
-#define to_ep93xx_gpio_chip(c) container_of(c, struct ep93xx_gpio_chip, chip)
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _debounce) \
+ { \
+ .label = _label, \
+ .data = _data, \
+ .dir = _dir, \
+ .base = _base, \
+ .has_debounce = _debounce, \
+ }
+
+static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true),
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true),
+ EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false),
+ EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false),
+ EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false),
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, true),
+ EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false),
+ EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false),
+};
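+
+/*
+ * Note that the gpio bases above are not sequential by port letter:
+ * e.g. port C provides gpios 40..47 while port D provides 24..31,
+ * preserving the numbering this platform has always used.
+ */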
-static int ep93xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
+ unsigned offset, unsigned debounce)
{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
- unsigned long flags;
- u8 v;
+ int gpio = chip->base + offset;
+ int irq = gpio_to_irq(gpio);
+
+ if (irq < 0)
+ return -EINVAL;
- local_irq_save(flags);
- v = __raw_readb(ep93xx_chip->data_dir_reg);
- v &= ~(1 << offset);
- __raw_writeb(v, ep93xx_chip->data_dir_reg);
- local_irq_restore(flags);
+ ep93xx_gpio_int_debounce(irq, debounce ? true : false);
return 0;
}
-static int ep93xx_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int val)
+static int ep93xx_gpio_add_bank(struct bgpio_chip *bgc, struct device *dev,
+ void __iomem *mmio_base, struct ep93xx_gpio_bank *bank)
{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
- unsigned long flags;
- int line;
- u8 v;
+ void __iomem *data = mmio_base + bank->data;
+ void __iomem *dir = mmio_base + bank->dir;
+ int err;
- local_irq_save(flags);
+ err = bgpio_init(bgc, dev, 1, data, NULL, NULL, dir, NULL, false);
+ if (err)
+ return err;
- /* Set the value */
- v = __raw_readb(ep93xx_chip->data_reg);
- if (val)
- v |= (1 << offset);
- else
- v &= ~(1 << offset);
- __raw_writeb(v, ep93xx_chip->data_reg);
-
- /* Drive as an output */
- line = chip->base + offset;
- if (line <= EP93XX_GPIO_LINE_MAX_IRQ) {
- /* Ports A/B/F */
- ep93xx_gpio_int_mask(line);
- ep93xx_gpio_update_int_params(line >> 3);
- }
+ bgc->gc.label = bank->label;
+ bgc->gc.base = bank->base;
- v = __raw_readb(ep93xx_chip->data_dir_reg);
- v |= (1 << offset);
- __raw_writeb(v, ep93xx_chip->data_dir_reg);
+ if (bank->has_debounce)
+ bgc->gc.set_debounce = ep93xx_gpio_set_debounce;
- local_irq_restore(flags);
-
- return 0;
+ return gpiochip_add(&bgc->gc);
}
-static int ep93xx_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int __devinit ep93xx_gpio_probe(struct platform_device *pdev)
{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
+ struct ep93xx_gpio *ep93xx_gpio;
+ struct resource *res;
+ void __iomem *mmio;
+ int i;
+ int ret;
- return !!(__raw_readb(ep93xx_chip->data_reg) & (1 << offset));
-}
+ ep93xx_gpio = kzalloc(sizeof(*ep93xx_gpio), GFP_KERNEL);
+ if (!ep93xx_gpio)
+ return -ENOMEM;
-static void ep93xx_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
-{
- struct ep93xx_gpio_chip *ep93xx_chip = to_ep93xx_gpio_chip(chip);
- unsigned long flags;
- u8 v;
-
- local_irq_save(flags);
- v = __raw_readb(ep93xx_chip->data_reg);
- if (val)
- v |= (1 << offset);
- else
- v &= ~(1 << offset);
- __raw_writeb(v, ep93xx_chip->data_reg);
- local_irq_restore(flags);
-}
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENXIO;
+ goto exit_free;
+ }
-static int ep93xx_gpio_set_debounce(struct gpio_chip *chip,
- unsigned offset, unsigned debounce)
-{
- int gpio = chip->base + offset;
- int irq = gpio_to_irq(gpio);
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ ret = -EBUSY;
+ goto exit_free;
+ }
- if (irq < 0)
- return -EINVAL;
+ mmio = ioremap(res->start, resource_size(res));
+ if (!mmio) {
+ ret = -ENXIO;
+ goto exit_release;
+ }
+ ep93xx_gpio->mmio_base = mmio;
- ep93xx_gpio_int_debounce(irq, debounce ? true : false);
+ /* Default all ports to GPIO */
+ ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
+ EP93XX_SYSCON_DEVCFG_GONK |
+ EP93XX_SYSCON_DEVCFG_EONIDE |
+ EP93XX_SYSCON_DEVCFG_GONIDE |
+ EP93XX_SYSCON_DEVCFG_HONIDE);
- return 0;
-}
+ for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
+ struct bgpio_chip *bgc = &ep93xx_gpio->bgc[i];
+ struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
-#define EP93XX_GPIO_BANK(name, dr, ddr, base_gpio) \
- { \
- .chip = { \
- .label = name, \
- .direction_input = ep93xx_gpio_direction_input, \
- .direction_output = ep93xx_gpio_direction_output, \
- .get = ep93xx_gpio_get, \
- .set = ep93xx_gpio_set, \
- .base = base_gpio, \
- .ngpio = 8, \
- }, \
- .data_reg = EP93XX_GPIO_REG(dr), \
- .data_dir_reg = EP93XX_GPIO_REG(ddr), \
+ if (ep93xx_gpio_add_bank(bgc, &pdev->dev, mmio, bank))
+ dev_warn(&pdev->dev, "Unable to add gpio bank %s\n",
+ bank->label);
}
-static struct ep93xx_gpio_chip ep93xx_gpio_banks[] = {
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0),
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 40),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 32),
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 56),
-};
+ ep93xx_gpio_init_irq();
-void __init ep93xx_gpio_init(void)
-{
- int i;
+ return 0;
- /* Set Ports C, D, E, G, and H for GPIO use */
- ep93xx_devcfg_set_bits(EP93XX_SYSCON_DEVCFG_KEYS |
- EP93XX_SYSCON_DEVCFG_GONK |
- EP93XX_SYSCON_DEVCFG_EONIDE |
- EP93XX_SYSCON_DEVCFG_GONIDE |
- EP93XX_SYSCON_DEVCFG_HONIDE);
+exit_release:
+ release_mem_region(res->start, resource_size(res));
+exit_free:
+ kfree(ep93xx_gpio);
+ dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, ret);
+ return ret;
+}
- for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct gpio_chip *chip = &ep93xx_gpio_banks[i].chip;
-
- /*
- * Ports A, B, and F support input debouncing when
- * used as interrupts.
- */
- if (!strcmp(chip->label, "A") ||
- !strcmp(chip->label, "B") ||
- !strcmp(chip->label, "F"))
- chip->set_debounce = ep93xx_gpio_set_debounce;
-
- gpiochip_add(chip);
- }
+static struct platform_driver ep93xx_gpio_driver = {
+ .driver = {
+ .name = "gpio-ep93xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = ep93xx_gpio_probe,
+};
+
+static int __init ep93xx_gpio_init(void)
+{
+ return platform_driver_register(&ep93xx_gpio_driver);
}
+postcore_initcall(ep93xx_gpio_init);
+
+MODULE_AUTHOR("Ryan Mallon <ryan@bluewatersys.com> "
+ "H Hartley Sweeten <hsweeten@visionengravers.com>");
+MODULE_DESCRIPTION("EP93XX GPIO driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
index 9029835112e..d24b337cf1a 100644
--- a/drivers/gpio/gpio-exynos4.c
+++ b/drivers/gpio/gpio-exynos4.c
@@ -1,10 +1,9 @@
-/* linux/arch/arm/mach-exynos4/gpiolib.c
+/*
+ * EXYNOS4 - GPIOlib support
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - GPIOlib support
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/gpio-generic.c
index 8152e9f516b..231714def4d 100644
--- a/drivers/gpio/basic_mmio_gpio.c
+++ b/drivers/gpio/gpio-generic.c
@@ -1,5 +1,5 @@
/*
- * Driver for basic memory-mapped GPIO controllers.
+ * Generic driver for memory-mapped GPIO controllers.
*
* Copyright 2008 MontaVista Software, Inc.
* Copyright 2008,2010 Anton Vorontsov <cbouatmailru@gmail.com>
@@ -404,7 +404,7 @@ int __devinit bgpio_init(struct bgpio_chip *bgc,
}
EXPORT_SYMBOL_GPL(bgpio_init);
-#ifdef CONFIG_GPIO_BASIC_MMIO
+#ifdef CONFIG_GPIO_GENERIC_PLATFORM
static void __iomem *bgpio_map(struct platform_device *pdev,
const char *name,
@@ -541,7 +541,7 @@ static void __exit bgpio_platform_exit(void)
}
module_exit(bgpio_platform_exit);
-#endif /* CONFIG_GPIO_BASIC_MMIO */
+#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/gpio-it8761e.c
index 48fc43c4bdd..278b8131701 100644
--- a/drivers/gpio/it8761e_gpio.c
+++ b/drivers/gpio/gpio-it8761e.c
@@ -1,5 +1,5 @@
/*
- * it8761_gpio.c - GPIO interface for IT8761E Super I/O chip
+ * GPIO interface for IT8761E Super I/O chip
*
* Author: Denis Turischev <denis@compulab.co.il>
*
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 813ac077e5d..813ac077e5d 100644
--- a/drivers/gpio/janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/gpio-langwell.c
index 644ba1255d3..d2eb57c60e0 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -1,4 +1,6 @@
-/* langwell_gpio.c Moorestown platform Langwell chip GPIO driver
+/*
+ * Moorestown platform Langwell chip GPIO driver
+ *
* Copyright (c) 2008 - 2009, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/max7300.c b/drivers/gpio/gpio-max7300.c
index 962f661c18c..a5ca0ab1b37 100644
--- a/drivers/gpio/max7300.c
+++ b/drivers/gpio/gpio-max7300.c
@@ -1,6 +1,4 @@
/*
- * drivers/gpio/max7300.c
- *
* Copyright (C) 2009 Wolfram Sang, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/gpio-max7301.c
index 92a100ddef6..741acfcbe76 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/gpio-max7301.c
@@ -1,6 +1,4 @@
/*
- * drivers/gpio/max7301.c
- *
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
* Copyright (C) 2009 Wolfram Sang, Pengutronix
diff --git a/drivers/gpio/max730x.c b/drivers/gpio/gpio-max730x.c
index 94ce773f95f..05e2dac60b3 100644
--- a/drivers/gpio/max730x.c
+++ b/drivers/gpio/gpio-max730x.c
@@ -1,6 +1,4 @@
/**
- * drivers/gpio/max7301.c
- *
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
* Copyright (C) 2009 Wolfram Sang, Pengutronix
diff --git a/drivers/gpio/max732x.c b/drivers/gpio/gpio-max732x.c
index ad6951edc16..9504120812a 100644
--- a/drivers/gpio/max732x.c
+++ b/drivers/gpio/gpio-max732x.c
@@ -1,5 +1,5 @@
/*
- * max732x.c - I2C Port Expander with 8/16 I/O
+ * MAX732x I2C Port Expander with 8/16 I/O
*
* Copyright (C) 2007 Marvell International Ltd.
* Copyright (C) 2008 Jack Ren <jack.ren@marvell.com>
diff --git a/drivers/gpio/mc33880.c b/drivers/gpio/gpio-mc33880.c
index 4ec797593bd..b3b4652e89e 100644
--- a/drivers/gpio/mc33880.c
+++ b/drivers/gpio/gpio-mc33880.c
@@ -1,5 +1,5 @@
/*
- * mc33880.c MC33880 high-side/low-side switch GPIO driver
+ * MC33880 high-side/low-side switch GPIO driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index 40e076083ec..1ef46e6c2a2 100644
--- a/drivers/gpio/mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -1,12 +1,12 @@
/*
- * mcp23s08.c - SPI gpio expander driver
+ * MCP23S08 SPI/I2C GPIO expander driver
*/
#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/gpio.h>
+#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/mcp23s08.h>
#include <linux/slab.h>
@@ -17,13 +17,13 @@
*/
#define MCP_TYPE_S08 0
#define MCP_TYPE_S17 1
+#define MCP_TYPE_008 2
+#define MCP_TYPE_017 3
/* Registers are all 8 bits wide.
*
* The mcp23s17 has twice as many bits, and can be configured to work
* with either 16 bit registers or with two adjacent 8 bit banks.
- *
- * Also, there are I2C versions of both chips.
*/
#define MCP_IODIR 0x00 /* init/reset: all ones */
#define MCP_IPOL 0x01
@@ -51,7 +51,6 @@ struct mcp23s08_ops {
};
struct mcp23s08 {
- struct spi_device *spi;
u8 addr;
u16 cache[11];
@@ -60,9 +59,8 @@ struct mcp23s08 {
struct gpio_chip chip;
- struct work_struct work;
-
const struct mcp23s08_ops *ops;
+ void *data; /* ops specific data */
};
/* A given spi_device can represent up to eight mcp23sxx chips
@@ -76,6 +74,74 @@ struct mcp23s08_driver_data {
struct mcp23s08 chip[];
};
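The core change in this hunk is swapping the embedded struct spi_device *spi for an opaque void *data handle plus a per-variant ops table, so the register accessors can be selected per bus at probe time. A minimal user-space sketch of that dispatch pattern, with purely illustrative names (not the driver's API):

#include <stdio.h>

struct chip;

/* per-bus accessors; the core only ever calls through this table */
struct chip_ops {
	int (*read)(struct chip *c, unsigned reg);
};

struct chip {
	const struct chip_ops *ops;
	void *data;	/* bus handle: spi_device or i2c_client */
};

static int fake_spi_read(struct chip *c, unsigned reg)
{
	printf("SPI read reg %u via %s\n", reg, (char *)c->data);
	return 0;
}

static int fake_i2c_read(struct chip *c, unsigned reg)
{
	printf("I2C read reg %u via %s\n", reg, (char *)c->data);
	return 0;
}

static const struct chip_ops spi_ops = { .read = fake_spi_read };
static const struct chip_ops i2c_ops = { .read = fake_i2c_read };

int main(void)
{
	struct chip a = { .ops = &spi_ops, .data = "spi0.0" };
	struct chip b = { .ops = &i2c_ops, .data = "0-0020" };

	/* bus-agnostic core code, like the mcp23s08 gpio_chip hooks */
	a.ops->read(&a, 0x00);
	b.ops->read(&b, 0x00);
	return 0;
}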
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_I2C
+
+static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ return i2c_smbus_read_byte_data(mcp->data, reg);
+}
+
+static int mcp23008_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
+{
+ return i2c_smbus_write_byte_data(mcp->data, reg, val);
+}
+
+static int
+mcp23008_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
+{
+ while (n--) {
+ int ret = mcp23008_read(mcp, reg++);
+ if (ret < 0)
+ return ret;
+ *vals++ = ret;
+ }
+
+ return 0;
+}
+
+static int mcp23017_read(struct mcp23s08 *mcp, unsigned reg)
+{
+ return i2c_smbus_read_word_data(mcp->data, reg << 1);
+}
+
+static int mcp23017_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
+{
+ return i2c_smbus_write_word_data(mcp->data, reg << 1, val);
+}
+
+static int
+mcp23017_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
+{
+ while (n--) {
+ int ret = mcp23017_read(mcp, reg++);
+ if (ret < 0)
+ return ret;
+ *vals++ = ret;
+ }
+
+ return 0;
+}
+
+static const struct mcp23s08_ops mcp23008_ops = {
+ .read = mcp23008_read,
+ .write = mcp23008_write,
+ .read_regs = mcp23008_read_regs,
+};
+
+static const struct mcp23s08_ops mcp23017_ops = {
+ .read = mcp23017_read,
+ .write = mcp23017_write,
+ .read_regs = mcp23017_read_regs,
+};
+
+#endif /* CONFIG_I2C */
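The mcp23017 accessors shift the register number left by one because, assuming the chip's default IOCON.BANK=0 layout, the port A and port B copies of each register are interleaved: generic register n lives at byte address 2n (port A) and 2n+1 (port B), so a 16-bit SMBus access at 2n covers both ports. A small sketch of that address math:

#include <stdio.h>

/*
 * MCP23017 in its default (BANK=0) mode interleaves the port A/B
 * copies of each register -- hence the "reg << 1" in
 * mcp23017_read()/mcp23017_write() and the word-sized SMBus access.
 */
int main(void)
{
	unsigned reg;

	for (reg = 0; reg < 3; reg++)
		printf("reg %u -> port A @ 0x%02x, port B @ 0x%02x\n",
		       reg, reg << 1, (reg << 1) + 1);
	return 0;
}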
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_SPI_MASTER
+
static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
{
u8 tx[2], rx[1];
@@ -83,7 +149,7 @@ static int mcp23s08_read(struct mcp23s08 *mcp, unsigned reg)
tx[0] = mcp->addr | 0x01;
tx[1] = reg;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
return (status < 0) ? status : rx[0];
}
@@ -94,7 +160,7 @@ static int mcp23s08_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
tx[0] = mcp->addr;
tx[1] = reg;
tx[2] = val;
- return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+ return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
}
static int
@@ -109,7 +175,7 @@ mcp23s08_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
tx[1] = reg;
tmp = (u8 *)vals;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, tmp, n);
+ status = spi_write_then_read(mcp->data, tx, sizeof tx, tmp, n);
if (status >= 0) {
while (n--)
vals[n] = tmp[n]; /* expand to 16bit */
@@ -124,7 +190,7 @@ static int mcp23s17_read(struct mcp23s08 *mcp, unsigned reg)
tx[0] = mcp->addr | 0x01;
tx[1] = reg << 1;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx, rx, sizeof rx);
+ status = spi_write_then_read(mcp->data, tx, sizeof tx, rx, sizeof rx);
return (status < 0) ? status : (rx[0] | (rx[1] << 8));
}
@@ -136,7 +202,7 @@ static int mcp23s17_write(struct mcp23s08 *mcp, unsigned reg, unsigned val)
tx[1] = reg << 1;
tx[2] = val;
tx[3] = val >> 8;
- return spi_write_then_read(mcp->spi, tx, sizeof tx, NULL, 0);
+ return spi_write_then_read(mcp->data, tx, sizeof tx, NULL, 0);
}
static int
@@ -150,7 +216,7 @@ mcp23s17_read_regs(struct mcp23s08 *mcp, unsigned reg, u16 *vals, unsigned n)
tx[0] = mcp->addr | 0x01;
tx[1] = reg << 1;
- status = spi_write_then_read(mcp->spi, tx, sizeof tx,
+ status = spi_write_then_read(mcp->data, tx, sizeof tx,
(u8 *)vals, n * 2);
if (status >= 0) {
while (n--)
@@ -172,6 +238,7 @@ static const struct mcp23s08_ops mcp23s17_ops = {
.read_regs = mcp23s17_read_regs,
};
+#endif /* CONFIG_SPI_MASTER */
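With the probe change further down, the SPI-specific address encoding (0x40 | (addr << 1)) moves out of mcp23s08_probe_one() and into the SPI probe path, leaving mcp->addr bus-neutral. A sketch of the control byte this builds, assuming the MCP23Sxx datasheet's opcode layout 0100-A2-A1-A0-R/W:

#include <stdio.h>

/*
 * MCP23Sxx SPI control byte: 0x40 is the fixed device opcode,
 * the hardware address pins land in bits 3..1, bit 0 selects read
 * (mcp23s08_read() ORs in 0x01 the same way).
 */
static unsigned char mcp_ctrl_byte(unsigned hw_addr, int read)
{
	return 0x40 | (hw_addr << 1) | (read ? 0x01 : 0x00);
}

int main(void)
{
	unsigned addr;

	for (addr = 0; addr < 4; addr++)
		printf("addr %u: write 0x%02x, read 0x%02x\n", addr,
		       mcp_ctrl_byte(addr, 0), mcp_ctrl_byte(addr, 1));
	return 0;
}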
/*----------------------------------------------------------------------*/
@@ -299,17 +366,16 @@ done:
/*----------------------------------------------------------------------*/
-static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
+static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
+ void *data, unsigned addr,
unsigned type, unsigned base, unsigned pullups)
{
- struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
- struct mcp23s08 *mcp = data->mcp[addr];
- int status;
+ int status;
mutex_init(&mcp->lock);
- mcp->spi = spi;
- mcp->addr = 0x40 | (addr << 1);
+ mcp->data = data;
+ mcp->addr = addr;
mcp->chip.direction_input = mcp23s08_direction_input;
mcp->chip.get = mcp23s08_get;
@@ -317,18 +383,43 @@ static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
mcp->chip.set = mcp23s08_set;
mcp->chip.dbg_show = mcp23s08_dbg_show;
- if (type == MCP_TYPE_S17) {
+ switch (type) {
+#ifdef CONFIG_SPI_MASTER
+ case MCP_TYPE_S08:
+ mcp->ops = &mcp23s08_ops;
+ mcp->chip.ngpio = 8;
+ mcp->chip.label = "mcp23s08";
+ break;
+
+ case MCP_TYPE_S17:
mcp->ops = &mcp23s17_ops;
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s17";
- } else {
- mcp->ops = &mcp23s08_ops;
+ break;
+#endif /* CONFIG_SPI_MASTER */
+
+#ifdef CONFIG_I2C
+ case MCP_TYPE_008:
+ mcp->ops = &mcp23008_ops;
mcp->chip.ngpio = 8;
- mcp->chip.label = "mcp23s08";
+ mcp->chip.label = "mcp23008";
+ break;
+
+ case MCP_TYPE_017:
+ mcp->ops = &mcp23017_ops;
+ mcp->chip.ngpio = 16;
+ mcp->chip.label = "mcp23017";
+ break;
+#endif /* CONFIG_I2C */
+
+ default:
+ dev_err(dev, "invalid device type (%d)\n", type);
+ return -EINVAL;
}
+
mcp->chip.base = base;
mcp->chip.can_sleep = 1;
- mcp->chip.dev = &spi->dev;
+ mcp->chip.dev = dev;
mcp->chip.owner = THIS_MODULE;
/* verify MCP_IOCON.SEQOP = 0, so sequential reads work,
@@ -374,11 +465,98 @@ static int mcp23s08_probe_one(struct spi_device *spi, unsigned addr,
status = gpiochip_add(&mcp->chip);
fail:
if (status < 0)
- dev_dbg(&spi->dev, "can't setup chip %d, --> %d\n",
- addr, status);
+ dev_dbg(dev, "can't setup chip %d, --> %d\n",
+ addr, status);
return status;
}
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_I2C
+
+static int __devinit mcp230xx_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct mcp23s08_platform_data *pdata;
+ struct mcp23s08 *mcp;
+ int status;
+
+ pdata = client->dev.platform_data;
+ if (!pdata || !gpio_is_valid(pdata->base)) {
+ dev_dbg(&client->dev, "invalid or missing platform data\n");
+ return -EINVAL;
+ }
+
+ mcp = kzalloc(sizeof *mcp, GFP_KERNEL);
+ if (!mcp)
+ return -ENOMEM;
+
+ status = mcp23s08_probe_one(mcp, &client->dev, client, client->addr,
+ id->driver_data, pdata->base,
+ pdata->chip[0].pullups);
+ if (status)
+ goto fail;
+
+ i2c_set_clientdata(client, mcp);
+
+ return 0;
+
+fail:
+ kfree(mcp);
+
+ return status;
+}
+
+static int __devexit mcp230xx_remove(struct i2c_client *client)
+{
+ struct mcp23s08 *mcp = i2c_get_clientdata(client);
+ int status;
+
+ status = gpiochip_remove(&mcp->chip);
+ if (status == 0)
+ kfree(mcp);
+
+ return status;
+}
+
+static const struct i2c_device_id mcp230xx_id[] = {
+ { "mcp23008", MCP_TYPE_008 },
+ { "mcp23017", MCP_TYPE_017 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, mcp230xx_id);
+
+static struct i2c_driver mcp230xx_driver = {
+ .driver = {
+ .name = "mcp230xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = mcp230xx_probe,
+ .remove = __devexit_p(mcp230xx_remove),
+ .id_table = mcp230xx_id,
+};
+
+static int __init mcp23s08_i2c_init(void)
+{
+ return i2c_add_driver(&mcp230xx_driver);
+}
+
+static void mcp23s08_i2c_exit(void)
+{
+ i2c_del_driver(&mcp230xx_driver);
+}
+
+#else
+
+static int __init mcp23s08_i2c_init(void) { return 0; }
+static void mcp23s08_i2c_exit(void) { }
+
+#endif /* CONFIG_I2C */
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_SPI_MASTER
+
static int mcp23s08_probe(struct spi_device *spi)
{
struct mcp23s08_platform_data *pdata;
@@ -421,7 +599,8 @@ static int mcp23s08_probe(struct spi_device *spi)
continue;
chips--;
data->mcp[addr] = &data->chip[chips];
- status = mcp23s08_probe_one(spi, addr, type, base,
+ status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
+ 0x40 | (addr << 1), type, base,
pdata->chip[addr].pullups);
if (status < 0)
goto fail;
@@ -435,14 +614,6 @@ static int mcp23s08_probe(struct spi_device *spi)
* handled here...
*/
- if (pdata->setup) {
- status = pdata->setup(spi,
- pdata->base, data->ngpio,
- pdata->context);
- if (status < 0)
- dev_dbg(&spi->dev, "setup --> %d\n", status);
- }
-
return 0;
fail:
@@ -462,20 +633,9 @@ fail:
static int mcp23s08_remove(struct spi_device *spi)
{
struct mcp23s08_driver_data *data = spi_get_drvdata(spi);
- struct mcp23s08_platform_data *pdata = spi->dev.platform_data;
unsigned addr;
int status = 0;
- if (pdata->teardown) {
- status = pdata->teardown(spi,
- pdata->base, data->ngpio,
- pdata->context);
- if (status < 0) {
- dev_err(&spi->dev, "%s --> %d\n", "teardown", status);
- return status;
- }
- }
-
for (addr = 0; addr < ARRAY_SIZE(data->mcp); addr++) {
int tmp;
@@ -510,20 +670,53 @@ static struct spi_driver mcp23s08_driver = {
},
};
+static int __init mcp23s08_spi_init(void)
+{
+ return spi_register_driver(&mcp23s08_driver);
+}
+
+static void mcp23s08_spi_exit(void)
+{
+ spi_unregister_driver(&mcp23s08_driver);
+}
+
+#else
+
+static int __init mcp23s08_spi_init(void) { return 0; }
+static void mcp23s08_spi_exit(void) { }
+
+#endif /* CONFIG_SPI_MASTER */
+
/*----------------------------------------------------------------------*/
static int __init mcp23s08_init(void)
{
- return spi_register_driver(&mcp23s08_driver);
+ int ret;
+
+ ret = mcp23s08_spi_init();
+ if (ret)
+ goto spi_fail;
+
+ ret = mcp23s08_i2c_init();
+ if (ret)
+ goto i2c_fail;
+
+ return 0;
+
+ i2c_fail:
+ mcp23s08_spi_exit();
+ spi_fail:
+ return ret;
}
-/* register after spi postcore initcall and before
+/* register after spi/i2c postcore initcall and before
* subsys initcalls that may rely on these GPIOs
*/
subsys_initcall(mcp23s08_init);
static void __exit mcp23s08_exit(void)
{
- spi_unregister_driver(&mcp23s08_driver);
+ mcp23s08_spi_exit();
+ mcp23s08_i2c_exit();
}
module_exit(mcp23s08_exit);
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/gpio-ml-ioh.c
index 1bc621ac353..a9016f56ed7 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -233,7 +233,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
return 0;
err_gpiochip_add:
- for (; i != 0; i--) {
+ while (--i >= 0) {
chip--;
ret = gpiochip_remove(&chip->gpio);
if (ret)
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/drivers/gpio/gpio-mpc5200.c
index 1757d1db4b5..52d3ed20810 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
+++ b/drivers/gpio/gpio-mpc5200.c
@@ -184,15 +184,13 @@ static int mpc52xx_gpiochip_remove(struct platform_device *ofdev)
}
static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
- {
- .compatible = "fsl,mpc5200-gpio-wkup",
- },
+ { .compatible = "fsl,mpc5200-gpio-wkup", },
{}
};
static struct platform_driver mpc52xx_wkup_gpiochip_driver = {
.driver = {
- .name = "gpio_wkup",
+ .name = "mpc5200-gpio-wkup",
.owner = THIS_MODULE,
.of_match_table = mpc52xx_wkup_gpiochip_match,
},
@@ -341,15 +339,13 @@ static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev
}
static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
- {
- .compatible = "fsl,mpc5200-gpio",
- },
+ { .compatible = "fsl,mpc5200-gpio", },
{}
};
static struct platform_driver mpc52xx_simple_gpiochip_driver = {
.driver = {
- .name = "gpio",
+ .name = "mpc5200-gpio",
.owner = THIS_MODULE,
.of_match_table = mpc52xx_simple_gpiochip_match,
},
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
new file mode 100644
index 00000000000..4340acae3bd
--- /dev/null
+++ b/drivers/gpio/gpio-mxc.c
@@ -0,0 +1,460 @@
+/*
+ * MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ *
+ * Based on code from Freescale,
+ * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/basic_mmio_gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm-generic/bug.h>
+
+enum mxc_gpio_hwtype {
+ IMX1_GPIO, /* runs on i.mx1 */
+ IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
+ IMX31_GPIO, /* runs on all other i.mx */
+};
+
+/* device type dependent stuff */
+struct mxc_gpio_hwdata {
+ unsigned dr_reg;
+ unsigned gdir_reg;
+ unsigned psr_reg;
+ unsigned icr1_reg;
+ unsigned icr2_reg;
+ unsigned imr_reg;
+ unsigned isr_reg;
+ unsigned low_level;
+ unsigned high_level;
+ unsigned rise_edge;
+ unsigned fall_edge;
+};
+
+struct mxc_gpio_port {
+ struct list_head node;
+ void __iomem *base;
+ int irq;
+ int irq_high;
+ int virtual_irq_start;
+ struct bgpio_chip bgc;
+ u32 both_edges;
+};
+
+static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
+ .dr_reg = 0x1c,
+ .gdir_reg = 0x00,
+ .psr_reg = 0x24,
+ .icr1_reg = 0x28,
+ .icr2_reg = 0x2c,
+ .imr_reg = 0x30,
+ .isr_reg = 0x34,
+ .low_level = 0x03,
+ .high_level = 0x02,
+ .rise_edge = 0x00,
+ .fall_edge = 0x01,
+};
+
+static struct mxc_gpio_hwdata imx31_gpio_hwdata = {
+ .dr_reg = 0x00,
+ .gdir_reg = 0x04,
+ .psr_reg = 0x08,
+ .icr1_reg = 0x0c,
+ .icr2_reg = 0x10,
+ .imr_reg = 0x14,
+ .isr_reg = 0x18,
+ .low_level = 0x00,
+ .high_level = 0x01,
+ .rise_edge = 0x02,
+ .fall_edge = 0x03,
+};
+
+static enum mxc_gpio_hwtype mxc_gpio_hwtype;
+static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
+
+#define GPIO_DR (mxc_gpio_hwdata->dr_reg)
+#define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg)
+#define GPIO_PSR (mxc_gpio_hwdata->psr_reg)
+#define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg)
+#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
+#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
+#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
+
+#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
+#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
+#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
+#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
+#define GPIO_INT_NONE 0x4
+
+static struct platform_device_id mxc_gpio_devtype[] = {
+ {
+ .name = "imx1-gpio",
+ .driver_data = IMX1_GPIO,
+ }, {
+ .name = "imx21-gpio",
+ .driver_data = IMX21_GPIO,
+ }, {
+ .name = "imx31-gpio",
+ .driver_data = IMX31_GPIO,
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct of_device_id mxc_gpio_dt_ids[] = {
+ { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
+ { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
+ { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
+ { /* sentinel */ }
+};
+
+/*
+ * MX2 has one interrupt *for all* gpio ports. The list is used
+ * to save the references to all ports, so that mx2_gpio_irq_handler
+ * can walk through all interrupt status registers.
+ */
+static LIST_HEAD(mxc_gpio_ports);
+
+/* Note: This driver assumes 32 GPIOs are handled in one register */
+
+static int gpio_set_irq_type(struct irq_data *d, u32 type)
+{
+ u32 gpio = irq_to_gpio(d->irq);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxc_gpio_port *port = gc->private;
+ u32 bit, val;
+ int edge;
+ void __iomem *reg = port->base;
+
+ port->both_edges &= ~(1 << (gpio & 31));
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = GPIO_INT_RISE_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = GPIO_INT_FALL_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_BOTH:
+ val = gpio_get_value(gpio);
+ if (val) {
+ edge = GPIO_INT_LOW_LEV;
+ pr_debug("mxc: set GPIO %d to low trigger\n", gpio);
+ } else {
+ edge = GPIO_INT_HIGH_LEV;
+ pr_debug("mxc: set GPIO %d to high trigger\n", gpio);
+ }
+ port->both_edges |= 1 << (gpio & 31);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = GPIO_INT_LOW_LEV;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = GPIO_INT_HIGH_LEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
+ bit = gpio & 0xf;
+ val = readl(reg) & ~(0x3 << (bit << 1));
+ writel(val | (edge << (bit << 1)), reg);
+ writel(1 << (gpio & 0x1f), port->base + GPIO_ISR);
+
+ return 0;
+}
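The ICR addressing above packs a two-bit trigger field per pin into two registers: (gpio & 0x10) >> 2 adds the 4-byte offset to ICR2 for pins 16..31, and (gpio & 0xf) << 1 selects the field inside the register. The same arithmetic as a standalone sketch:

#include <stdio.h>

/* mirror of the ICR addressing in gpio_set_irq_type() */
static void icr_slot(unsigned gpio, unsigned *reg_off, unsigned *shift)
{
	*reg_off = (gpio & 0x10) >> 2;	/* 0 = ICR1, 4 = ICR2 */
	*shift = (gpio & 0xf) << 1;	/* two bits per pin */
}

int main(void)
{
	unsigned pins[] = { 0, 15, 16, 31 };
	unsigned i, off, shift;

	for (i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
		icr_slot(pins[i], &off, &shift);
		printf("gpio %2u -> ICR%c, bits %u..%u\n", pins[i],
		       off ? '2' : '1', shift, shift + 1);
	}
	return 0;
}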
+
+static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
+{
+ void __iomem *reg = port->base;
+ u32 bit, val;
+ int edge;
+
+ reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
+ bit = gpio & 0xf;
+ val = readl(reg);
+ edge = (val >> (bit << 1)) & 3;
+ val &= ~(0x3 << (bit << 1));
+ if (edge == GPIO_INT_HIGH_LEV) {
+ edge = GPIO_INT_LOW_LEV;
+ pr_debug("mxc: switch GPIO %d to low trigger\n", gpio);
+ } else if (edge == GPIO_INT_LOW_LEV) {
+ edge = GPIO_INT_HIGH_LEV;
+ pr_debug("mxc: switch GPIO %d to high trigger\n", gpio);
+ } else {
+ pr_err("mxc: invalid configuration for GPIO %d: %x\n",
+ gpio, edge);
+ return;
+ }
+ writel(val | (edge << (bit << 1)), reg);
+}
+
+/* handle 32 interrupts in one status register */
+static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
+{
+ u32 gpio_irq_no_base = port->virtual_irq_start;
+
+ while (irq_stat != 0) {
+ int irqoffset = fls(irq_stat) - 1;
+
+ if (port->both_edges & (1 << irqoffset))
+ mxc_flip_edge(port, irqoffset);
+
+ generic_handle_irq(gpio_irq_no_base + irqoffset);
+
+ irq_stat &= ~(1 << irqoffset);
+ }
+}
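Taken together, gpio_set_irq_type() and mxc_flip_edge() emulate IRQ_TYPE_EDGE_BOTH on hardware that only has level and single-edge triggers: the driver arms the level the pin is currently *not* at, then re-arms the opposite level from the handler after each interrupt. A toy model of that re-arming loop:

#include <stdio.h>

#define LOW_LEV  0
#define HIGH_LEV 1

/* emulate IRQ_TYPE_EDGE_BOTH with level triggers, as mxc_flip_edge() does */
static int next_trigger(int pin_level)
{
	/* arm the level the pin is NOT at, so the next change fires */
	return pin_level ? LOW_LEV : HIGH_LEV;
}

int main(void)
{
	int level = 0, i;

	for (i = 0; i < 4; i++) {
		printf("pin=%d -> arm %s trigger\n", level,
		       next_trigger(level) == LOW_LEV ? "low" : "high");
		level = !level;	/* pin toggles, interrupt fires, re-arm */
	}
	return 0;
}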
+
+/* MX1 and MX3 have one interrupt *per* gpio port */
+static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ u32 irq_stat;
+ struct mxc_gpio_port *port = irq_get_handler_data(irq);
+
+ irq_stat = readl(port->base + GPIO_ISR) & readl(port->base + GPIO_IMR);
+
+ mxc_gpio_irq_handler(port, irq_stat);
+}
+
+/* MX2 has one interrupt *for all* gpio ports */
+static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ u32 irq_msk, irq_stat;
+ struct mxc_gpio_port *port;
+
+ /* walk through all interrupt status registers */
+ list_for_each_entry(port, &mxc_gpio_ports, node) {
+ irq_msk = readl(port->base + GPIO_IMR);
+ if (!irq_msk)
+ continue;
+
+ irq_stat = readl(port->base + GPIO_ISR) & irq_msk;
+ if (irq_stat)
+ mxc_gpio_irq_handler(port, irq_stat);
+ }
+}
+
+/*
+ * Set the GPIO interrupt behind irq_data "d" as a wake-up source.
+ * While the system is running, all registered GPIO interrupts need to
+ * have wake-up enabled. When the system is suspended, only selected
+ * GPIO interrupts need to have wake-up enabled.
+ * @param d interrupt source, as passed to the irq_set_wake hook
+ * @param enable non-zero to enable the source for wake-up
+ * @return 0 on success
+ */
+static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
+{
+ u32 gpio = irq_to_gpio(d->irq);
+ u32 gpio_idx = gpio & 0x1F;
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxc_gpio_port *port = gc->private;
+
+ if (enable) {
+ if (port->irq_high && (gpio_idx >= 16))
+ enable_irq_wake(port->irq_high);
+ else
+ enable_irq_wake(port->irq);
+ } else {
+ if (port->irq_high && (gpio_idx >= 16))
+ disable_irq_wake(port->irq_high);
+ else
+ disable_irq_wake(port->irq);
+ }
+
+ return 0;
+}
+
+static void __init mxc_gpio_init_gc(struct mxc_gpio_port *port)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("gpio-mxc", 1, port->virtual_irq_start,
+ port->base, handle_level_irq);
+ gc->private = port;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_set_type = gpio_set_irq_type;
+ ct->chip.irq_set_wake = gpio_set_wake_irq;
+ ct->regs.ack = GPIO_ISR;
+ ct->regs.mask = GPIO_IMR;
+
+ irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
+ IRQ_NOREQUEST, 0);
+}
+
+static void __devinit mxc_gpio_get_hw(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(mxc_gpio_dt_ids, &pdev->dev);
+ enum mxc_gpio_hwtype hwtype;
+
+ if (of_id)
+ pdev->id_entry = of_id->data;
+ hwtype = pdev->id_entry->driver_data;
+
+ if (mxc_gpio_hwtype) {
+ /*
+ * The driver assumes that all GPIO ports on one SoC are
+ * of the same type; a mixed configuration is a bug, hence
+ * the BUG_ON below.
+ */
+ BUG_ON(mxc_gpio_hwtype != hwtype);
+ return;
+ }
+
+ if (hwtype == IMX31_GPIO)
+ mxc_gpio_hwdata = &imx31_gpio_hwdata;
+ else
+ mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
+
+ mxc_gpio_hwtype = hwtype;
+}
+
+static int __devinit mxc_gpio_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct mxc_gpio_port *port;
+ struct resource *iores;
+ int err;
+
+ mxc_gpio_get_hw(pdev);
+
+ port = kzalloc(sizeof(struct mxc_gpio_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ err = -ENODEV;
+ goto out_kfree;
+ }
+
+ if (!request_mem_region(iores->start, resource_size(iores),
+ pdev->name)) {
+ err = -EBUSY;
+ goto out_kfree;
+ }
+
+ port->base = ioremap(iores->start, resource_size(iores));
+ if (!port->base) {
+ err = -ENOMEM;
+ goto out_release_mem;
+ }
+
+ port->irq_high = platform_get_irq(pdev, 1);
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0) {
+ err = -EINVAL;
+ goto out_iounmap;
+ }
+
+ /* disable the interrupt and clear the status */
+ writel(0, port->base + GPIO_IMR);
+ writel(~0, port->base + GPIO_ISR);
+
+ if (mxc_gpio_hwtype == IMX21_GPIO) {
+ /* setup one handler for all GPIO interrupts */
+ if (pdev->id == 0)
+ irq_set_chained_handler(port->irq,
+ mx2_gpio_irq_handler);
+ } else {
+ /* setup one handler for each entry */
+ irq_set_chained_handler(port->irq, mx3_gpio_irq_handler);
+ irq_set_handler_data(port->irq, port);
+ if (port->irq_high > 0) {
+ /* setup handler for GPIO 16 to 31 */
+ irq_set_chained_handler(port->irq_high,
+ mx3_gpio_irq_handler);
+ irq_set_handler_data(port->irq_high, port);
+ }
+ }
+
+ err = bgpio_init(&port->bgc, &pdev->dev, 4,
+ port->base + GPIO_PSR,
+ port->base + GPIO_DR, NULL,
+ port->base + GPIO_GDIR, NULL, false);
+ if (err)
+ goto out_iounmap;
+
+ port->bgc.gc.base = pdev->id * 32;
+ port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir);
+ port->bgc.data = port->bgc.read_reg(port->bgc.reg_set);
+
+ err = gpiochip_add(&port->bgc.gc);
+ if (err)
+ goto out_bgpio_remove;
+
+ /*
+ * In dt case, we use gpio number range dynamically
+ * allocated by gpio core.
+ */
+ port->virtual_irq_start = MXC_GPIO_IRQ_START + (np ? port->bgc.gc.base :
+ pdev->id * 32);
+
+ /* gpio-mxc can be a generic irq chip */
+ mxc_gpio_init_gc(port);
+
+ list_add_tail(&port->node, &mxc_gpio_ports);
+
+ return 0;
+
+out_bgpio_remove:
+ bgpio_remove(&port->bgc);
+out_iounmap:
+ iounmap(port->base);
+out_release_mem:
+ release_mem_region(iores->start, resource_size(iores));
+out_kfree:
+ kfree(port);
+ dev_info(&pdev->dev, "%s failed with error %d\n", __func__, err);
+ return err;
+}
+
+static struct platform_driver mxc_gpio_driver = {
+ .driver = {
+ .name = "gpio-mxc",
+ .owner = THIS_MODULE,
+ .of_match_table = mxc_gpio_dt_ids,
+ },
+ .probe = mxc_gpio_probe,
+ .id_table = mxc_gpio_devtype,
+};
+
+static int __init gpio_mxc_init(void)
+{
+ return platform_driver_register(&mxc_gpio_driver);
+}
+postcore_initcall(gpio_mxc_init);
+
+MODULE_AUTHOR("Freescale Semiconductor, "
+ "Daniel Mack <danielncaiaq.de>, "
+ "Juergen Beisert <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale MXC GPIO");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
new file mode 100644
index 00000000000..af55a8577c2
--- /dev/null
+++ b/drivers/gpio/gpio-mxs.c
@@ -0,0 +1,289 @@
+/*
+ * MXS GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de>
+ * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
+ *
+ * Based on code from Freescale,
+ * Copyright (C) 2004-2010 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/basic_mmio_gpio.h>
+#include <mach/mxs.h>
+
+#define MXS_SET 0x4
+#define MXS_CLR 0x8
+
+#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10)
+#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10)
+#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10)
+#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10)
+#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10)
+#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10)
+#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10)
+#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10)
+
+#define GPIO_INT_FALL_EDGE 0x0
+#define GPIO_INT_LOW_LEV 0x1
+#define GPIO_INT_RISE_EDGE 0x2
+#define GPIO_INT_HIGH_LEV 0x3
+#define GPIO_INT_LEV_MASK (1 << 0)
+#define GPIO_INT_POL_MASK (1 << 1)
+
+struct mxs_gpio_port {
+ void __iomem *base;
+ int id;
+ int irq;
+ int virtual_irq_start;
+ struct bgpio_chip bgc;
+};
+
+/* Note: This driver assumes 32 GPIOs are handled in one register */
+
+static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
+{
+ u32 gpio = irq_to_gpio(d->irq);
+ u32 pin_mask = 1 << (gpio & 31);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxs_gpio_port *port = gc->private;
+ void __iomem *pin_addr;
+ int edge;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ edge = GPIO_INT_RISE_EDGE;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ edge = GPIO_INT_FALL_EDGE;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ edge = GPIO_INT_LOW_LEV;
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ edge = GPIO_INT_HIGH_LEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set level or edge */
+ pin_addr = port->base + PINCTRL_IRQLEV(port->id);
+ if (edge & GPIO_INT_LEV_MASK)
+ writel(pin_mask, pin_addr + MXS_SET);
+ else
+ writel(pin_mask, pin_addr + MXS_CLR);
+
+ /* set polarity */
+ pin_addr = port->base + PINCTRL_IRQPOL(port->id);
+ if (edge & GPIO_INT_POL_MASK)
+ writel(pin_mask, pin_addr + MXS_SET);
+ else
+ writel(pin_mask, pin_addr + MXS_CLR);
+
+ writel(1 << (gpio & 0x1f),
+ port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+
+ return 0;
+}
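The writes above rely on the MXS convention that every pinctrl register has write-1-to-set and write-1-to-clear aliases at base+0x4 and base+0x8 (MXS_SET/MXS_CLR), so single bits can be flipped without a read-modify-write cycle. A small model of those aliases:

#include <stdio.h>

#define MXS_SET 0x4
#define MXS_CLR 0x8

/* model of a register with SET/CLR aliases at base+0x4/base+0x8 */
static unsigned reg;

static void mmio_write(unsigned off, unsigned val)
{
	if (off == MXS_SET)
		reg |= val;		/* write-1-to-set */
	else if (off == MXS_CLR)
		reg &= ~val;		/* write-1-to-clear */
	else
		reg = val;		/* plain write */
}

int main(void)
{
	mmio_write(0, 0x0f);
	mmio_write(MXS_SET, 1 << 7);	/* set bit 7, no read-modify-write */
	mmio_write(MXS_CLR, 1 << 0);	/* clear bit 0 */
	printf("reg = 0x%02x\n", reg);	/* prints 0x8e */
	return 0;
}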
+
+/* MXS has one interrupt *per* gpio port */
+static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+{
+ u32 irq_stat;
+ struct mxs_gpio_port *port = irq_get_handler_data(irq);
+ u32 gpio_irq_no_base = port->virtual_irq_start;
+
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) &
+ readl(port->base + PINCTRL_IRQEN(port->id));
+
+ while (irq_stat != 0) {
+ int irqoffset = fls(irq_stat) - 1;
+ generic_handle_irq(gpio_irq_no_base + irqoffset);
+ irq_stat &= ~(1 << irqoffset);
+ }
+}
+
+/*
+ * Set the GPIO interrupt behind irq_data "d" as a wake-up source.
+ * While the system is running, all registered GPIO interrupts need to
+ * have wake-up enabled. When the system is suspended, only selected
+ * GPIO interrupts need to have wake-up enabled.
+ * @param d interrupt source, as passed to the irq_set_wake hook
+ * @param enable non-zero to enable the source for wake-up
+ * @return 0 on success
+ */
+static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct mxs_gpio_port *port = gc->private;
+
+ if (enable)
+ enable_irq_wake(port->irq);
+ else
+ disable_irq_wake(port->irq);
+
+ return 0;
+}
+
+static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("gpio-mxs", 1, port->virtual_irq_start,
+ port->base, handle_level_irq);
+ gc->private = port;
+
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_set_type = mxs_gpio_set_irq_type;
+ ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
+ ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR;
+ ct->regs.mask = PINCTRL_IRQEN(port->id);
+
+ irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
+}
+
+static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
+{
+ struct bgpio_chip *bgc = to_bgpio_chip(gc);
+ struct mxs_gpio_port *port =
+ container_of(bgc, struct mxs_gpio_port, bgc);
+
+ return port->virtual_irq_start + offset;
+}
+
+static int __devinit mxs_gpio_probe(struct platform_device *pdev)
+{
+ static void __iomem *base;
+ struct mxs_gpio_port *port;
+ struct resource *iores = NULL;
+ int err;
+
+ port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->id = pdev->id;
+ port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
+
+ /*
+ * map memory region only once, as all the gpio ports
+ * share the same one
+ */
+ if (!base) {
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores) {
+ err = -ENODEV;
+ goto out_kfree;
+ }
+
+ if (!request_mem_region(iores->start, resource_size(iores),
+ pdev->name)) {
+ err = -EBUSY;
+ goto out_kfree;
+ }
+
+ base = ioremap(iores->start, resource_size(iores));
+ if (!base) {
+ err = -ENOMEM;
+ goto out_release_mem;
+ }
+ }
+ port->base = base;
+
+ port->irq = platform_get_irq(pdev, 0);
+ if (port->irq < 0) {
+ err = -EINVAL;
+ goto out_iounmap;
+ }
+
+ /*
+ * select the pin interrupt functionality but initially
+ * disable the interrupts
+ */
+ writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id));
+ writel(0, port->base + PINCTRL_IRQEN(port->id));
+
+ /* the CLR address alias has to be used to clear IRQSTAT bits */
+ writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR);
+
+ /* gpio-mxs can be a generic irq chip */
+ mxs_gpio_init_gc(port);
+
+ /* setup one handler for each entry */
+ irq_set_chained_handler(port->irq, mxs_gpio_irq_handler);
+ irq_set_handler_data(port->irq, port);
+
+ err = bgpio_init(&port->bgc, &pdev->dev, 4,
+ port->base + PINCTRL_DIN(port->id),
+ port->base + PINCTRL_DOUT(port->id), NULL,
+ port->base + PINCTRL_DOE(port->id), NULL, false);
+ if (err)
+ goto out_iounmap;
+
+ port->bgc.gc.to_irq = mxs_gpio_to_irq;
+ port->bgc.gc.base = port->id * 32;
+
+ err = gpiochip_add(&port->bgc.gc);
+ if (err)
+ goto out_bgpio_remove;
+
+ return 0;
+
+out_bgpio_remove:
+ bgpio_remove(&port->bgc);
+out_iounmap:
+ if (iores)
+ iounmap(port->base);
+out_release_mem:
+ if (iores)
+ release_mem_region(iores->start, resource_size(iores));
+out_kfree:
+ kfree(port);
+ dev_info(&pdev->dev, "%s failed with error %d\n", __func__, err);
+ return err;
+}
+
+static struct platform_driver mxs_gpio_driver = {
+ .driver = {
+ .name = "gpio-mxs",
+ .owner = THIS_MODULE,
+ },
+ .probe = mxs_gpio_probe,
+};
+
+static int __init mxs_gpio_init(void)
+{
+ return platform_driver_register(&mxs_gpio_driver);
+}
+postcore_initcall(mxs_gpio_init);
+
+MODULE_AUTHOR("Freescale Semiconductor, "
+ "Daniel Mack <danielncaiaq.de>, "
+ "Juergen Beisert <kernel@pengutronix.de>");
+MODULE_DESCRIPTION("Freescale MXS GPIO");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 35bebde23e8..0599854e221 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -54,6 +54,11 @@ struct gpio_bank {
struct device *dev;
bool dbck_flag;
int stride;
+ u32 width;
+
+ void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
+
+ struct omap_gpio_reg_offs *regs;
};
#ifdef CONFIG_ARCH_OMAP3
@@ -79,121 +84,18 @@ static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
*/
static struct gpio_bank *gpio_bank;
-static int bank_width;
-
/* TODO: Analyze removing gpio_bank_count usage from driver code */
int gpio_bank_count;
-static inline struct gpio_bank *get_gpio_bank(int gpio)
-{
- if (cpu_is_omap15xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1];
- }
- if (cpu_is_omap16xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1 + (gpio >> 4)];
- }
- if (cpu_is_omap7xx()) {
- if (OMAP_GPIO_IS_MPUIO(gpio))
- return &gpio_bank[0];
- return &gpio_bank[1 + (gpio >> 5)];
- }
- if (cpu_is_omap24xx())
- return &gpio_bank[gpio >> 5];
- if (cpu_is_omap34xx() || cpu_is_omap44xx())
- return &gpio_bank[gpio >> 5];
- BUG();
- return NULL;
-}
-
-static inline int get_gpio_index(int gpio)
-{
- if (cpu_is_omap7xx())
- return gpio & 0x1f;
- if (cpu_is_omap24xx())
- return gpio & 0x1f;
- if (cpu_is_omap34xx() || cpu_is_omap44xx())
- return gpio & 0x1f;
- return gpio & 0x0f;
-}
-
-static inline int gpio_valid(int gpio)
-{
- if (gpio < 0)
- return -1;
- if (cpu_class_is_omap1() && OMAP_GPIO_IS_MPUIO(gpio)) {
- if (gpio >= OMAP_MAX_GPIO_LINES + 16)
- return -1;
- return 0;
- }
- if (cpu_is_omap15xx() && gpio < 16)
- return 0;
- if ((cpu_is_omap16xx()) && gpio < 64)
- return 0;
- if (cpu_is_omap7xx() && gpio < 192)
- return 0;
- if (cpu_is_omap2420() && gpio < 128)
- return 0;
- if (cpu_is_omap2430() && gpio < 160)
- return 0;
- if ((cpu_is_omap34xx() || cpu_is_omap44xx()) && gpio < 192)
- return 0;
- return -1;
-}
-
-static int check_gpio(int gpio)
-{
- if (unlikely(gpio_valid(gpio) < 0)) {
- printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
- dump_stack();
- return -1;
- }
- return 0;
-}
+#define GPIO_INDEX(bank, gpio) ((gpio) % (bank)->width)
+#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
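These two macros replace the old cpu_is_*() helpers get_gpio_index() and gpio_valid(): with the width stored per bank, one expression covers 16-bit MPUIO banks and 32-bit banks alike. A sketch of how the per-bank width changes the result (the struct layout here is illustrative):

#include <stdio.h>

struct bank {
	unsigned width;		/* 16 for MPUIO, 32 for later OMAPs */
};

#define GPIO_INDEX(bank, gpio)	((gpio) % (bank)->width)
#define GPIO_BIT(bank, gpio)	(1u << GPIO_INDEX(bank, gpio))

int main(void)
{
	struct bank mpuio = { .width = 16 }, bank32 = { .width = 32 };

	/* global gpio 18: bit 2 in a 16-wide bank, bit 18 in a 32-wide one */
	printf("16-wide: index %u, bit 0x%04x\n",
	       GPIO_INDEX(&mpuio, 18), GPIO_BIT(&mpuio, 18));
	printf("32-wide: index %u, bit 0x%08x\n",
	       GPIO_INDEX(&bank32, 18), GPIO_BIT(&bank32, 18));
	return 0;
}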
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
void __iomem *reg = bank->base;
u32 l;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DIR_CONTROL;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_OE;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ reg += bank->regs->direction;
l = __raw_readl(reg);
if (is_input)
l |= 1 << gpio;
@@ -202,165 +104,48 @@ static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
__raw_writel(l, reg);
}
-static void _set_gpio_dataout(struct gpio_bank *bank, int gpio, int enable)
+
+/* set data out value using dedicate set/clear register */
+static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
void __iomem *reg = bank->base;
- u32 l = 0;
+ u32 l = GPIO_BIT(bank, gpio);
+
+ if (enable)
+ reg += bank->regs->set_dataout;
+ else
+ reg += bank->regs->clr_dataout;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_OUTPUT / bank->stride;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_OUTPUT;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- if (enable)
- reg += OMAP1610_GPIO_SET_DATAOUT;
- else
- reg += OMAP1610_GPIO_CLEAR_DATAOUT;
- l = 1 << gpio;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_OUTPUT;
- l = __raw_readl(reg);
- if (enable)
- l |= 1 << gpio;
- else
- l &= ~(1 << gpio);
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- if (enable)
- reg += OMAP24XX_GPIO_SETDATAOUT;
- else
- reg += OMAP24XX_GPIO_CLEARDATAOUT;
- l = 1 << gpio;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- if (enable)
- reg += OMAP4_GPIO_SETDATAOUT;
- else
- reg += OMAP4_GPIO_CLEARDATAOUT;
- l = 1 << gpio;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
__raw_writel(l, reg);
}
-static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
+/* set data out value using mask register */
+static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
- void __iomem *reg;
+ void __iomem *reg = bank->base + bank->regs->dataout;
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+ u32 l;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
- reg = bank->base;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_INPUT_LATCH / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_INPUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DATAIN;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_INPUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_DATAIN;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_DATAIN;
- break;
-#endif
- default:
- return -EINVAL;
- }
- return (__raw_readl(reg)
- & (1 << get_gpio_index(gpio))) != 0;
+ l = __raw_readl(reg);
+ if (enable)
+ l |= gpio_bit;
+ else
+ l &= ~gpio_bit;
+ __raw_writel(l, reg);
}
-static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
- void __iomem *reg;
+ void __iomem *reg = bank->base + bank->regs->datain;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
- reg = bank->base;
+ return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
+}
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_OUTPUT / bank->stride;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DATA_OUTPUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DATAOUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DATA_OUTPUT;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_DATAOUT;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_DATAOUT;
- break;
-#endif
- default:
- return -EINVAL;
- }
+static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg = bank->base + bank->regs->dataout;
- return (__raw_readl(reg) & (1 << get_gpio_index(gpio))) != 0;
+ return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}
#define MOD_REG_BIT(reg, bit_mask, set) \
@@ -383,7 +168,7 @@ do { \
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
unsigned debounce)
{
- void __iomem *reg = bank->base;
+ void __iomem *reg;
u32 val;
u32 l;
@@ -397,21 +182,12 @@ static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
else
debounce = (debounce / 0x1f) - 1;
- l = 1 << get_gpio_index(gpio);
-
- if (bank->method == METHOD_GPIO_44XX)
- reg += OMAP4_GPIO_DEBOUNCINGTIME;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_VAL;
+ l = GPIO_BIT(bank, gpio);
+ reg = bank->base + bank->regs->debounce;
__raw_writel(debounce, reg);
- reg = bank->base;
- if (bank->method == METHOD_GPIO_44XX)
- reg += OMAP4_GPIO_DEBOUNCENABLE;
- else
- reg += OMAP24XX_GPIO_DEBOUNCE_EN;
-
+ reg = bank->base + bank->regs->debounce_en;
val = __raw_readl(reg);
if (debounce) {
@@ -629,9 +405,6 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
else
gpio = d->irq - IH_GPIO_BASE;
- if (check_gpio(gpio) < 0)
- return -EINVAL;
-
if (type & ~IRQ_TYPE_SENSE_MASK)
return -EINVAL;
@@ -642,7 +415,7 @@ static int gpio_irq_type(struct irq_data *d, unsigned type)
bank = irq_data_get_irq_chip_data(d);
spin_lock_irqsave(&bank->lock, flags);
- retval = _set_gpio_triggering(bank, get_gpio_index(gpio), type);
+ retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
spin_unlock_irqrestore(&bank->lock, flags);
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
@@ -657,195 +430,81 @@ static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- /* MPUIO irqstatus is reset by reading the status register,
- * so do nothing here */
- return;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_STATUS;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_IRQSTATUS1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_STATUS;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_IRQSTATUS1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_IRQSTATUS0;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
- }
+ reg += bank->regs->irqstatus;
__raw_writel(gpio_mask, reg);
/* Workaround for clearing DSP GPIO interrupts to allow retention */
- if (cpu_is_omap24xx() || cpu_is_omap34xx())
- reg = bank->base + OMAP24XX_GPIO_IRQSTATUS2;
- else if (cpu_is_omap44xx())
- reg = bank->base + OMAP4_GPIO_IRQSTATUS1;
-
- if (cpu_is_omap24xx() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
+ if (bank->regs->irqstatus2) {
+ reg = bank->base + bank->regs->irqstatus2;
__raw_writel(gpio_mask, reg);
+ }
/* Flush posted write for the irq status to avoid spurious interrupts */
__raw_readl(reg);
- }
}
static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
- _clear_gpio_irqbank(bank, 1 << get_gpio_index(gpio));
+ _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
void __iomem *reg = bank->base;
- int inv = 0;
u32 l;
- u32 mask;
-
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- mask = 0xffff;
- inv = 1;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_MASK;
- mask = 0xffff;
- inv = 1;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_IRQENABLE1;
- mask = 0xffff;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_MASK;
- mask = 0xffffffff;
- inv = 1;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_IRQENABLE1;
- mask = 0xffffffff;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_IRQSTATUSSET0;
- mask = 0xffffffff;
- break;
-#endif
- default:
- WARN_ON(1);
- return 0;
- }
+ u32 mask = (1 << bank->width) - 1;
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (inv)
+ if (bank->regs->irqenable_inv)
l = ~l;
l &= mask;
return l;
}
-static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask, int enable)
+static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
void __iomem *reg = bank->base;
u32 l;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP1
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_GPIO_MASKIT / bank->stride;
- l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
- else
- l |= gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_INT_MASK;
+ if (bank->regs->set_irqenable) {
+ reg += bank->regs->set_irqenable;
+ l = gpio_mask;
+ } else {
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
+ if (bank->regs->irqenable_inv)
+ l &= ~gpio_mask;
else
l |= gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_GPIO_1610:
- if (enable)
- reg += OMAP1610_GPIO_SET_IRQENABLE1;
- else
- reg += OMAP1610_GPIO_CLEAR_IRQENABLE1;
+ }
+
+ __raw_writel(l, reg);
+}
+
+static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
+{
+ void __iomem *reg = bank->base;
+ u32 l;
+
+ if (bank->regs->clr_irqenable) {
+ reg += bank->regs->clr_irqenable;
l = gpio_mask;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_INT_MASK;
+ } else {
+ reg += bank->regs->irqenable;
l = __raw_readl(reg);
- if (enable)
- l &= ~(gpio_mask);
- else
+ if (bank->regs->irqenable_inv)
l |= gpio_mask;
- break;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- case METHOD_GPIO_24XX:
- if (enable)
- reg += OMAP24XX_GPIO_SETIRQENABLE1;
- else
- reg += OMAP24XX_GPIO_CLEARIRQENABLE1;
- l = gpio_mask;
- break;
-#endif
-#ifdef CONFIG_ARCH_OMAP4
- case METHOD_GPIO_44XX:
- if (enable)
- reg += OMAP4_GPIO_IRQSTATUSSET0;
else
- reg += OMAP4_GPIO_IRQSTATUSCLR0;
- l = gpio_mask;
- break;
-#endif
- default:
- WARN_ON(1);
- return;
+ l &= ~gpio_mask;
}
+
__raw_writel(l, reg);
}
static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
- _enable_gpio_irqbank(bank, 1 << get_gpio_index(gpio), enable);
+ _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}
/*
@@ -858,50 +517,32 @@ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int ena
*/
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
- unsigned long uninitialized_var(flags);
+ u32 gpio_bit = GPIO_BIT(bank, gpio);
+ unsigned long flags;
- switch (bank->method) {
-#ifdef CONFIG_ARCH_OMAP16XX
- case METHOD_MPUIO:
- case METHOD_GPIO_1610:
- spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->suspend_wakeup |= (1 << gpio);
- else
- bank->suspend_wakeup &= ~(1 << gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
-#endif
-#ifdef CONFIG_ARCH_OMAP2PLUS
- case METHOD_GPIO_24XX:
- case METHOD_GPIO_44XX:
- if (bank->non_wakeup_gpios & (1 << gpio)) {
- printk(KERN_ERR "Unable to modify wakeup on "
- "non-wakeup GPIO%d\n",
- (bank - gpio_bank) * 32 + gpio);
- return -EINVAL;
- }
- spin_lock_irqsave(&bank->lock, flags);
- if (enable)
- bank->suspend_wakeup |= (1 << gpio);
- else
- bank->suspend_wakeup &= ~(1 << gpio);
- spin_unlock_irqrestore(&bank->lock, flags);
- return 0;
-#endif
- default:
- printk(KERN_ERR "Can't enable GPIO wakeup for method %i\n",
- bank->method);
+ if (bank->non_wakeup_gpios & gpio_bit) {
+ dev_err(bank->dev,
+ "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
return -EINVAL;
}
+
+ spin_lock_irqsave(&bank->lock, flags);
+ if (enable)
+ bank->suspend_wakeup |= gpio_bit;
+ else
+ bank->suspend_wakeup &= ~gpio_bit;
+
+ spin_unlock_irqrestore(&bank->lock, flags);
+
+ return 0;
}
static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
- _set_gpio_direction(bank, get_gpio_index(gpio), 1);
+ _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
_set_gpio_irqenable(bank, gpio, 0);
_clear_gpio_irqstatus(bank, gpio);
- _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
@@ -911,10 +552,8 @@ static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
struct gpio_bank *bank;
int retval;
- if (check_gpio(gpio) < 0)
- return -ENODEV;
bank = irq_data_get_irq_chip_data(d);
- retval = _set_gpio_wakeup(bank, get_gpio_index(gpio), enable);
+ retval = _set_gpio_wakeup(bank, gpio, enable);
return retval;
}
@@ -1030,31 +669,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_enter(chip, desc);
bank = irq_get_handler_data(irq);
-#ifdef CONFIG_ARCH_OMAP1
- if (bank->method == METHOD_MPUIO)
- isr_reg = bank->base +
- OMAP_MPUIO_GPIO_INT / bank->stride;
-#endif
-#ifdef CONFIG_ARCH_OMAP15XX
- if (bank->method == METHOD_GPIO_1510)
- isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
-#endif
-#if defined(CONFIG_ARCH_OMAP16XX)
- if (bank->method == METHOD_GPIO_1610)
- isr_reg = bank->base + OMAP1610_GPIO_IRQSTATUS1;
-#endif
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
- if (bank->method == METHOD_GPIO_7XX)
- isr_reg = bank->base + OMAP7XX_GPIO_INT_STATUS;
-#endif
-#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
- if (bank->method == METHOD_GPIO_24XX)
- isr_reg = bank->base + OMAP24XX_GPIO_IRQSTATUS1;
-#endif
-#if defined(CONFIG_ARCH_OMAP4)
- if (bank->method == METHOD_GPIO_44XX)
- isr_reg = bank->base + OMAP4_GPIO_IRQSTATUS0;
-#endif
+ isr_reg = bank->base + bank->regs->irqstatus;
if (WARN_ON(!isr_reg))
goto exit;
@@ -1076,9 +691,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
/* clear edge sensitive interrupts before handler(s) are
called so that we don't miss any interrupt occurred while
executing them */
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 0);
+ _disable_gpio_irqbank(bank, isr_saved & ~level_mask);
_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
- _enable_gpio_irqbank(bank, isr_saved & ~level_mask, 1);
+ _enable_gpio_irqbank(bank, isr_saved & ~level_mask);
/* if there is only edge sensitive GPIO pin interrupts
configured, we could unmask GPIO bank interrupt immediately */
@@ -1094,7 +709,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
gpio_irq = bank->virtual_irq_start;
for (; isr != 0; isr >>= 1, gpio_irq++) {
- gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
+ gpio_index = GPIO_INDEX(bank, irq_to_gpio(gpio_irq));
if (!(isr & 1))
continue;
@@ -1150,7 +765,7 @@ static void gpio_mask_irq(struct irq_data *d)
spin_lock_irqsave(&bank->lock, flags);
_set_gpio_irqenable(bank, gpio, 0);
- _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1158,13 +773,13 @@ static void gpio_unmask_irq(struct irq_data *d)
{
unsigned int gpio = d->irq - IH_GPIO_BASE;
struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
- unsigned int irq_mask = 1 << get_gpio_index(gpio);
+ unsigned int irq_mask = GPIO_BIT(bank, gpio);
u32 trigger = irqd_get_trigger_type(d);
unsigned long flags;
spin_lock_irqsave(&bank->lock, flags);
if (trigger)
- _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
+ _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
/* For level-triggered GPIOs, the clearing must be done after
* the HW source is cleared, thus after the handler has run */
@@ -1191,45 +806,8 @@ static struct irq_chip gpio_irq_chip = {
#ifdef CONFIG_ARCH_OMAP1
-/* MPUIO uses the always-on 32k clock */
-
-static void mpuio_ack_irq(struct irq_data *d)
-{
- /* The ISR is reset automatically, so do nothing here. */
-}
-
-static void mpuio_mask_irq(struct irq_data *d)
-{
- unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
-
- _set_gpio_irqenable(bank, gpio, 0);
-}
-
-static void mpuio_unmask_irq(struct irq_data *d)
-{
- unsigned int gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
- struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
-
- _set_gpio_irqenable(bank, gpio, 1);
-}
-
-static struct irq_chip mpuio_irq_chip = {
- .name = "MPUIO",
- .irq_ack = mpuio_ack_irq,
- .irq_mask = mpuio_mask_irq,
- .irq_unmask = mpuio_unmask_irq,
- .irq_set_type = gpio_irq_type,
-#ifdef CONFIG_ARCH_OMAP16XX
- /* REVISIT: assuming only 16xx supports MPUIO wake events */
- .irq_set_wake = gpio_wake_enable,
-#endif
-};
-
-
#define bank_is_mpuio(bank) ((bank)->method == METHOD_MPUIO)
-
#ifdef CONFIG_ARCH_OMAP16XX
#include <linux/platform_device.h>
@@ -1289,7 +867,7 @@ static struct platform_device omap_mpuio_device = {
static inline void mpuio_init(void)
{
- struct gpio_bank *bank = get_gpio_bank(OMAP_MPUIO(0));
+ struct gpio_bank *bank = &gpio_bank[0];
platform_set_drvdata(&omap_mpuio_device, bank);
if (platform_driver_register(&omap_mpuio_driver) == 0)
@@ -1302,8 +880,6 @@ static inline void mpuio_init(void) {}
#else
-extern struct irq_chip mpuio_irq_chip;
-
#define bank_is_mpuio(bank) 0
static inline void mpuio_init(void) {}
@@ -1329,31 +905,8 @@ static int gpio_input(struct gpio_chip *chip, unsigned offset)
static int gpio_is_input(struct gpio_bank *bank, int mask)
{
- void __iomem *reg = bank->base;
+ void __iomem *reg = bank->base + bank->regs->direction;
- switch (bank->method) {
- case METHOD_MPUIO:
- reg += OMAP_MPUIO_IO_CNTL / bank->stride;
- break;
- case METHOD_GPIO_1510:
- reg += OMAP1510_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_1610:
- reg += OMAP1610_GPIO_DIRECTION;
- break;
- case METHOD_GPIO_7XX:
- reg += OMAP7XX_GPIO_DIR_CONTROL;
- break;
- case METHOD_GPIO_24XX:
- reg += OMAP24XX_GPIO_OE;
- break;
- case METHOD_GPIO_44XX:
- reg += OMAP4_GPIO_OE;
- break;
- default:
- WARN_ONCE(1, "gpio_is_input: incorrect OMAP GPIO method");
- return -EINVAL;
- }
return __raw_readl(reg) & mask;
}
@@ -1365,9 +918,9 @@ static int gpio_get(struct gpio_chip *chip, unsigned offset)
u32 mask;
gpio = chip->base + offset;
- bank = get_gpio_bank(gpio);
+ bank = container_of(chip, struct gpio_bank, chip);
reg = bank->base;
- mask = 1 << get_gpio_index(gpio);
+ mask = GPIO_BIT(bank, gpio);
if (gpio_is_input(bank, mask))
return _get_gpio_datain(bank, gpio);
@@ -1382,7 +935,7 @@ static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_dataout(bank, offset, value);
+ bank->set_dataout(bank, offset, value);
_set_gpio_direction(bank, offset, 0);
spin_unlock_irqrestore(&bank->lock, flags);
return 0;
@@ -1416,7 +969,7 @@ static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
bank = container_of(chip, struct gpio_bank, chip);
spin_lock_irqsave(&bank->lock, flags);
- _set_gpio_dataout(bank, offset, value);
+ bank->set_dataout(bank, offset, value);
spin_unlock_irqrestore(&bank->lock, flags);
}
@@ -1432,19 +985,17 @@ static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
+ static bool called;
u32 rev;
- if (cpu_is_omap16xx() && !(bank->method != METHOD_MPUIO))
- rev = __raw_readw(bank->base + OMAP1610_GPIO_REVISION);
- else if (cpu_is_omap24xx() || cpu_is_omap34xx())
- rev = __raw_readl(bank->base + OMAP24XX_GPIO_REVISION);
- else if (cpu_is_omap44xx())
- rev = __raw_readl(bank->base + OMAP4_GPIO_REVISION);
- else
+ if (called || bank->regs->revision == USHRT_MAX)
return;
- printk(KERN_INFO "OMAP GPIO hardware version %d.%d\n",
+ rev = __raw_readw(bank->base + bank->regs->revision);
+ pr_info("OMAP GPIO hardware version %d.%d\n",
(rev >> 4) & 0x0f, rev & 0x0f);
+
+ called = true;
}
/* This lock class tells lockdep that GPIO irqs are in a different
@@ -1526,6 +1077,30 @@ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
}
}
+static __init void
+omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
+ unsigned int num)
+{
+ struct irq_chip_generic *gc;
+ struct irq_chip_type *ct;
+
+ gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
+ handle_simple_irq);
+ ct = gc->chip_types;
+
+ /* NOTE: No ack required, reading IRQ status clears it. */
+ ct->chip.irq_mask = irq_gc_mask_set_bit;
+ ct->chip.irq_unmask = irq_gc_mask_clr_bit;
+ ct->chip.irq_set_type = gpio_irq_type;
+ /* REVISIT: assuming only 16xx supports MPUIO wake events */
+ if (cpu_is_omap16xx())
+ ct->chip.irq_set_wake = gpio_wake_enable;
+
+ ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
+ irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
+ IRQ_NOREQUEST | IRQ_NOPROBE, 0);
+}
+
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
@@ -1553,22 +1128,23 @@ static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
} else {
bank->chip.label = "gpio";
bank->chip.base = gpio;
- gpio += bank_width;
+ gpio += bank->width;
}
- bank->chip.ngpio = bank_width;
+ bank->chip.ngpio = bank->width;
gpiochip_add(&bank->chip);
for (j = bank->virtual_irq_start;
- j < bank->virtual_irq_start + bank_width; j++) {
+ j < bank->virtual_irq_start + bank->width; j++) {
irq_set_lockdep_class(j, &gpio_lock_class);
irq_set_chip_data(j, bank);
- if (bank_is_mpuio(bank))
- irq_set_chip(j, &mpuio_irq_chip);
- else
+ if (bank_is_mpuio(bank)) {
+ omap_mpuio_alloc_gc(bank, j, bank->width);
+ } else {
irq_set_chip(j, &gpio_irq_chip);
- irq_set_handler(j, handle_simple_irq);
- set_irq_flags(j, IRQF_VALID);
+ irq_set_handler(j, handle_simple_irq);
+ set_irq_flags(j, IRQF_VALID);
+ }
}
irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank);
@@ -1610,7 +1186,14 @@ static int __devinit omap_gpio_probe(struct platform_device *pdev)
bank->dev = &pdev->dev;
bank->dbck_flag = pdata->dbck_flag;
bank->stride = pdata->bank_stride;
- bank_width = pdata->bank_width;
+ bank->width = pdata->bank_width;
+
+ bank->regs = pdata->regs;
+
+ if (bank->regs->set_dataout && bank->regs->clr_dataout)
+ bank->set_dataout = _set_gpio_dataout_reg;
+ else
+ bank->set_dataout = _set_gpio_dataout_mask;
spin_lock_init(&bank->lock);
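
The probe hunk caches one of two data-out helpers in bank->set_dataout, depending on whether the register block provides separate set/clear registers. A sketch of the two strategies; the register offsets below are hypothetical, for illustration only:

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical register offsets, not taken from any real SoC. */
#define GPIO_DATAOUT		0x3c
#define GPIO_SETDATAOUT		0x94
#define GPIO_CLEARDATAOUT	0x90

static void set_dataout_reg(void __iomem *base, unsigned offset, int value)
{
	u32 bit = 1 << offset;

	/* Dedicated set/clear registers: one atomic write, no risk of
	 * racing with writers of other bits in the same bank. */
	if (value)
		writel(bit, base + GPIO_SETDATAOUT);
	else
		writel(bit, base + GPIO_CLEARDATAOUT);
}

static void set_dataout_mask(void __iomem *base, unsigned offset, int value)
{
	u32 reg = readl(base + GPIO_DATAOUT);

	/* Single data register: read-modify-write, so the caller must
	 * hold the bank spinlock to avoid losing concurrent updates. */
	if (value)
		reg |= 1 << offset;
	else
		reg &= ~(1 << offset);
	writel(reg, base + GPIO_DATAOUT);
}
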
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/gpio-pca953x.c
index 0451d7ac94a..c43b8ff626a 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1,5 +1,5 @@
/*
- * pca953x.c - 4/8/16 bit I/O ports
+ * PCA953x 4/8/16 bit I/O ports
*
* Copyright (C) 2005 Ben Gardner <bgardner@wabtec.com>
* Copyright (C) 2007 Marvell International Ltd.
@@ -21,7 +21,6 @@
#include <linux/slab.h>
#ifdef CONFIG_OF_GPIO
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#endif
#define PCA953X_INPUT 0
@@ -85,7 +84,6 @@ struct pca953x_chip {
#endif
struct i2c_client *client;
- struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
const char *const *names;
int chip_type;
@@ -437,7 +435,7 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
do {
level = __ffs(pending);
- generic_handle_irq(level + chip->irq_base);
+ handle_nested_irq(level + chip->irq_base);
pending &= ~(1 << level);
} while (pending);
@@ -446,13 +444,13 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
}
static int pca953x_irq_setup(struct pca953x_chip *chip,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id,
+ int irq_base)
{
struct i2c_client *client = chip->client;
- struct pca953x_platform_data *pdata = client->dev.platform_data;
int ret, offset = 0;
- if (pdata->irq_base != -1
+ if (irq_base != -1
&& (id->driver_data & PCA_INT)) {
int lvl;
@@ -474,15 +472,19 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
* this purpose.
*/
chip->irq_stat &= chip->reg_direction;
- chip->irq_base = pdata->irq_base;
mutex_init(&chip->irq_lock);
+ chip->irq_base = irq_alloc_descs(-1, irq_base, chip->gpio_chip.ngpio, -1);
+ if (chip->irq_base < 0)
+ goto out_failed;
+
for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
int irq = lvl + chip->irq_base;
+ irq_clear_status_flags(irq, IRQ_NOREQUEST);
irq_set_chip_data(irq, chip);
- irq_set_chip_and_handler(irq, &pca953x_irq_chip,
- handle_simple_irq);
+ irq_set_chip(irq, &pca953x_irq_chip);
+ irq_set_nested_thread(irq, true);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
#else
@@ -493,8 +495,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
ret = request_threaded_irq(client->irq,
NULL,
pca953x_irq_handler,
- IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
dev_name(&client->dev), chip);
if (ret) {
dev_err(&client->dev, "failed to request irq %d\n",
@@ -514,17 +515,19 @@ out_failed:
static void pca953x_irq_teardown(struct pca953x_chip *chip)
{
- if (chip->irq_base != -1)
+ if (chip->irq_base != -1) {
+ irq_free_descs(chip->irq_base, chip->gpio_chip.ngpio);
free_irq(chip->client->irq, chip);
+ }
}
#else /* CONFIG_GPIO_PCA953X_IRQ */
static int pca953x_irq_setup(struct pca953x_chip *chip,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id,
+ int irq_base)
{
struct i2c_client *client = chip->client;
- struct pca953x_platform_data *pdata = client->dev.platform_data;
- if (pdata->irq_base != -1 && (id->driver_data & PCA_INT))
+ if (irq_base != -1 && (id->driver_data & PCA_INT))
dev_warn(&client->dev, "interrupt support not compiled in\n");
return 0;
@@ -541,46 +544,39 @@ static void pca953x_irq_teardown(struct pca953x_chip *chip)
#ifdef CONFIG_OF_GPIO
/*
* Translate OpenFirmware node properties into platform_data
+ * WARNING: This is DEPRECATED and will be removed eventually!
*/
-static struct pca953x_platform_data *
-pca953x_get_alt_pdata(struct i2c_client *client)
+void
+pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
- struct pca953x_platform_data *pdata;
struct device_node *node;
const __be32 *val;
int size;
node = client->dev.of_node;
if (node == NULL)
- return NULL;
-
- pdata = kzalloc(sizeof(struct pca953x_platform_data), GFP_KERNEL);
- if (pdata == NULL) {
- dev_err(&client->dev, "Unable to allocate platform_data\n");
- return NULL;
- }
+ return;
- pdata->gpio_base = -1;
+ *gpio_base = -1;
val = of_get_property(node, "linux,gpio-base", &size);
+ WARN(val, "%s: device-tree property 'linux,gpio-base' is deprecated!", __func__);
if (val) {
if (size != sizeof(*val))
dev_warn(&client->dev, "%s: wrong linux,gpio-base\n",
node->full_name);
else
- pdata->gpio_base = be32_to_cpup(val);
+ *gpio_base = be32_to_cpup(val);
}
val = of_get_property(node, "polarity", NULL);
+ WARN(val, "%s: device-tree property 'polarity' is deprecated!", __func__);
if (val)
- pdata->invert = *val;
-
- return pdata;
+ *invert = *val;
}
#else
-static struct pca953x_platform_data *
-pca953x_get_alt_pdata(struct i2c_client *client)
+void
+pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
- return NULL;
}
#endif
@@ -642,6 +638,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
{
struct pca953x_platform_data *pdata;
struct pca953x_chip *chip;
+ int irq_base = 0, invert = 0;
int ret = 0;
chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
@@ -649,26 +646,22 @@ static int __devinit pca953x_probe(struct i2c_client *client,
return -ENOMEM;
pdata = client->dev.platform_data;
- if (pdata == NULL) {
- pdata = pca953x_get_alt_pdata(client);
- /*
- * Unlike normal platform_data, this is allocated
- * dynamically and must be freed in the driver
- */
- chip->dyn_pdata = pdata;
- }
-
- if (pdata == NULL) {
- dev_dbg(&client->dev, "no platform data\n");
- ret = -EINVAL;
- goto out_failed;
+ if (pdata) {
+ irq_base = pdata->irq_base;
+ chip->gpio_start = pdata->gpio_base;
+ invert = pdata->invert;
+ chip->names = pdata->names;
+ } else {
+ pca953x_get_alt_pdata(client, &chip->gpio_start, &invert);
+#ifdef CONFIG_OF_GPIO
+ /* If I2C node has no interrupts property, disable GPIO interrupts */
+ if (of_find_property(client->dev.of_node, "interrupts", NULL) == NULL)
+ irq_base = -1;
+#endif
}
chip->client = client;
- chip->gpio_start = pdata->gpio_base;
-
- chip->names = pdata->names;
chip->chip_type = id->driver_data & (PCA953X_TYPE | PCA957X_TYPE);
mutex_init(&chip->i2c_lock);
@@ -679,13 +672,13 @@ static int __devinit pca953x_probe(struct i2c_client *client,
pca953x_setup_gpio(chip, id->driver_data & PCA_GPIO_MASK);
if (chip->chip_type == PCA953X_TYPE)
- device_pca953x_init(chip, pdata->invert);
+ device_pca953x_init(chip, invert);
else if (chip->chip_type == PCA957X_TYPE)
- device_pca957x_init(chip, pdata->invert);
+ device_pca957x_init(chip, invert);
else
goto out_failed;
- ret = pca953x_irq_setup(chip, id);
+ ret = pca953x_irq_setup(chip, id, irq_base);
if (ret)
goto out_failed;
@@ -693,7 +686,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
if (ret)
goto out_failed_irq;
- if (pdata->setup) {
+ if (pdata && pdata->setup) {
ret = pdata->setup(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0)
@@ -706,7 +699,6 @@ static int __devinit pca953x_probe(struct i2c_client *client,
out_failed_irq:
pca953x_irq_teardown(chip);
out_failed:
- kfree(chip->dyn_pdata);
kfree(chip);
return ret;
}
@@ -717,7 +709,7 @@ static int pca953x_remove(struct i2c_client *client)
struct pca953x_chip *chip = i2c_get_clientdata(client);
int ret = 0;
- if (pdata->teardown) {
+ if (pdata && pdata->teardown) {
ret = pdata->teardown(client, chip->gpio_chip.base,
chip->gpio_chip.ngpio, pdata->context);
if (ret < 0) {
@@ -735,7 +727,6 @@ static int pca953x_remove(struct i2c_client *client)
}
pca953x_irq_teardown(chip);
- kfree(chip->dyn_pdata);
kfree(chip);
return 0;
}
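
The pca953x changes above replace generic_handle_irq() with handle_nested_irq() and mark each child interrupt with irq_set_nested_thread(). The reason: the expander sits on I2C, so demultiplexing must happen in a threaded handler where sleeping is allowed, and the child handlers then run nested in that same thread. A sketch of the pattern, with read_pending_mask() as a hypothetical helper:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bitops.h>

struct my_chip {
	unsigned int irq_base;
	/* ... i2c client, locks, cached state ... */
};

/* Hypothetical: reads and clears the expander's pending-interrupt
 * register over I2C; may sleep. */
static u16 read_pending_mask(struct my_chip *chip);

static irqreturn_t expander_irq_thread(int irq, void *devid)
{
	struct my_chip *chip = devid;
	u16 pending = read_pending_mask(chip);

	while (pending) {
		int bit = __ffs(pending);

		/* Runs the child handler in this thread's context, so
		 * it may also perform sleeping I2C transfers. */
		handle_nested_irq(chip->irq_base + bit);
		pending &= ~BIT(bit);
	}
	return IRQ_HANDLED;
}
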
diff --git a/drivers/gpio/pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 879b473aab5..7369fdda92b 100644
--- a/drivers/gpio/pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -1,5 +1,5 @@
/*
- * pcf857x - driver for pcf857x, pca857x, and pca967x I2C GPIO expanders
+ * Driver for pcf857x, pca857x, and pca967x I2C GPIO expanders
*
* Copyright (C) 2007 David Brownell
*
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/gpio-pch.c
index 36919e77c49..36919e77c49 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/gpio-pch.c
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/gpio-pl061.c
index 6fcb28cdd86..2c5a18f32bf 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -1,7 +1,5 @@
/*
- * linux/drivers/gpio/pl061.c
- *
- * Copyright (C) 2008, 2009 Provigent Ltd.
+ * Copyright (C) 2008, 2009 Provigent Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/gpio/gpio-plat-samsung.c b/drivers/gpio/gpio-plat-samsung.c
index ea37c046178..ef67f1952a7 100644
--- a/drivers/gpio/gpio-plat-samsung.c
+++ b/drivers/gpio/gpio-plat-samsung.c
@@ -1,5 +1,4 @@
-/* arch/arm/plat-samsung/gpiolib.c
- *
+/*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/gpio-rdc321x.c
index 2762698e020..2762698e020 100644
--- a/drivers/gpio/rdc321x-gpio.c
+++ b/drivers/gpio/gpio-rdc321x.c
diff --git a/drivers/gpio/gpio-s5pc100.c b/drivers/gpio/gpio-s5pc100.c
index 2842394b28b..7f87b0c76e0 100644
--- a/drivers/gpio/gpio-s5pc100.c
+++ b/drivers/gpio/gpio-s5pc100.c
@@ -1,4 +1,5 @@
-/* linux/arch/arm/mach-s5pc100/gpiolib.c
+/*
+ * S5PC100 - GPIOlib support
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -6,8 +7,6 @@
* Copyright 2009 Samsung Electronics Co
* Kyungmin Park <kyungmin.park@samsung.com>
*
- * S5PC100 - GPIOlib support
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/gpio/gpio-s5pv210.c b/drivers/gpio/gpio-s5pv210.c
index 1ba20a703e0..eb12f1602de 100644
--- a/drivers/gpio/gpio-s5pv210.c
+++ b/drivers/gpio/gpio-s5pv210.c
@@ -1,10 +1,9 @@
-/* linux/arch/arm/mach-s5pv210/gpiolib.c
+/*
+ * S5PV210 - GPIOlib support
*
* Copyright (c) 2010 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
- * S5PV210 - GPIOlib support
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
diff --git a/drivers/gpio/sch_gpio.c b/drivers/gpio/gpio-sch.c
index 56060421cdf..16351584549 100644
--- a/drivers/gpio/sch_gpio.c
+++ b/drivers/gpio/gpio-sch.c
@@ -1,5 +1,5 @@
/*
- * sch_gpio.c - GPIO interface for Intel Poulsbo SCH
+ * GPIO interface for Intel Poulsbo SCH
*
* Copyright (c) 2010 CompuLab Ltd
* Author: Denis Turischev <denis@compulab.co.il>
diff --git a/drivers/gpio/stmpe-gpio.c b/drivers/gpio/gpio-stmpe.c
index 4c980b57332..4c980b57332 100644
--- a/drivers/gpio/stmpe-gpio.c
+++ b/drivers/gpio/gpio-stmpe.c
diff --git a/drivers/gpio/sx150x.c b/drivers/gpio/gpio-sx150x.c
index a4f73534394..a4f73534394 100644
--- a/drivers/gpio/sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
diff --git a/drivers/gpio/tc3589x-gpio.c b/drivers/gpio/gpio-tc3589x.c
index 2a82e8999a4..2a82e8999a4 100644
--- a/drivers/gpio/tc3589x-gpio.c
+++ b/drivers/gpio/gpio-tc3589x.c
diff --git a/arch/arm/mach-tegra/gpio.c b/drivers/gpio/gpio-tegra.c
index 919d6383773..747eb40e8af 100644
--- a/arch/arm/mach-tegra/gpio.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -23,6 +23,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/of.h>
#include <asm/mach/irq.h>
@@ -340,6 +341,15 @@ static int __init tegra_gpio_init(void)
}
}
+#ifdef CONFIG_OF_GPIO
+ /*
+ * This isn't ideal, but it gets things hooked up until this
+ * driver is converted into a platform_device
+ */
+ tegra_gpio_chip.of_node = of_find_compatible_node(NULL, NULL,
+ "nvidia,tegra20-gpio");
+#endif /* CONFIG_OF_GPIO */
+
gpiochip_add(&tegra_gpio_chip);
for (i = INT_GPIO_BASE; i < (INT_GPIO_BASE + TEGRA_NR_GPIOS); i++) {
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/gpio-timberdale.c
index 0265872e57d..c593bd46bfb 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -1,5 +1,5 @@
/*
- * timbgpio.c timberdale FPGA GPIO driver
+ * Timberdale FPGA GPIO driver
* Copyright (c) 2009 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/gpio-tps65910.c
index 15097ca616d..b9c1c297669 100644
--- a/drivers/gpio/tps65910-gpio.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -1,5 +1,5 @@
/*
- * tps65910-gpio.c -- TI TPS6591x
+ * TI TPS6591x GPIO driver
*
* Copyright 2010 Texas Instruments Inc.
*
diff --git a/drivers/gpio/twl4030-gpio.c b/drivers/gpio/gpio-twl4030.c
index 57635ac35a7..b8b4f228757 100644
--- a/drivers/gpio/twl4030-gpio.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -1,5 +1,5 @@
/*
- * twl4030_gpio.c -- access to GPIOs on TWL4030/TPS659x0 chips
+ * Access to GPIOs on TWL4030/TPS659x0 chips
*
* Copyright (C) 2006-2007 Texas Instruments, Inc.
* Copyright (C) 2006 MontaVista Software, Inc.
diff --git a/drivers/gpio/gpio-u300.c b/drivers/gpio/gpio-u300.c
index d92790140fe..fd2dfeeefdf 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/gpio/gpio-u300.c
@@ -1,11 +1,8 @@
/*
- *
- * arch/arm/mach-u300/gpio.c
- *
+ * U300 GPIO module.
*
* Copyright (C) 2007-2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
- * U300 GPIO module.
* This driver supports either of the two basic GPIO cores
* available in the U300 platforms:
* COH 901 335 - Used in DB3150 (U300 1.0) and DB3200 (U330 1.0)
diff --git a/drivers/gpio/ucb1400_gpio.c b/drivers/gpio/gpio-ucb1400.c
index 50e6bd1392c..50e6bd1392c 100644
--- a/drivers/gpio/ucb1400_gpio.c
+++ b/drivers/gpio/gpio-ucb1400.c
diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/gpio-vr41xx.c
index a365be040b3..98723cb9ac6 100644
--- a/drivers/gpio/vr41xx_giu.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -518,7 +518,7 @@ static int __devinit giu_probe(struct platform_device *pdev)
if (!res)
return -EBUSY;
- giu_base = ioremap(res->start, res->end - res->start + 1);
+ giu_base = ioremap(res->start, resource_size(res));
if (!giu_base)
return -ENOMEM;
diff --git a/drivers/gpio/vx855_gpio.c b/drivers/gpio/gpio-vx855.c
index ef5aabd8b8b..ef5aabd8b8b 100644
--- a/drivers/gpio/vx855_gpio.c
+++ b/drivers/gpio/gpio-vx855.c
diff --git a/drivers/gpio/wm831x-gpio.c b/drivers/gpio/gpio-wm831x.c
index 2bcfb0be09f..deb949e75ec 100644
--- a/drivers/gpio/wm831x-gpio.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -1,5 +1,5 @@
/*
- * wm831x-gpio.c -- gpiolib support for Wolfson WM831x PMICs
+ * gpiolib support for Wolfson WM831x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
diff --git a/drivers/gpio/wm8350-gpiolib.c b/drivers/gpio/gpio-wm8350.c
index 359999290f5..a06af515483 100644
--- a/drivers/gpio/wm8350-gpiolib.c
+++ b/drivers/gpio/gpio-wm8350.c
@@ -1,5 +1,5 @@
/*
- * wm835x-gpiolib.c -- gpiolib support for Wolfson WM835x PMICs
+ * gpiolib support for Wolfson WM835x PMICs
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
diff --git a/drivers/gpio/wm8994-gpio.c b/drivers/gpio/gpio-wm8994.c
index c822baacd8f..96198f3fab7 100644
--- a/drivers/gpio/wm8994-gpio.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -1,5 +1,5 @@
/*
- * wm8994-gpio.c -- gpiolib support for Wolfson WM8994
+ * gpiolib support for Wolfson WM8994
*
* Copyright 2009 Wolfson Microelectronics PLC.
*
diff --git a/drivers/gpio/xilinx_gpio.c b/drivers/gpio/gpio-xilinx.c
index 846fbd5e31b..846fbd5e31b 100644
--- a/drivers/gpio/xilinx_gpio.c
+++ b/drivers/gpio/gpio-xilinx.c
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f245c588ae9..ce7914c4c04 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -262,6 +262,7 @@ enum intel_pch {
};
#define QUIRK_PIPEA_FORCE (1<<0)
+#define QUIRK_LVDS_SSC_DISABLE (1<<1)
struct intel_fbdev;
@@ -1194,7 +1195,9 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode);
/* i915_gem_gtt.c */
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5c0d1247f45..a087e1bf0c2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1374,25 +1374,24 @@ i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
}
static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
- uint32_t size;
+ uint32_t gtt_size;
if (INTEL_INFO(dev)->gen >= 4 ||
- obj->tiling_mode == I915_TILING_NONE)
- return obj->base.size;
+ tiling_mode == I915_TILING_NONE)
+ return size;
/* Previous chips need a power-of-two fence region when tiling */
if (INTEL_INFO(dev)->gen == 3)
- size = 1024*1024;
+ gtt_size = 1024*1024;
else
- size = 512*1024;
+ gtt_size = 512*1024;
- while (size < obj->base.size)
- size <<= 1;
+ while (gtt_size < size)
+ gtt_size <<= 1;
- return size;
+ return gtt_size;
}
/**
@@ -1403,59 +1402,52 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
* potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
-
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (INTEL_INFO(dev)->gen >= 4 ||
- obj->tiling_mode == I915_TILING_NONE)
+ tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- return i915_gem_get_gtt_size(obj);
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
/**
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
* unfenced object
- * @obj: object to check
+ * @dev: the device
+ * @size: size of the object
+ * @tiling_mode: tiling mode of the object
*
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
+ uint32_t size,
+ int tiling_mode)
{
- struct drm_device *dev = obj->base.dev;
- int tile_height;
-
/*
* Minimum alignment is 4k (GTT page size) for sane hw.
*/
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
- obj->tiling_mode == I915_TILING_NONE)
+ tiling_mode == I915_TILING_NONE)
return 4096;
- /*
- * Older chips need unfenced tiled buffers to be aligned to the left
- * edge of an even tile row (where tile rows are counted as if the bo is
- * placed in a fenced gtt region).
+ /* Previous hardware however needs to be aligned to a power-of-two
+ * tile height. The simplest method for determining this is to reuse
+ * the power-of-two object size.
*/
- if (IS_GEN2(dev))
- tile_height = 16;
- else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
- tile_height = 32;
- else
- tile_height = 8;
-
- return tile_height * obj->stride * 2;
+ return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
int
@@ -2744,9 +2736,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
return -EINVAL;
}
- fence_size = i915_gem_get_gtt_size(obj);
- fence_alignment = i915_gem_get_gtt_alignment(obj);
- unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ unfenced_alignment =
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 82d70fd9e93..99c4faa59d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -348,7 +348,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
- i915_gem_get_unfenced_gtt_alignment(obj);
+ i915_gem_get_unfenced_gtt_alignment(dev,
+ obj->base.size,
+ args->tiling_mode);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
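
These i915 hunks change the alignment helpers to take (dev, size, tiling_mode) instead of a GEM object, so the required GTT footprint can be computed before an object is bound. The underlying rule, per i915_gem_get_gtt_size() above: pre-gen4 tiled objects need a power-of-two fence region, starting from a per-generation minimum. A condensed sketch of that rule:

/* Round a tiled object's size up to a power-of-two fence region,
 * starting from the per-generation minimum (1 MiB on gen3, 512 KiB
 * on earlier chips). */
static uint32_t fence_region_size(int gen, uint32_t size)
{
	uint32_t gtt_size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

	while (gtt_size < size)
		gtt_size <<= 1;

	return gtt_size;	/* e.g. a 1300 KiB gen3 object -> 2 MiB */
}
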
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 21b6f93fe91..0f1c799afea 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4305,7 +4305,8 @@ static void intel_update_watermarks(struct drm_device *dev)
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
- return dev_priv->lvds_use_ssc && i915_panel_use_ssc;
+ return dev_priv->lvds_use_ssc && i915_panel_use_ssc
+ && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
@@ -7810,6 +7811,15 @@ static void quirk_pipea_force (struct drm_device *dev)
DRM_DEBUG_DRIVER("applying pipe a force quirk\n");
}
+/*
+ * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
+ */
+static void quirk_ssc_force_disable(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -7838,6 +7848,9 @@ struct intel_quirk intel_quirks[] = {
/* 855 & before need to leave pipe A & dpll A up */
{ 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
{ 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
+
+ /* Lenovo U160 cannot use SSC on LVDS */
+ { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
};
static void intel_init_quirks(struct drm_device *dev)
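
The new table entry matches device 0x0046 with the Lenovo U160's subsystem IDs 0x17aa:0x3920, unlike the pipe-A entries which use PCI_ANY_ID wildcards. For reference, a table like this is typically walked by comparing the PCI device and subsystem IDs against each entry; a sketch of the equivalent of intel_init_quirks():

static void apply_intel_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		/* PCI_ANY_ID entries act as subsystem wildcards. */
		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
}
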
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 36ca465c00c..306b15f39c9 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -172,6 +172,20 @@ config HID_EZKEY
---help---
Support for Ezkey BTC 8193 keyboard.
+config HID_HOLTEK
+ tristate "Holtek On Line Grip based game controller support"
+ depends on USB_HID
+ ---help---
+ Say Y here if you have a Holtek On Line Grip based game controller.
+
+config HOLTEK_FF
+ bool "Holtek On Line Grip force feedback support"
+ depends on HID_HOLTEK
+ select INPUT_FF_MEMLESS
+ ---help---
+ Say Y here if you have a Holtek On Line Grip based game controller
+ and want to have force feedback support for it.
+
config HID_KEYTOUCH
tristate "Keytouch HID devices"
depends on USB_HID
@@ -322,6 +336,7 @@ config HID_MULTITOUCH
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
+ - XAT optical touch panels
If unsure, say N.
@@ -435,6 +450,7 @@ config HID_QUANTA
config HID_ROCCAT
tristate "Roccat special event support"
depends on USB_HID
+ select HID_ROCCAT_COMMON
---help---
Support for Roccat special events.
Say Y here if you have a Roccat mouse or keyboard and want OSD or
@@ -442,44 +458,40 @@ config HID_ROCCAT
config HID_ROCCAT_COMMON
tristate
+ depends on HID_ROCCAT
config HID_ROCCAT_ARVO
tristate "Roccat Arvo keyboard support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Arvo keyboard.
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Kone mouse.
config HID_ROCCAT_KONEPLUS
tristate "Roccat Kone[+] mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Kone[+] mouse.
config HID_ROCCAT_KOVAPLUS
tristate "Roccat Kova[+] mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Kova[+] mouse.
config HID_ROCCAT_PYRA
tristate "Roccat Pyra mouse support"
depends on USB_HID
- select HID_ROCCAT
- select HID_ROCCAT_COMMON
+ depends on HID_ROCCAT
---help---
Support for Roccat Pyra mouse.
@@ -495,6 +507,12 @@ config HID_SONY
---help---
Support for Sony PS3 controller.
+config HID_SPEEDLINK
+ tristate "Speedlink VAD Cezanne mouse support"
+ depends on USB_HID
+ ---help---
+ Support for Speedlink Vicious and Divine Cezanne mouse.
+
config HID_SUNPLUS
tristate "Sunplus wireless desktop"
depends on USB_HID
@@ -568,6 +586,12 @@ config HID_WACOM_POWER_SUPPLY
Say Y here if you want to enable power supply status monitoring for
Wacom Bluetooth devices.
+config HID_WIIMOTE
+ tristate "Nintendo Wii Remote support"
+ depends on BT_HIDP
+ ---help---
+ Support for the Nintendo Wii Remote bluetooth device.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index f8cc4ea7335..0a0a38e9fd2 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_HID_EMS_FF) += hid-emsff.o
obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
+obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_HID_ROCCAT_PYRA) += hid-roccat-pyra.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
+obj-$(CONFIG_HID_SPEEDLINK) += hid-speedlink.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
@@ -73,6 +75,7 @@ obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_WACOM) += hid-wacom.o
obj-$(CONFIG_HID_WALTOP) += hid-waltop.o
+obj-$(CONFIG_HID_WIIMOTE) += hid-wiimote.o
obj-$(CONFIG_USB_HID) += usbhid/
obj-$(CONFIG_USB_MOUSE) += usbhid/
diff --git a/drivers/hid/hid-axff.c b/drivers/hid/hid-axff.c
index b4554288de0..121514149e0 100644
--- a/drivers/hid/hid-axff.c
+++ b/drivers/hid/hid-axff.c
@@ -154,6 +154,7 @@ static int ax_probe(struct hid_device *hdev, const struct hid_device_id *id)
error = hid_hw_open(hdev);
if (error) {
dev_err(&hdev->dev, "hw open failed\n");
+ hid_hw_stop(hdev);
return error;
}
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6f3289a5788..1a5cf0c9cfc 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1388,6 +1388,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IRTOUCHSYSTEMS, USB_DEVICE_ID_IRTOUCH_INFRARED_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
@@ -1426,10 +1427,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LUMIO, USB_DEVICE_ID_CRYSTALTOUCH_DUAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
@@ -1491,6 +1494,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0709) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UNITEC, USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
@@ -1499,11 +1503,14 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ }
};
@@ -1771,7 +1778,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GTCO, USB_DEVICE_ID_GTCO_1007) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IMATION, USB_DEVICE_ID_DISC_STAKKA) },
- { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_YUREX) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KBGEAR, USB_DEVICE_ID_KBGEAR_JAMSTUDIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KWORLD, USB_DEVICE_ID_KWORLD_RADIO_FM700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_GPEN_560) },
@@ -1912,6 +1918,11 @@ static bool hid_ignore(struct hid_device *hdev)
hdev->product <= USB_DEVICE_ID_HANWANG_TABLET_LAST)
return true;
break;
+ case USB_VENDOR_ID_JESS:
+ if (hdev->product == USB_DEVICE_ID_JESS_YUREX &&
+ hdev->type == HID_TYPE_USBNONE)
+ return true;
+ break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 81877c67cae..a5dc13fe367 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -126,7 +126,12 @@ static int ems_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err;
}
- emsff_init(hdev);
+ ret = emsff_init(hdev);
+ if (ret) {
+ dev_err(&hdev->dev, "force feedback init failed\n");
+ hid_hw_stop(hdev);
+ goto err;
+ }
return 0;
err:
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
new file mode 100644
index 00000000000..91e3a032112
--- /dev/null
+++ b/drivers/hid/hid-holtekff.c
@@ -0,0 +1,240 @@
+/*
+ * Force feedback support for Holtek On Line Grip based gamepads
+ *
+ * These include at least a Brazilian "Clone Joypad Super Power Fire"
+ * which uses vendor ID 0x1241 and identifies as "HOLTEK On Line Grip".
+ *
+ * Copyright (c) 2011 Anssi Hannula <anssi.hannula@iki.fi>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+
+#ifdef CONFIG_HOLTEK_FF
+#include "usbhid/usbhid.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Force feedback support for Holtek On Line Grip based devices");
+
+/*
+ * These commands and parameters are currently known:
+ *
+ * byte 0: command id:
+ * 01 set effect parameters
+ * 02 play specified effect
+ * 03 stop specified effect
+ * 04 stop all effects
+ * 06 stop all effects
+ * (the difference between 04 and 06 isn't known; win driver
+ * sends 06,04 on application init, and 06 otherwise)
+ *
+ * Commands 01 and 02 need to be sent as pairs, i.e. you need to send 01
+ * before each 02.
+ *
+ * The rest of the bytes are parameters. Command 01 takes all of them, and
+ * commands 02,03 take only the effect id.
+ *
+ * byte 1:
+ * bits 0-3: effect id:
+ * 1: very strong rumble
+ * 2: periodic rumble, short intervals
+ * 3: very strong rumble
+ * 4: periodic rumble, long intervals
+ * 5: weak periodic rumble, long intervals
+ * 6: weak periodic rumble, short intervals
+ * 7: periodic rumble, short intervals
+ * 8: strong periodic rumble, short intervals
+ * 9: very strong rumble
+ * a: causes an error
+ * b: very strong periodic rumble, very short intervals
+ * c-f: nothing
+ * bit 6: right (weak) motor enabled
+ * bit 7: left (strong) motor enabled
+ *
+ * bytes 2-3: time in milliseconds, big-endian
+ * bytes 4-5: unknown (win driver seems to use at least 10e0 with effect 1
+ * and 0014 with effect 6)
+ * byte 6:
+ * bits 0-3: effect magnitude
+ */
+
+#define HOLTEKFF_MSG_LENGTH 7
+
+static const u8 start_effect_1[] = { 0x02, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 stop_all4[] = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+static const u8 stop_all6[] = { 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+struct holtekff_device {
+ struct hid_field *field;
+};
+
+static void holtekff_send(struct holtekff_device *holtekff,
+ struct hid_device *hid,
+ const u8 data[HOLTEKFF_MSG_LENGTH])
+{
+ int i;
+
+ for (i = 0; i < HOLTEKFF_MSG_LENGTH; i++) {
+ holtekff->field->value[i] = data[i];
+ }
+
+ dbg_hid("sending %02x %02x %02x %02x %02x %02x %02x\n", data[0],
+ data[1], data[2], data[3], data[4], data[5], data[6]);
+
+ usbhid_submit_report(hid, holtekff->field->report, USB_DIR_OUT);
+}
+
+static int holtekff_play(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct holtekff_device *holtekff = data;
+ int left, right;
+ /* effect type 1, length 65535 msec */
+ u8 buf[HOLTEKFF_MSG_LENGTH] =
+ { 0x01, 0x01, 0xff, 0xff, 0x10, 0xe0, 0x00 };
+
+ left = effect->u.rumble.strong_magnitude;
+ right = effect->u.rumble.weak_magnitude;
+ dbg_hid("called with 0x%04x 0x%04x\n", left, right);
+
+ if (!left && !right) {
+ holtekff_send(holtekff, hid, stop_all6);
+ return 0;
+ }
+
+ if (left)
+ buf[1] |= 0x80;
+ if (right)
+ buf[1] |= 0x40;
+
+ /* The device takes a single magnitude, so we just sum them up. */
+ buf[6] = min(0xf, (left >> 12) + (right >> 12));
+
+ holtekff_send(holtekff, hid, buf);
+ holtekff_send(holtekff, hid, start_effect_1);
+
+ return 0;
+}
+
+static int holtekff_init(struct hid_device *hid)
+{
+ struct holtekff_device *holtekff;
+ struct hid_report *report;
+ struct hid_input *hidinput = list_entry(hid->inputs.next,
+ struct hid_input, list);
+ struct list_head *report_list =
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ struct input_dev *dev = hidinput->input;
+ int error;
+
+ if (list_empty(report_list)) {
+ hid_err(hid, "no output report found\n");
+ return -ENODEV;
+ }
+
+ report = list_entry(report_list->next, struct hid_report, list);
+
+ if (report->maxfield < 1 || report->field[0]->report_count != 7) {
+ hid_err(hid, "unexpected output report layout\n");
+ return -ENODEV;
+ }
+
+ holtekff = kzalloc(sizeof(*holtekff), GFP_KERNEL);
+ if (!holtekff)
+ return -ENOMEM;
+
+ set_bit(FF_RUMBLE, dev->ffbit);
+
+ holtekff->field = report->field[0];
+
+ /* initialize the same way as win driver does */
+ holtekff_send(holtekff, hid, stop_all4);
+ holtekff_send(holtekff, hid, stop_all6);
+
+ error = input_ff_create_memless(dev, holtekff, holtekff_play);
+ if (error) {
+ kfree(holtekff);
+ return error;
+ }
+
+ hid_info(hid, "Force feedback for Holtek On Line Grip based devices by Anssi Hannula <anssi.hannula@iki.fi>\n");
+
+ return 0;
+}
+#else
+static inline int holtekff_init(struct hid_device *hid)
+{
+ return 0;
+}
+#endif
+
+static int holtek_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (ret) {
+ hid_err(hdev, "hw start failed\n");
+ goto err;
+ }
+
+ holtekff_init(hdev);
+
+ return 0;
+err:
+ return ret;
+}
+
+static const struct hid_device_id holtek_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, holtek_devices);
+
+static struct hid_driver holtek_driver = {
+ .name = "holtek",
+ .id_table = holtek_devices,
+ .probe = holtek_probe,
+};
+
+static int __init holtek_init(void)
+{
+ return hid_register_driver(&holtek_driver);
+}
+
+static void __exit holtek_exit(void)
+{
+ hid_unregister_driver(&holtek_driver);
+}
+
+module_init(holtek_init);
+module_exit(holtek_exit);
+
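
Since the new driver registers through input_ff_create_memless(), the device can be exercised from userspace with the standard force-feedback ioctls. A minimal rumble test; the event node path is an assumption for illustration:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	int fd = open("/dev/input/event5", O_RDWR);	/* hypothetical node */
	struct ff_effect fx;
	struct input_event play;

	if (fd < 0)
		return 1;

	memset(&fx, 0, sizeof(fx));
	fx.type = FF_RUMBLE;
	fx.id = -1;				/* let the kernel assign an id */
	fx.u.rumble.strong_magnitude = 0xc000;	/* left/strong motor */
	fx.u.rumble.weak_magnitude = 0x4000;	/* right/weak motor */
	fx.replay.length = 2000;		/* milliseconds */
	if (ioctl(fd, EVIOCSFF, &fx) < 0)
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = fx.id;			/* id filled in by EVIOCSFF */
	play.value = 1;				/* start playback */
	write(fd, &play, sizeof(play));
	sleep(3);
	close(fd);
	return 0;
}
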
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index a756ee6c7df..db63ccf21cc 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -350,6 +350,9 @@
#define USB_VENDOR_ID_ILITEK 0x222a
#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
+#define USB_VENDOR_ID_HOLTEK 0x1241
+#define USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP 0x5015
+
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -472,6 +475,8 @@
#define USB_DEVICE_ID_MS_LK6K 0x00f9
#define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
+#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
+#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -495,6 +500,9 @@
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
+#define USB_VENDOR_ID_NINTENDO 0x057e
+#define USB_DEVICE_ID_NINTENDO_WIIMOTE 0x0306
+
#define USB_VENDOR_ID_NTRIG 0x1b96
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN 0x0001
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1 0x0003
@@ -630,6 +638,7 @@
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064
#define USB_VENDOR_ID_UNITEC 0x227d
#define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709
@@ -663,6 +672,12 @@
#define USB_VENDOR_ID_WISEGROUP_LTD2 0x6677
#define USB_DEVICE_ID_SMARTJOY_DUAL_PLUS 0x8802
+#define USB_VENDOR_ID_X_TENSIONS 0x1ae7
+#define USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE 0x9001
+
+#define USB_VENDOR_ID_XAT 0x2505
+#define USB_DEVICE_ID_XAT_CSR 0x0220
+
#define USB_VENDOR_ID_YEALINK 0x6993
#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index 21f205f0925..a7f916e8fc3 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -41,6 +41,66 @@
#define LG_FF3 0x1000
#define LG_FF4 0x2000
+/* Size of the original descriptor of the Driving Force Pro wheel */
+#define DFP_RDESC_ORIG_SIZE 97
+
+/* Fixed report descriptor for Logitech Driving Force Pro wheel controller
+ *
+ * The original descriptor hides the separate throttle and brake axes in
+ * a custom vendor usage page, providing only a combined value as
+ * GenericDesktop.Y.
+ * This descriptor removes the combined Y axis and instead reports
+ * separate throttle (Y) and brake (RZ).
+ */
+static __u8 dfp_rdesc_fixed[] = {
+0x05, 0x01, /* Usage Page (Desktop), */
+0x09, 0x04, /* Usage (Joystick), */
+0xA1, 0x01, /* Collection (Application), */
+0xA1, 0x02, /* Collection (Logical), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x0E, /* Report Size (14), */
+0x14, /* Logical Minimum (0), */
+0x26, 0xFF, 0x3F, /* Logical Maximum (16383), */
+0x34, /* Physical Minimum (0), */
+0x46, 0xFF, 0x3F, /* Physical Maximum (16383), */
+0x09, 0x30, /* Usage (X), */
+0x81, 0x02, /* Input (Variable), */
+0x95, 0x0E, /* Report Count (14), */
+0x75, 0x01, /* Report Size (1), */
+0x25, 0x01, /* Logical Maximum (1), */
+0x45, 0x01, /* Physical Maximum (1), */
+0x05, 0x09, /* Usage Page (Button), */
+0x19, 0x01, /* Usage Minimum (01h), */
+0x29, 0x0E, /* Usage Maximum (0Eh), */
+0x81, 0x02, /* Input (Variable), */
+0x05, 0x01, /* Usage Page (Desktop), */
+0x95, 0x01, /* Report Count (1), */
+0x75, 0x04, /* Report Size (4), */
+0x25, 0x07, /* Logical Maximum (7), */
+0x46, 0x3B, 0x01, /* Physical Maximum (315), */
+0x65, 0x14, /* Unit (Degrees), */
+0x09, 0x39, /* Usage (Hat Switch), */
+0x81, 0x42, /* Input (Variable, Nullstate), */
+0x65, 0x00, /* Unit, */
+0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+0x46, 0xFF, 0x00, /* Physical Maximum (255), */
+0x75, 0x08, /* Report Size (8), */
+0x81, 0x01, /* Input (Constant), */
+0x09, 0x31, /* Usage (Y), */
+0x81, 0x02, /* Input (Variable), */
+0x09, 0x35, /* Usage (Rz), */
+0x81, 0x02, /* Input (Variable), */
+0x81, 0x01, /* Input (Constant), */
+0xC0, /* End Collection, */
+0xA1, 0x02, /* Collection (Logical), */
+0x09, 0x02, /* Usage (02h), */
+0x95, 0x07, /* Report Count (7), */
+0x91, 0x02, /* Output (Variable), */
+0xC0, /* End Collection, */
+0xC0 /* End Collection */
+};
+
+
/*
* Certain Logitech keyboards send in report #3 keys which are far
* above the logical maximum described in descriptor. This extends
@@ -74,6 +134,18 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[47] = 0x95;
rdesc[48] = 0x0B;
}
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_LOGITECH_DFP_WHEEL:
+ if (*rsize == DFP_RDESC_ORIG_SIZE) {
+ hid_info(hdev,
+ "fixing up Logitech Driving Force Pro report descriptor\n");
+ rdesc = dfp_rdesc_fixed;
+ *rsize = sizeof(dfp_rdesc_fixed);
+ }
+ break;
+ }
+
return rdesc;
}
@@ -380,7 +452,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL),
.driver_data = LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFP_WHEEL),
- .driver_data = LG_FF },
+ .driver_data = LG_NOGET | LG_FF },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL),
.driver_data = LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ),
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 0f6fc54dc19..e5c699b6c6f 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -23,11 +23,12 @@
#include "hid-ids.h"
-#define MS_HIDINPUT 0x01
-#define MS_ERGONOMY 0x02
-#define MS_PRESENTER 0x04
-#define MS_RDESC 0x08
-#define MS_NOGET 0x10
+#define MS_HIDINPUT 0x01
+#define MS_ERGONOMY 0x02
+#define MS_PRESENTER 0x04
+#define MS_RDESC 0x08
+#define MS_NOGET 0x10
+#define MS_DUPLICATE_USAGES 0x20
/*
* Microsoft Wireless Desktop Receiver (Model 1028) has
@@ -109,6 +110,18 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+
+ if (quirks & MS_DUPLICATE_USAGES)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
static int ms_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
@@ -179,8 +192,12 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_ERGONOMY | MS_RDESC },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
.driver_data = MS_PRESENTER },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K),
+ .driver_data = MS_ERGONOMY },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0),
.driver_data = MS_NOGET },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
+ .driver_data = MS_DUPLICATE_USAGES },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
@@ -193,6 +210,7 @@ static struct hid_driver ms_driver = {
.id_table = ms_devices,
.report_fixup = ms_report_fixup,
.input_mapping = ms_input_mapping,
+ .input_mapped = ms_input_mapped,
.event = ms_event,
.probe = ms_probe,
};
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 62cac4dc3b6..58d0e7aaf08 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -727,6 +727,10 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_UNITEC,
USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
+ /* XAT */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XAT,
+ USB_DEVICE_ID_XAT_CSR) },
{ }
};
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index ab19f2905d2..158b389d0fb 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -44,8 +44,6 @@ struct pk_device {
struct pcmidi_snd *pm; /* pcmidi device context */
};
-struct pcmidi_snd;
-
struct pcmidi_sustain {
unsigned long in_use;
struct pcmidi_snd *pm;
@@ -242,7 +240,7 @@ drop_note:
return;
}
-void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(unsigned long data)
{
struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
@@ -250,7 +248,7 @@ void pcmidi_sustained_note_release(unsigned long data)
pms->in_use = 0;
}
-void init_sustain_timers(struct pcmidi_snd *pm)
+static void init_sustain_timers(struct pcmidi_snd *pm)
{
struct pcmidi_sustain *pms;
unsigned i;
@@ -264,7 +262,7 @@ void init_sustain_timers(struct pcmidi_snd *pm)
}
}
-void stop_sustain_timers(struct pcmidi_snd *pm)
+static void stop_sustain_timers(struct pcmidi_snd *pm)
{
struct pcmidi_sustain *pms;
unsigned i;
@@ -499,7 +497,7 @@ static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 *data)
return 1;
}
-int pcmidi_handle_report(
+static int pcmidi_handle_report(
struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size)
{
int ret = 0;
@@ -518,7 +516,8 @@ int pcmidi_handle_report(
return ret;
}
-void pcmidi_setup_extra_keys(struct pcmidi_snd *pm, struct input_dev *input)
+static void pcmidi_setup_extra_keys(
+ struct pcmidi_snd *pm, struct input_dev *input)
{
/* reassigned functionality for N/A keys
MY PICTURES => KEY_WORDPROCESSOR
@@ -602,7 +601,7 @@ static struct snd_rawmidi_ops pcmidi_in_ops = {
.trigger = pcmidi_in_trigger
};
-int pcmidi_snd_initialise(struct pcmidi_snd *pm)
+static int pcmidi_snd_initialise(struct pcmidi_snd *pm)
{
static int dev;
struct snd_card *card;
@@ -720,7 +719,7 @@ fail:
return err;
}
-int pcmidi_snd_terminate(struct pcmidi_snd *pm)
+static int pcmidi_snd_terminate(struct pcmidi_snd *pm)
{
if (pm->card) {
stop_sustain_timers(pm);
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 2307471d96d..093bfad00b0 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -39,7 +39,7 @@ static ssize_t arvo_sysfs_show_mode_key(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ retval = roccat_common_receive(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -67,7 +67,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
temp_buf.state = state;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_MODE_KEY,
+ retval = roccat_common_send(usb_dev, ARVO_COMMAND_MODE_KEY,
&temp_buf, sizeof(struct arvo_mode_key));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -87,7 +87,7 @@ static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
int retval;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ retval = roccat_common_receive(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -115,7 +115,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
temp_buf.key_mask = key_mask;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_KEY_MASK,
+ retval = roccat_common_send(usb_dev, ARVO_COMMAND_KEY_MASK,
&temp_buf, sizeof(struct arvo_key_mask));
mutex_unlock(&arvo->arvo_lock);
if (retval)
@@ -130,7 +130,7 @@ static int arvo_get_actual_profile(struct usb_device *usb_dev)
struct arvo_actual_profile temp_buf;
int retval;
- retval = roccat_common_receive(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_receive(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (retval)
@@ -163,11 +163,14 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
if (retval)
return retval;
+ if (profile < 1 || profile > 5)
+ return -EINVAL;
+
temp_buf.command = ARVO_COMMAND_ACTUAL_PROFILE;
temp_buf.actual_profile = profile;
mutex_lock(&arvo->arvo_lock);
- retval = roccat_common_send(usb_dev, ARVO_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_send(usb_dev, ARVO_COMMAND_ACTUAL_PROFILE,
&temp_buf, sizeof(struct arvo_actual_profile));
if (!retval) {
arvo->actual_profile = profile;
@@ -225,7 +228,7 @@ static ssize_t arvo_sysfs_write_button(struct file *fp,
loff_t off, size_t count)
{
return arvo_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct arvo_button), ARVO_USB_COMMAND_BUTTON);
+ sizeof(struct arvo_button), ARVO_COMMAND_BUTTON);
}
static ssize_t arvo_sysfs_read_info(struct file *fp,
@@ -233,7 +236,7 @@ static ssize_t arvo_sysfs_read_info(struct file *fp,
loff_t off, size_t count)
{
return arvo_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct arvo_info), ARVO_USB_COMMAND_INFO);
+ sizeof(struct arvo_info), ARVO_COMMAND_INFO);
}
@@ -399,7 +402,7 @@ static int arvo_raw_event(struct hid_device *hdev,
if (size != 3)
return 0;
- if (arvo->roccat_claimed)
+ if (arvo && arvo->roccat_claimed)
arvo_report_to_chrdev(arvo, data);
return 0;
diff --git a/drivers/hid/hid-roccat-arvo.h b/drivers/hid/hid-roccat-arvo.h
index d284a781c99..ce8415e4f00 100644
--- a/drivers/hid/hid-roccat-arvo.h
+++ b/drivers/hid/hid-roccat-arvo.h
@@ -46,19 +46,6 @@ enum arvo_commands {
ARVO_COMMAND_ACTUAL_PROFILE = 0x7,
};
-enum arvo_usb_commands {
- ARVO_USB_COMMAND_MODE_KEY = 0x303,
- /*
- * read/write
- * Read uses both index bytes as profile/key indexes
- * Write has index 0, profile/key is determined by payload
- */
- ARVO_USB_COMMAND_BUTTON = 0x304,
- ARVO_USB_COMMAND_INFO = 0x305,
- ARVO_USB_COMMAND_KEY_MASK = 0x306,
- ARVO_USB_COMMAND_ACTUAL_PROFILE = 0x307,
-};
-
struct arvo_special_report {
uint8_t unknown1; /* always 0x01 */
uint8_t event;
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index 13b1eb0c8c6..edf898dee28 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -11,10 +11,16 @@
* any later version.
*/
+#include <linux/hid.h>
#include <linux/slab.h>
#include "hid-roccat-common.h"
-int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+static inline uint16_t roccat_common_feature_report(uint8_t report_id)
+{
+ return 0x300 | report_id;
+}
+
+int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size)
{
char *buf;
@@ -25,9 +31,10 @@ int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
return -ENOMEM;
len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
- USB_REQ_CLEAR_FEATURE,
+ HID_REQ_GET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
- usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ roccat_common_feature_report(report_id),
+ 0, buf, size, USB_CTRL_SET_TIMEOUT);
memcpy(data, buf, size);
kfree(buf);
@@ -35,7 +42,7 @@ int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
}
EXPORT_SYMBOL_GPL(roccat_common_receive);
-int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+int roccat_common_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size)
{
char *buf;
@@ -48,9 +55,10 @@ int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
memcpy(buf, data, size);
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
- USB_REQ_SET_CONFIGURATION,
+ HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
- usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+ roccat_common_feature_report(report_id),
+ 0, buf, size, USB_CTRL_SET_TIMEOUT);
kfree(buf);
return ((len < 0) ? len : ((len != size) ? -EIO : 0));
diff --git a/drivers/hid/hid-roccat-common.h b/drivers/hid/hid-roccat-common.h
index fe45fae05bb..9a5bc61f969 100644
--- a/drivers/hid/hid-roccat-common.h
+++ b/drivers/hid/hid-roccat-common.h
@@ -15,9 +15,9 @@
#include <linux/usb.h>
#include <linux/types.h>
-int roccat_common_receive(struct usb_device *usb_dev, uint usb_command,
+int roccat_common_receive(struct usb_device *usb_dev, uint report_id,
void *data, uint size);
-int roccat_common_send(struct usb_device *usb_dev, uint usb_command,
+int roccat_common_send(struct usb_device *usb_dev, uint report_id,
void const *data, uint size);
#endif
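
The new roccat_common_feature_report() helper composes the wValue for the HID class GET_REPORT/SET_REPORT control requests, replacing the raw usb_command values the callers used to pass in. Per the HID class specification, the high byte of wValue is the report type (1 = Input, 2 = Output, 3 = Feature) and the low byte is the report ID, hence 0x300 | report_id for feature reports. A sketch of the composition:

#include <linux/types.h>

#define HID_REPORT_TYPE_FEATURE	3

static inline u16 hid_report_wvalue(u8 report_type, u8 report_id)
{
	/* e.g. hid_report_wvalue(HID_REPORT_TYPE_FEATURE, 0x04) == 0x0304 */
	return (report_type << 8) | report_id;
}
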
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index a57838d1526..2b8f3a31ffb 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -37,6 +37,47 @@
static uint profile_numbers[5] = {0, 1, 2, 3, 4};
+static int kone_receive(struct usb_device *usb_dev, uint usb_command,
+ void *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len = usb_control_msg(usb_dev, usb_rcvctrlpipe(usb_dev, 0),
+ HID_REQ_GET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(data, buf, size);
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+
+static int kone_send(struct usb_device *usb_dev, uint usb_command,
+ void const *data, uint size)
+{
+ char *buf;
+ int len;
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ memcpy(buf, data, size);
+
+ len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
+ HID_REQ_SET_REPORT,
+ USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
+ usb_command, 0, buf, size, USB_CTRL_SET_TIMEOUT);
+
+ kfree(buf);
+ return ((len < 0) ? len : ((len != size) ? -EIO : 0));
+}
+
/* kone_class is used for creating sysfs attributes via roccat char device */
static struct class *kone_class;
@@ -68,7 +109,7 @@ static int kone_check_write(struct usb_device *usb_dev)
*/
msleep(80);
- retval = roccat_common_receive(usb_dev,
+ retval = kone_receive(usb_dev,
kone_command_confirm_write, &data, 1);
if (retval)
return retval;
@@ -96,7 +137,7 @@ static int kone_check_write(struct usb_device *usb_dev)
static int kone_get_settings(struct usb_device *usb_dev,
struct kone_settings *buf)
{
- return roccat_common_receive(usb_dev, kone_command_settings, buf,
+ return kone_receive(usb_dev, kone_command_settings, buf,
sizeof(struct kone_settings));
}
@@ -109,7 +150,7 @@ static int kone_set_settings(struct usb_device *usb_dev,
struct kone_settings const *settings)
{
int retval;
- retval = roccat_common_send(usb_dev, kone_command_settings,
+ retval = kone_send(usb_dev, kone_command_settings,
settings, sizeof(struct kone_settings));
if (retval)
return retval;
@@ -182,7 +223,7 @@ static int kone_get_weight(struct usb_device *usb_dev, int *result)
int retval;
uint8_t data;
- retval = roccat_common_receive(usb_dev, kone_command_weight, &data, 1);
+ retval = kone_receive(usb_dev, kone_command_weight, &data, 1);
if (retval)
return retval;
@@ -201,7 +242,7 @@ static int kone_get_firmware_version(struct usb_device *usb_dev, int *result)
int retval;
uint16_t data;
- retval = roccat_common_receive(usb_dev, kone_command_firmware_version,
+ retval = kone_receive(usb_dev, kone_command_firmware_version,
&data, 2);
if (retval)
return retval;
@@ -384,7 +425,7 @@ static int kone_tcu_command(struct usb_device *usb_dev, int number)
{
unsigned char value;
value = number;
- return roccat_common_send(usb_dev, kone_command_calibrate, &value, 1);
+ return kone_send(usb_dev, kone_command_calibrate, &value, 1);
}
/*
@@ -791,6 +832,9 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
if (size != sizeof(struct kone_mouse_event))
return 0;
+ if (kone == NULL)
+ return 0;
+
/*
* Firmware 1.38 introduced new behaviour for tilt and special buttons.
* Pressed button is reported in each movement event.
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 4109a028e13..64abb5b8a59 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -166,7 +166,7 @@ enum kone_mouse_events {
/* osd events are thought to be displayed on screen */
kone_mouse_event_osd_dpi = 0xa0,
kone_mouse_event_osd_profile = 0xb0,
- /* TODO clarify meaning and occurence of kone_mouse_event_calibration */
+ /* TODO clarify meaning and occurrence of kone_mouse_event_calibration */
kone_mouse_event_calibration = 0xc0,
kone_mouse_event_call_overlong_macro = 0xe0,
/* switch events notify if user changed values with mousebutton click */
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 5b640a7a15a..59e47770fa1 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -50,7 +50,7 @@ static int koneplus_send_control(struct usb_device *usb_dev, uint value,
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ return roccat_common_send(usb_dev, KONEPLUS_COMMAND_CONTROL,
&control, sizeof(struct koneplus_control));
}
@@ -60,7 +60,7 @@ static int koneplus_receive_control_status(struct usb_device *usb_dev)
struct koneplus_control control;
do {
- retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_CONTROL,
+ retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_CONTROL,
&control, sizeof(struct koneplus_control));
/* check if we get a completely wrong answer */
@@ -120,7 +120,7 @@ static int koneplus_select_profile(struct usb_device *usb_dev, uint number,
static int koneplus_get_info(struct usb_device *usb_dev,
struct koneplus_info *buf)
{
- return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_INFO,
buf, sizeof(struct koneplus_info));
}
@@ -134,14 +134,14 @@ static int koneplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct koneplus_profile_settings));
}
static int koneplus_set_profile_settings(struct usb_device *usb_dev,
struct koneplus_profile_settings const *settings)
{
- return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct koneplus_profile_settings));
}
@@ -155,14 +155,14 @@ static int koneplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct koneplus_profile_buttons));
}
static int koneplus_set_profile_buttons(struct usb_device *usb_dev,
struct koneplus_profile_buttons const *buttons)
{
- return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return koneplus_send(usb_dev, KONEPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct koneplus_profile_buttons));
}
@@ -172,7 +172,7 @@ static int koneplus_get_actual_profile(struct usb_device *usb_dev)
struct koneplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KONEPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_receive(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -187,7 +187,7 @@ static int koneplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct koneplus_actual_profile);
buf.actual_profile = new_profile;
- return koneplus_send(usb_dev, KONEPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ return koneplus_send(usb_dev, KONEPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct koneplus_actual_profile));
}
@@ -240,12 +240,20 @@ static ssize_t koneplus_sysfs_write(struct file *fp, struct kobject *kobj,
return real_size;
}
+static ssize_t koneplus_sysfs_write_talk(struct file *fp,
+ struct kobject *kobj, struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ return koneplus_sysfs_write(fp, kobj, buf, off, count,
+ sizeof(struct koneplus_talk), KONEPLUS_COMMAND_TALK);
+}
+
static ssize_t koneplus_sysfs_write_macro(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_macro), KONEPLUS_USB_COMMAND_MACRO);
+ sizeof(struct koneplus_macro), KONEPLUS_COMMAND_MACRO);
}
static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
@@ -253,7 +261,7 @@ static ssize_t koneplus_sysfs_read_sensor(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+ sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
}
static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
@@ -261,7 +269,7 @@ static ssize_t koneplus_sysfs_write_sensor(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_sensor), KONEPLUS_USB_COMMAND_SENSOR);
+ sizeof(struct koneplus_sensor), KONEPLUS_COMMAND_SENSOR);
}
static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
@@ -269,7 +277,7 @@ static ssize_t koneplus_sysfs_write_tcu(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_write(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu), KONEPLUS_USB_COMMAND_TCU);
+ sizeof(struct koneplus_tcu), KONEPLUS_COMMAND_TCU);
}
static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
@@ -277,7 +285,7 @@ static ssize_t koneplus_sysfs_read_tcu_image(struct file *fp,
loff_t off, size_t count)
{
return koneplus_sysfs_read(fp, kobj, buf, off, count,
- sizeof(struct koneplus_tcu_image), KONEPLUS_USB_COMMAND_TCU);
+ sizeof(struct koneplus_tcu_image), KONEPLUS_COMMAND_TCU);
}
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
@@ -423,6 +431,9 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
if (retval)
return retval;
+ if (profile > 4)
+ return -EINVAL;
+
mutex_lock(&koneplus->koneplus_lock);
retval = koneplus_set_actual_profile(usb_dev, profile);
@@ -431,7 +442,7 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
return retval;
}
- koneplus->actual_profile = profile;
+ koneplus_profile_activated(koneplus, profile);
roccat_report.type = KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_PROFILE;
roccat_report.data1 = profile + 1;
@@ -557,6 +568,11 @@ static struct bin_attribute koneplus_bin_attributes[] = {
.size = sizeof(struct koneplus_macro),
.write = koneplus_sysfs_write_macro
},
+ {
+ .attr = { .name = "talk", .mode = 0220 },
+ .size = sizeof(struct koneplus_talk),
+ .write = koneplus_sysfs_write_talk
+ },
__ATTR_NULL
};
@@ -738,6 +754,9 @@ static int koneplus_raw_event(struct hid_device *hdev,
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
+ if (koneplus == NULL)
+ return 0;
+
koneplus_keep_values_up_to_date(koneplus, data);
if (koneplus->roccat_claimed)
diff --git a/drivers/hid/hid-roccat-koneplus.h b/drivers/hid/hid-roccat-koneplus.h
index c57a376ab8a..c03332a4fa9 100644
--- a/drivers/hid/hid-roccat-koneplus.h
+++ b/drivers/hid/hid-roccat-koneplus.h
@@ -14,6 +14,12 @@
#include <linux/types.h>
+struct koneplus_talk {
+ uint8_t command; /* KONEPLUS_COMMAND_TALK */
+ uint8_t size; /* always 0x10 */
+ uint8_t data[14];
+} __packed;
+
/*
* case 1: writes request 80 and reads value 1
*
@@ -137,26 +143,14 @@ enum koneplus_commands {
KONEPLUS_COMMAND_PROFILE_BUTTONS = 0x7,
KONEPLUS_COMMAND_MACRO = 0x8,
KONEPLUS_COMMAND_INFO = 0x9,
+ KONEPLUS_COMMAND_TCU = 0xc,
KONEPLUS_COMMAND_E = 0xe,
KONEPLUS_COMMAND_SENSOR = 0xf,
+ KONEPLUS_COMMAND_TALK = 0x10,
KONEPLUS_COMMAND_FIRMWARE_WRITE = 0x1b,
KONEPLUS_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
};
-enum koneplus_usb_commands {
- KONEPLUS_USB_COMMAND_CONTROL = 0x304,
- KONEPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305,
- KONEPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
- KONEPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
- KONEPLUS_USB_COMMAND_MACRO = 0x308,
- KONEPLUS_USB_COMMAND_INFO = 0x309,
- KONEPLUS_USB_COMMAND_TCU = 0x30c,
- KONEPLUS_USB_COMMAND_E = 0x30e,
- KONEPLUS_USB_COMMAND_SENSOR = 0x30f,
- KONEPLUS_USB_COMMAND_FIRMWARE_WRITE = 0x31b,
- KONEPLUS_USB_COMMAND_FIRMWARE_WRITE_CONTROL = 0x31c,
-};
-
enum koneplus_mouse_report_numbers {
KONEPLUS_MOUSE_REPORT_NUMBER_HID = 1,
KONEPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
@@ -193,6 +187,7 @@ enum koneplus_mouse_report_button_types {
* data2 = action
*/
KONEPLUS_MOUSE_REPORT_BUTTON_TYPE_MULTIMEDIA = 0xf0,
+ KONEPLUS_MOUSE_REPORT_TALK = 0xff,
};
enum koneplus_mouse_report_button_action {
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index 984be2f8967..1f8336e3f58 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -58,7 +58,7 @@ static int kovaplus_send_control(struct usb_device *usb_dev, uint value,
control.value = value;
control.request = request;
- retval = roccat_common_send(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ retval = roccat_common_send(usb_dev, KOVAPLUS_COMMAND_CONTROL,
&control, sizeof(struct kovaplus_control));
return retval;
@@ -70,7 +70,7 @@ static int kovaplus_receive_control_status(struct usb_device *usb_dev)
struct kovaplus_control control;
do {
- retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_CONTROL,
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_CONTROL,
&control, sizeof(struct kovaplus_control));
/* check if we get a completely wrong answer */
@@ -90,7 +90,7 @@ static int kovaplus_receive_control_status(struct usb_device *usb_dev)
if (control.value == KOVAPLUS_CONTROL_REQUEST_STATUS_OVERLOAD)
return -EINVAL;
- hid_err(usb_dev, "kovaplus_receive_control_status: "
+ hid_err(usb_dev, "roccat_common_receive_control_status: "
"unknown response value 0x%x\n", control.value);
return -EINVAL;
} while (1);
@@ -119,7 +119,7 @@ static int kovaplus_select_profile(struct usb_device *usb_dev, uint number,
static int kovaplus_get_info(struct usb_device *usb_dev,
struct kovaplus_info *buf)
{
- return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_INFO,
buf, sizeof(struct kovaplus_info));
}
@@ -133,14 +133,14 @@ static int kovaplus_get_profile_settings(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct kovaplus_profile_settings));
}
static int kovaplus_set_profile_settings(struct usb_device *usb_dev,
struct kovaplus_profile_settings const *settings)
{
- return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS,
+ return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_SETTINGS,
settings, sizeof(struct kovaplus_profile_settings));
}
@@ -154,14 +154,14 @@ static int kovaplus_get_profile_buttons(struct usb_device *usb_dev,
if (retval)
return retval;
- return roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct kovaplus_profile_buttons));
}
static int kovaplus_set_profile_buttons(struct usb_device *usb_dev,
struct kovaplus_profile_buttons const *buttons)
{
- return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS,
+ return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_PROFILE_BUTTONS,
buttons, sizeof(struct kovaplus_profile_buttons));
}
@@ -171,7 +171,7 @@ static int kovaplus_get_actual_profile(struct usb_device *usb_dev)
struct kovaplus_actual_profile buf;
int retval;
- retval = roccat_common_receive(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ retval = roccat_common_receive(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
return retval ? retval : buf.actual_profile;
@@ -186,7 +186,7 @@ static int kovaplus_set_actual_profile(struct usb_device *usb_dev,
buf.size = sizeof(struct kovaplus_actual_profile);
buf.actual_profile = new_profile;
- return kovaplus_send(usb_dev, KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE,
+ return kovaplus_send(usb_dev, KOVAPLUS_COMMAND_ACTUAL_PROFILE,
&buf, sizeof(struct kovaplus_actual_profile));
}
@@ -337,7 +337,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
mutex_lock(&kovaplus->kovaplus_lock);
retval = kovaplus_set_actual_profile(usb_dev, profile);
- kovaplus->actual_profile = profile;
+ kovaplus_profile_activated(kovaplus, profile);
mutex_unlock(&kovaplus->kovaplus_lock);
if (retval)
return retval;
@@ -662,6 +662,9 @@ static int kovaplus_raw_event(struct hid_device *hdev,
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
+ if (kovaplus == NULL)
+ return 0;
+
kovaplus_keep_values_up_to_date(kovaplus, data);
if (kovaplus->roccat_claimed)
diff --git a/drivers/hid/hid-roccat-kovaplus.h b/drivers/hid/hid-roccat-kovaplus.h
index ce40607d21c..fb2aed44a8e 100644
--- a/drivers/hid/hid-roccat-kovaplus.h
+++ b/drivers/hid/hid-roccat-kovaplus.h
@@ -83,15 +83,6 @@ enum kovaplus_commands {
KOVAPLUS_COMMAND_A = 0xa,
};
-enum kovaplus_usb_commands {
- KOVAPLUS_USB_COMMAND_CONTROL = 0x304,
- KOVAPLUS_USB_COMMAND_ACTUAL_PROFILE = 0x305,
- KOVAPLUS_USB_COMMAND_PROFILE_SETTINGS = 0x306,
- KOVAPLUS_USB_COMMAND_PROFILE_BUTTONS = 0x307,
- KOVAPLUS_USB_COMMAND_INFO = 0x309,
- KOVAPLUS_USB_COMMAND_A = 0x30a,
-};
-
enum kovaplus_mouse_report_numbers {
KOVAPLUS_MOUSE_REPORT_NUMBER_MOUSE = 1,
KOVAPLUS_MOUSE_REPORT_NUMBER_AUDIO = 2,
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 38280c055a1..8140776bd8c 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -53,7 +53,7 @@ static int pyra_send_control(struct usb_device *usb_dev, int value,
control.value = value;
control.request = request;
- return roccat_common_send(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ return roccat_common_send(usb_dev, PYRA_COMMAND_CONTROL,
&control, sizeof(struct pyra_control));
}
@@ -64,7 +64,7 @@ static int pyra_receive_control_status(struct usb_device *usb_dev)
do {
msleep(10);
- retval = roccat_common_receive(usb_dev, PYRA_USB_COMMAND_CONTROL,
+ retval = roccat_common_receive(usb_dev, PYRA_COMMAND_CONTROL,
&control, sizeof(struct pyra_control));
/* requested too early, try again */
@@ -89,7 +89,7 @@ static int pyra_get_profile_settings(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_SETTINGS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS,
buf, sizeof(struct pyra_profile_settings));
}
@@ -101,20 +101,20 @@ static int pyra_get_profile_buttons(struct usb_device *usb_dev,
PYRA_CONTROL_REQUEST_PROFILE_BUTTONS);
if (retval)
return retval;
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS,
buf, sizeof(struct pyra_profile_buttons));
}
static int pyra_get_settings(struct usb_device *usb_dev,
struct pyra_settings *buf)
{
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_SETTINGS,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_SETTINGS,
buf, sizeof(struct pyra_settings));
}
static int pyra_get_info(struct usb_device *usb_dev, struct pyra_info *buf)
{
- return roccat_common_receive(usb_dev, PYRA_USB_COMMAND_INFO,
+ return roccat_common_receive(usb_dev, PYRA_COMMAND_INFO,
buf, sizeof(struct pyra_info));
}
@@ -131,26 +131,22 @@ static int pyra_send(struct usb_device *usb_dev, uint command,
static int pyra_set_profile_settings(struct usb_device *usb_dev,
struct pyra_profile_settings const *settings)
{
- return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_SETTINGS, settings,
+ return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_SETTINGS, settings,
sizeof(struct pyra_profile_settings));
}
static int pyra_set_profile_buttons(struct usb_device *usb_dev,
struct pyra_profile_buttons const *buttons)
{
- return pyra_send(usb_dev, PYRA_USB_COMMAND_PROFILE_BUTTONS, buttons,
+ return pyra_send(usb_dev, PYRA_COMMAND_PROFILE_BUTTONS, buttons,
sizeof(struct pyra_profile_buttons));
}
static int pyra_set_settings(struct usb_device *usb_dev,
struct pyra_settings const *settings)
{
- int retval;
- retval = roccat_common_send(usb_dev, PYRA_USB_COMMAND_SETTINGS, settings,
+ return pyra_send(usb_dev, PYRA_COMMAND_SETTINGS, settings,
sizeof(struct pyra_settings));
- if (retval)
- return retval;
- return pyra_receive_control_status(usb_dev);
}
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
@@ -641,6 +637,9 @@ static int pyra_raw_event(struct hid_device *hdev, struct hid_report *report,
!= USB_INTERFACE_PROTOCOL_MOUSE)
return 0;
+ if (pyra == NULL)
+ return 0;
+
pyra_keep_values_up_to_date(pyra, data);
if (pyra->roccat_claimed)
diff --git a/drivers/hid/hid-roccat-pyra.h b/drivers/hid/hid-roccat-pyra.h
index 14cbbe1621e..0442d7fa2dc 100644
--- a/drivers/hid/hid-roccat-pyra.h
+++ b/drivers/hid/hid-roccat-pyra.h
@@ -83,15 +83,6 @@ enum pyra_commands {
PYRA_COMMAND_B = 0xb
};
-enum pyra_usb_commands {
- PYRA_USB_COMMAND_CONTROL = 0x304,
- PYRA_USB_COMMAND_SETTINGS = 0x305,
- PYRA_USB_COMMAND_PROFILE_SETTINGS = 0x306,
- PYRA_USB_COMMAND_PROFILE_BUTTONS = 0x307,
- PYRA_USB_COMMAND_INFO = 0x309,
- PYRA_USB_COMMAND_B = 0x30b /* writes 3 bytes */
-};
-
enum pyra_mouse_report_numbers {
PYRA_MOUSE_REPORT_NUMBER_HID = 1,
PYRA_MOUSE_REPORT_NUMBER_AUDIO = 2,
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 936c911fdca..5cd25bd907f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -28,6 +28,12 @@
#define SIXAXIS_CONTROLLER_USB (1 << 1)
#define SIXAXIS_CONTROLLER_BT (1 << 2)
+static const u8 sixaxis_rdesc_fixup[] = {
+ 0x95, 0x13, 0x09, 0x01, 0x81, 0x02, 0x95, 0x0C,
+ 0x81, 0x01, 0x75, 0x10, 0x95, 0x04, 0x26, 0xFF,
+ 0x03, 0x46, 0xFF, 0x03, 0x09, 0x01, 0x81, 0x02
+};
+
struct sony_sc {
unsigned long quirks;
};
@@ -43,9 +49,37 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev, "Fixing up Sony Vaio VGX report descriptor\n");
rdesc[55] = 0x06;
}
+
+ /* The HID descriptor exposed over BT has a trailing zero byte */
+ if ((((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize == 148) ||
+ ((sc->quirks & SIXAXIS_CONTROLLER_BT) && *rsize == 149)) &&
+ rdesc[83] == 0x75) {
+ hid_info(hdev, "Fixing up Sony Sixaxis report descriptor\n");
+ memcpy((void *)&rdesc[83], (void *)&sixaxis_rdesc_fixup,
+ sizeof(sixaxis_rdesc_fixup));
+ }
return rdesc;
}
+static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
+ __u8 *rd, int size)
+{
+ struct sony_sc *sc = hid_get_drvdata(hdev);
+
+ /* The Sixaxis HID report has accelerometer/gyro values with MSByte first;
+ * these have to be byte-swapped before being passed up to the joystick
+ * interface.
+ */
+ if ((sc->quirks & (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)) &&
+ rd[0] == 0x01 && size == 49) {
+ swap(rd[41], rd[42]);
+ swap(rd[43], rd[44]);
+ swap(rd[45], rd[46]);
+ swap(rd[47], rd[48]);
+ }
+
+ return 0;
+}
+
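The four swaps above fix the 16-bit motion fields of the 49-byte Sixaxis
input report (report ID 0x01), which the device sends MSByte first while
HID report fields are parsed LSByte first. An equivalent formulation reads
each field explicitly; a sketch, not the code the driver uses:

/* Sketch: convert one big-endian axis in place to the little-endian
 * order the HID core expects; offs is the offset of the MSB. */
#include <asm/unaligned.h>

static void sixaxis_fix_axis(__u8 *rd, unsigned int offs)
{
	u16 v = get_unaligned_be16(rd + offs);

	put_unaligned_le16(v, rd + offs);
}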
/*
* The Sony Sixaxis does not handle HID Output Reports on the Interrupt EP
* like it should according to usbhid/hid-core.c::usbhid_output_raw_report()
@@ -194,6 +228,7 @@ static struct hid_driver sony_driver = {
.probe = sony_probe,
.remove = sony_remove,
.report_fixup = sony_report_fixup,
+ .raw_event = sony_raw_event
};
static int __init sony_init(void)
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
new file mode 100644
index 00000000000..60201374171
--- /dev/null
+++ b/drivers/hid/hid-speedlink.c
@@ -0,0 +1,89 @@
+/*
+ * HID driver for Speedlink Vicious and Divine Cezanne (USB mouse).
+ * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
+ * the HID descriptor.
+ *
+ * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "hid-ids.h"
+#include "usbhid/usbhid.h"
+
+static const struct hid_device_id speedlink_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE)},
+ { }
+};
+
+static int speedlink_input_mapping(struct hid_device *hdev,
+ struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ /*
+ * The Cezanne mouse has a second "keyboard" USB endpoint, as it is
+ * able to map keyboard events to its button presses.
+ * It sends a standard keyboard report descriptor, though, whose
+ * LEDs we ignore.
+ */
+ switch (usage->hid & HID_USAGE_PAGE) {
+ case HID_UP_LED:
+ return -1;
+ }
+ return 0;
+}
+
+static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ /* No other conditions needed: usage_table limits us to X/Y events. */
+ /* Fix "jumpy" cursor (invalid events sent by device). */
+ if (value == 256)
+ return 1;
+ /* Drop useless distance 0 events (on button clicks etc.) as well */
+ if (value == 0)
+ return 1;
+
+ return 0;
+}
+
+MODULE_DEVICE_TABLE(hid, speedlink_devices);
+
+static const struct hid_usage_id speedlink_grabbed_usages[] = {
+ { HID_GD_X, EV_REL, 0 },
+ { HID_GD_Y, EV_REL, 1 },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver speedlink_driver = {
+ .name = "speedlink",
+ .id_table = speedlink_devices,
+ .usage_table = speedlink_grabbed_usages,
+ .input_mapping = speedlink_input_mapping,
+ .event = speedlink_event,
+};
+
+static int __init speedlink_init(void)
+{
+ return hid_register_driver(&speedlink_driver);
+}
+
+static void __exit speedlink_exit(void)
+{
+ hid_unregister_driver(&speedlink_driver);
+}
+
+module_init(speedlink_init);
+module_exit(speedlink_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 05fdc85a76e..e15732f1a22 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -343,6 +343,193 @@ static __u8 wp8060u_rdesc_fixed[] = {
};
/*
+ * Original WP1062 report descriptor.
+ *
+ * Only report ID 9 is actually used.
+ *
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (Pen), ; Pen (02h, application collection)
+ * Collection (Application),
+ * Report ID (7),
+ * Usage (Stylus), ; Stylus (20h, logical collection)
+ * Collection (Physical),
+ * Usage (Tip Switch), ; Tip switch (42h, momentary control)
+ * Usage (Barrel Switch), ; Barrel switch (44h, momentary control)
+ * Usage (Eraser), ; Eraser (45h, momentary control)
+ * Logical Minimum (0),
+ * Logical Maximum (1),
+ * Report Size (1),
+ * Report Count (3),
+ * Input (Variable),
+ * Report Count (3),
+ * Input (Constant, Variable),
+ * Usage (In Range), ; In range (32h, momentary control)
+ * Report Count (1),
+ * Input (Variable),
+ * Report Count (1),
+ * Input (Constant, Variable),
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (X), ; X (30h, dynamic value)
+ * Report Size (16),
+ * Report Count (1),
+ * Push,
+ * Unit Exponent (13),
+ * Unit (Inch),
+ * Physical Minimum (0),
+ * Physical Maximum (10000),
+ * Logical Maximum (20000),
+ * Input (Variable),
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Physical Maximum (6583),
+ * Logical Maximum (13166),
+ * Input (Variable),
+ * Pop,
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
+ * Logical Maximum (1023),
+ * Input (Variable),
+ * Report Size (16),
+ * End Collection,
+ * End Collection,
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (Mouse), ; Mouse (02h, application collection)
+ * Collection (Application),
+ * Report ID (8),
+ * Usage (Pointer), ; Pointer (01h, physical collection)
+ * Collection (Physical),
+ * Usage Page (Button), ; Button (09h)
+ * Usage Minimum (01h),
+ * Usage Maximum (03h),
+ * Logical Minimum (0),
+ * Logical Maximum (1),
+ * Report Count (3),
+ * Report Size (1),
+ * Input (Variable),
+ * Report Count (5),
+ * Input (Constant),
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (X), ; X (30h, dynamic value)
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Usage (Wheel), ; Wheel (38h, dynamic value)
+ * Usage (00h),
+ * Logical Minimum (-127),
+ * Logical Maximum (127),
+ * Report Size (8),
+ * Report Count (4),
+ * Input (Variable, Relative),
+ * End Collection,
+ * End Collection,
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (Mouse), ; Mouse (02h, application collection)
+ * Collection (Application),
+ * Report ID (9),
+ * Usage (Pointer), ; Pointer (01h, physical collection)
+ * Collection (Physical),
+ * Usage Page (Button), ; Button (09h)
+ * Usage Minimum (01h),
+ * Usage Maximum (03h),
+ * Logical Minimum (0),
+ * Logical Maximum (1),
+ * Report Count (3),
+ * Report Size (1),
+ * Input (Variable),
+ * Report Count (4),
+ * Input (Constant),
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (In Range), ; In range (32h, momentary control)
+ * Report Count (1),
+ * Input (Variable),
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (X), ; X (30h, dynamic value)
+ * Report Size (16),
+ * Report Count (1),
+ * Push,
+ * Unit Exponent (13),
+ * Unit (Inch),
+ * Physical Minimum (0),
+ * Physical Maximum (10000),
+ * Logical Maximum (20000),
+ * Input (Variable),
+ * Usage (Y), ; Y (31h, dynamic value)
+ * Physical Maximum (6583),
+ * Logical Maximum (13166),
+ * Input (Variable),
+ * Pop,
+ * Usage Page (Digitizer), ; Digitizer (0Dh)
+ * Usage (Tip Pressure), ; Tip pressure (30h, dynamic value)
+ * Logical Maximum (1023),
+ * Report Count (1),
+ * Report Size (16),
+ * Input (Variable),
+ * End Collection,
+ * End Collection,
+ * Usage Page (Desktop), ; Generic desktop controls (01h)
+ * Usage (00h),
+ * Collection (Application),
+ * Report ID (4),
+ * Logical Minimum (0),
+ * Logical Maximum (255),
+ * Usage (00h),
+ * Report Size (8),
+ * Report Count (3),
+ * Feature (Variable),
+ * End Collection
+ */
+
+/* Size of the original descriptor of WP1062 tablet */
+#define WP1062_RDESC_ORIG_SIZE 254
+
+/*
+ * Fixed WP1062 report descriptor.
+ *
+ * Removed unused reports, corrected second barrel button usage code, physical
+ * units.
+ */
+static __u8 wp1062_rdesc_fixed[] = {
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x02, /* Usage (Pen), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x09, /* Report ID (9), */
+ 0x09, 0x20, /* Usage (Stylus), */
+ 0xA0, /* Collection (Physical), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x09, 0x44, /* Usage (Barrel Switch), */
+ 0x09, 0x46, /* Usage (Tablet Pick), */
+ 0x14, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x04, /* Report Count (4), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x09, 0x32, /* Usage (In Range), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x14, /* Logical Minimum (0), */
+ 0xA4, /* Push, */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x55, 0xFD, /* Unit Exponent (-3), */
+ 0x65, 0x13, /* Unit (Inch), */
+ 0x34, /* Physical Minimum (0), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x10, 0x27, /* Physical Maximum (10000), */
+ 0x26, 0x20, 0x4E, /* Logical Maximum (20000), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x46, 0xB7, 0x19, /* Physical Maximum (6583), */
+ 0x26, 0x6E, 0x33, /* Logical Maximum (13166), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xB4, /* Pop, */
+ 0x09, 0x30, /* Usage (Tip Pressure), */
+ 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+};
+
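For readers decoding the fixed descriptor by hand: HID short items put
little-endian data bytes after a one-byte prefix whose low two bits give
the data size (a size code of 3 meaning four bytes). So 0x26, 0x20, 0x4E is
Logical Maximum (0x4E20 = 20000) and 0x55, 0xFD is Unit Exponent with the
signed byte 0xFD = -3, which appears to be the physical-units correction
mentioned above (the original descriptor advertised an exponent of 13).
A tiny decoder sketch under those assumptions:

/* Sketch: decode the data of one HID short item; returns bytes consumed.
 * Prefix bits: [7:4] tag, [3:2] type, [1:0] size code. */
static unsigned int hid_short_item(const __u8 *p, __u32 *data)
{
	unsigned int size = p[0] & 0x03;
	unsigned int i;

	if (size == 3)
		size = 4;
	*data = 0;
	for (i = 0; i < size; i++)
		*data |= (__u32)p[1 + i] << (8 * i); /* little-endian */
	return 1 + size;
}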
+/*
* Original PF1209 report descriptor.
*
* The descriptor is similar to WPXXXXU descriptors, with an addition of a
@@ -584,6 +771,12 @@ static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc,
*rsize = sizeof(wp8060u_rdesc_fixed);
}
break;
+ case USB_DEVICE_ID_UCLOGIC_TABLET_WP1062:
+ if (*rsize == WP1062_RDESC_ORIG_SIZE) {
+ rdesc = wp1062_rdesc_fixed;
+ *rsize = sizeof(wp1062_rdesc_fixed);
+ }
+ break;
}
return rdesc;
@@ -598,6 +791,8 @@ static const struct hid_device_id uclogic_devices[] = {
USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC,
+ USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ }
};
MODULE_DEVICE_TABLE(hid, uclogic_devices);
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote.c
new file mode 100644
index 00000000000..a594383ce03
--- /dev/null
+++ b/drivers/hid/hid-wiimote.c
@@ -0,0 +1,489 @@
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include "hid-ids.h"
+
+#define WIIMOTE_VERSION "0.1"
+#define WIIMOTE_NAME "Nintendo Wii Remote"
+#define WIIMOTE_BUFSIZE 32
+
+struct wiimote_buf {
+ __u8 data[HID_MAX_BUFFER_SIZE];
+ size_t size;
+};
+
+struct wiimote_state {
+ spinlock_t lock;
+ __u8 flags;
+};
+
+struct wiimote_data {
+ atomic_t ready;
+ struct hid_device *hdev;
+ struct input_dev *input;
+
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct wiimote_buf outq[WIIMOTE_BUFSIZE];
+ struct work_struct worker;
+
+ struct wiimote_state state;
+};
+
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
+ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+
+enum wiiproto_reqs {
+ WIIPROTO_REQ_LED = 0x11,
+ WIIPROTO_REQ_DRM_K = 0x30,
+};
+
+enum wiiproto_keys {
+ WIIPROTO_KEY_LEFT,
+ WIIPROTO_KEY_RIGHT,
+ WIIPROTO_KEY_UP,
+ WIIPROTO_KEY_DOWN,
+ WIIPROTO_KEY_PLUS,
+ WIIPROTO_KEY_MINUS,
+ WIIPROTO_KEY_ONE,
+ WIIPROTO_KEY_TWO,
+ WIIPROTO_KEY_A,
+ WIIPROTO_KEY_B,
+ WIIPROTO_KEY_HOME,
+ WIIPROTO_KEY_COUNT
+};
+
+static __u16 wiiproto_keymap[] = {
+ KEY_LEFT, /* WIIPROTO_KEY_LEFT */
+ KEY_RIGHT, /* WIIPROTO_KEY_RIGHT */
+ KEY_UP, /* WIIPROTO_KEY_UP */
+ KEY_DOWN, /* WIIPROTO_KEY_DOWN */
+ KEY_NEXT, /* WIIPROTO_KEY_PLUS */
+ KEY_PREVIOUS, /* WIIPROTO_KEY_MINUS */
+ BTN_1, /* WIIPROTO_KEY_ONE */
+ BTN_2, /* WIIPROTO_KEY_TWO */
+ BTN_A, /* WIIPROTO_KEY_A */
+ BTN_B, /* WIIPROTO_KEY_B */
+ BTN_MODE, /* WIIPROTO_KEY_HOME */
+};
+
+#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
+ dev))
+
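dev_to_wii() walks back from the embedded struct device to the driver
data: container_of() recovers the enclosing hid_device from its dev
member, and hid_get_drvdata() returns what probe stored there. The same
thing open-coded, as a sketch:

/* Sketch: what dev_to_wii(pdev) does, step by step. */
static struct wiimote_data *dev_to_wii_open(struct device *pdev)
{
	struct hid_device *hdev = container_of(pdev, struct hid_device, dev);

	return hid_get_drvdata(hdev);
}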
+static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
+ size_t count)
+{
+ __u8 *buf;
+ ssize_t ret;
+
+ if (!hdev->hid_output_raw_report)
+ return -ENODEV;
+
+ buf = kmemdup(buffer, count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hdev->hid_output_raw_report(hdev, buf, count, HID_OUTPUT_REPORT);
+
+ kfree(buf);
+ return ret;
+}
+
+static void wiimote_worker(struct work_struct *work)
+{
+ struct wiimote_data *wdata = container_of(work, struct wiimote_data,
+ worker);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->qlock, flags);
+
+ while (wdata->head != wdata->tail) {
+ spin_unlock_irqrestore(&wdata->qlock, flags);
+ wiimote_hid_send(wdata->hdev, wdata->outq[wdata->tail].data,
+ wdata->outq[wdata->tail].size);
+ spin_lock_irqsave(&wdata->qlock, flags);
+
+ wdata->tail = (wdata->tail + 1) % WIIMOTE_BUFSIZE;
+ }
+
+ spin_unlock_irqrestore(&wdata->qlock, flags);
+}
+
+static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer,
+ size_t count)
+{
+ unsigned long flags;
+ __u8 newhead;
+
+ if (count > HID_MAX_BUFFER_SIZE) {
+ hid_warn(wdata->hdev, "Sending too large output report\n");
+ return;
+ }
+
+ /*
+ * Copy new request into our output queue and check whether the
+ * queue is full. If it is full, discard this request.
+ * If it is empty we need to start a new worker that will
+ * send out the buffer to the hid device.
+ * If the queue is not empty, then there must be a worker
+ * that is currently sending out our buffer and this worker
+ * will reschedule itself until the queue is empty.
+ */
+
+ spin_lock_irqsave(&wdata->qlock, flags);
+
+ memcpy(wdata->outq[wdata->head].data, buffer, count);
+ wdata->outq[wdata->head].size = count;
+ newhead = (wdata->head + 1) % WIIMOTE_BUFSIZE;
+
+ if (wdata->head == wdata->tail) {
+ wdata->head = newhead;
+ schedule_work(&wdata->worker);
+ } else if (newhead != wdata->tail) {
+ wdata->head = newhead;
+ } else {
+ hid_warn(wdata->hdev, "Output queue is full");
+ }
+
+ spin_unlock_irqrestore(&wdata->qlock, flags);
+}
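The queue above is a one-slot-sacrificed ring buffer: empty when head ==
tail, full when advancing head would land on tail, so at most
WIIMOTE_BUFSIZE - 1 entries are ever in flight, and the worker is only
scheduled on the empty-to-nonempty transition. A minimal model of those
invariants (illustrative names, not driver code):

#define QSZ 32 /* mirrors WIIMOTE_BUFSIZE */

struct ring { unsigned int head, tail; };

static int ring_empty(const struct ring *r)
{
	return r->head == r->tail;
}

/* Returns 1 if a slot was claimed, 0 if the queue is full. */
static int ring_push(struct ring *r)
{
	unsigned int newhead = (r->head + 1) % QSZ;

	if (newhead == r->tail)
		return 0; /* full: one slot stays free by design */
	r->head = newhead;
	return 1;
}

static void ring_pop(struct ring *r) /* caller ensures !ring_empty() */
{
	r->tail = (r->tail + 1) % QSZ;
}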
+
+static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
+{
+ __u8 cmd[2];
+
+ leds &= WIIPROTO_FLAGS_LEDS;
+ if ((wdata->state.flags & WIIPROTO_FLAGS_LEDS) == leds)
+ return;
+ wdata->state.flags = (wdata->state.flags & ~WIIPROTO_FLAGS_LEDS) | leds;
+
+ cmd[0] = WIIPROTO_REQ_LED;
+ cmd[1] = 0;
+
+ if (leds & WIIPROTO_FLAG_LED1)
+ cmd[1] |= 0x10;
+ if (leds & WIIPROTO_FLAG_LED2)
+ cmd[1] |= 0x20;
+ if (leds & WIIPROTO_FLAG_LED3)
+ cmd[1] |= 0x40;
+ if (leds & WIIPROTO_FLAG_LED4)
+ cmd[1] |= 0x80;
+
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
+#define wiifs_led_show_set(num) \
+static ssize_t wiifs_led_show_##num(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wiimote_data *wdata = dev_to_wii(dev); \
+ unsigned long flags; \
+ int state; \
+ \
+ if (!atomic_read(&wdata->ready)) \
+ return -EBUSY; \
+ \
+ spin_lock_irqsave(&wdata->state.lock, flags); \
+ state = !!(wdata->state.flags & WIIPROTO_FLAG_LED##num); \
+ spin_unlock_irqrestore(&wdata->state.lock, flags); \
+ \
+ return sprintf(buf, "%d\n", state); \
+} \
+static ssize_t wiifs_led_set_##num(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
+{ \
+ struct wiimote_data *wdata = dev_to_wii(dev); \
+ int tmp = simple_strtoul(buf, NULL, 10); \
+ unsigned long flags; \
+ __u8 state; \
+ \
+ if (!atomic_read(&wdata->ready)) \
+ return -EBUSY; \
+ \
+ spin_lock_irqsave(&wdata->state.lock, flags); \
+ \
+ state = wdata->state.flags; \
+ \
+ if (tmp) \
+ wiiproto_req_leds(wdata, state | WIIPROTO_FLAG_LED##num);\
+ else \
+ wiiproto_req_leds(wdata, state & ~WIIPROTO_FLAG_LED##num);\
+ \
+ spin_unlock_irqrestore(&wdata->state.lock, flags); \
+ \
+ return count; \
+} \
+static DEVICE_ATTR(led##num, S_IRUGO | S_IWUSR, wiifs_led_show_##num, \
+ wiifs_led_set_##num)
+
+wiifs_led_show_set(1);
+wiifs_led_show_set(2);
+wiifs_led_show_set(3);
+wiifs_led_show_set(4);
+
+static int wiimote_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+
+ if (!atomic_read(&wdata->ready))
+ return -EBUSY;
+ /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
+ smp_rmb();
+
+ return 0;
+}
+
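The smp_rmb() above pairs with the smp_wmb() issued in the probe path just
before atomic_set(&wdata->ready, 1): the write barrier publishes all
wiimote_data initialisation before the ready flag, and the read barrier
keeps dependent loads from being hoisted above the flag check. The pattern,
as comments rather than new driver code:

/* Writer (probe):                     Reader (event paths):
 *   init wdata fields;                  if (!atomic_read(&wdata->ready))
 *   smp_wmb();                                  return -EBUSY;
 *   atomic_set(&wdata->ready, 1);       smp_rmb();
 *                                       ... safely use wdata fields ...
 *
 * A reader that observes ready == 1 is thus guaranteed to observe the
 * fully initialised wdata as well. */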
+static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
+{
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_LEFT],
+ !!(payload[0] & 0x01));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_RIGHT],
+ !!(payload[0] & 0x02));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_DOWN],
+ !!(payload[0] & 0x04));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_UP],
+ !!(payload[0] & 0x08));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_PLUS],
+ !!(payload[0] & 0x10));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_TWO],
+ !!(payload[1] & 0x01));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_ONE],
+ !!(payload[1] & 0x02));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_B],
+ !!(payload[1] & 0x04));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_A],
+ !!(payload[1] & 0x08));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_MINUS],
+ !!(payload[1] & 0x10));
+ input_report_key(wdata->input, wiiproto_keymap[WIIPROTO_KEY_HOME],
+ !!(payload[1] & 0x80));
+ input_sync(wdata->input);
+}
+
+struct wiiproto_handler {
+ __u8 id;
+ size_t size;
+ void (*func)(struct wiimote_data *wdata, const __u8 *payload);
+};
+
+static struct wiiproto_handler handlers[] = {
+ { .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
+ { .id = 0 }
+};
+
+static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct wiimote_data *wdata = hid_get_drvdata(hdev);
+ struct wiiproto_handler *h;
+ int i;
+ unsigned long flags;
+
+ if (!atomic_read(&wdata->ready))
+ return -EBUSY;
+ /* smp_rmb: Make sure wdata->xy is available when wdata->ready is 1 */
+ smp_rmb();
+
+ if (size < 1)
+ return -EINVAL;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+
+ for (i = 0; handlers[i].id; ++i) {
+ h = &handlers[i];
+ if (h->id == raw_data[0] && h->size < size)
+ h->func(wdata, &raw_data[1]);
+ }
+
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static struct wiimote_data *wiimote_create(struct hid_device *hdev)
+{
+ struct wiimote_data *wdata;
+ int i;
+
+ wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
+ if (!wdata)
+ return NULL;
+
+ wdata->input = input_allocate_device();
+ if (!wdata->input) {
+ kfree(wdata);
+ return NULL;
+ }
+
+ wdata->hdev = hdev;
+ hid_set_drvdata(hdev, wdata);
+
+ input_set_drvdata(wdata->input, wdata);
+ wdata->input->event = wiimote_input_event;
+ wdata->input->dev.parent = &wdata->hdev->dev;
+ wdata->input->id.bustype = wdata->hdev->bus;
+ wdata->input->id.vendor = wdata->hdev->vendor;
+ wdata->input->id.product = wdata->hdev->product;
+ wdata->input->id.version = wdata->hdev->version;
+ wdata->input->name = WIIMOTE_NAME;
+
+ set_bit(EV_KEY, wdata->input->evbit);
+ for (i = 0; i < WIIPROTO_KEY_COUNT; ++i)
+ set_bit(wiiproto_keymap[i], wdata->input->keybit);
+
+ spin_lock_init(&wdata->qlock);
+ INIT_WORK(&wdata->worker, wiimote_worker);
+
+ spin_lock_init(&wdata->state.lock);
+
+ return wdata;
+}
+
+static void wiimote_destroy(struct wiimote_data *wdata)
+{
+ kfree(wdata);
+}
+
+static int wiimote_hid_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ struct wiimote_data *wdata;
+ int ret;
+
+ wdata = wiimote_create(hdev);
+ if (!wdata) {
+ hid_err(hdev, "Can't alloc device\n");
+ return -ENOMEM;
+ }
+
+ ret = device_create_file(&hdev->dev, &dev_attr_led1);
+ if (ret)
+ goto err;
+ ret = device_create_file(&hdev->dev, &dev_attr_led2);
+ if (ret)
+ goto err;
+ ret = device_create_file(&hdev->dev, &dev_attr_led3);
+ if (ret)
+ goto err;
+ ret = device_create_file(&hdev->dev, &dev_attr_led4);
+ if (ret)
+ goto err;
+
+ ret = hid_parse(hdev);
+ if (ret) {
+ hid_err(hdev, "HID parse failed\n");
+ goto err;
+ }
+
+ ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+ if (ret) {
+ hid_err(hdev, "HW start failed\n");
+ goto err;
+ }
+
+ ret = input_register_device(wdata->input);
+ if (ret) {
+ hid_err(hdev, "Cannot register input device\n");
+ goto err_stop;
+ }
+
+ /* smp_wmb: Write wdata->xy first before wdata->ready is set to 1 */
+ smp_wmb();
+ atomic_set(&wdata->ready, 1);
+ hid_info(hdev, "New device registered\n");
+
+ /* by default set led1 after device initialization */
+ spin_lock_irq(&wdata->state.lock);
+ wiiproto_req_leds(wdata, WIIPROTO_FLAG_LED1);
+ spin_unlock_irq(&wdata->state.lock);
+
+ return 0;
+
+err_stop:
+ hid_hw_stop(hdev);
+err:
+ input_free_device(wdata->input);
+ device_remove_file(&hdev->dev, &dev_attr_led1);
+ device_remove_file(&hdev->dev, &dev_attr_led2);
+ device_remove_file(&hdev->dev, &dev_attr_led3);
+ device_remove_file(&hdev->dev, &dev_attr_led4);
+ wiimote_destroy(wdata);
+ return ret;
+}
+
+static void wiimote_hid_remove(struct hid_device *hdev)
+{
+ struct wiimote_data *wdata = hid_get_drvdata(hdev);
+
+ hid_info(hdev, "Device removed\n");
+
+ device_remove_file(&hdev->dev, &dev_attr_led1);
+ device_remove_file(&hdev->dev, &dev_attr_led2);
+ device_remove_file(&hdev->dev, &dev_attr_led3);
+ device_remove_file(&hdev->dev, &dev_attr_led4);
+
+ hid_hw_stop(hdev);
+ input_unregister_device(wdata->input);
+
+ cancel_work_sync(&wdata->worker);
+ wiimote_destroy(wdata);
+}
+
+static const struct hid_device_id wiimote_hid_devices[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+ USB_DEVICE_ID_NINTENDO_WIIMOTE) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);
+
+static struct hid_driver wiimote_hid_driver = {
+ .name = "wiimote",
+ .id_table = wiimote_hid_devices,
+ .probe = wiimote_hid_probe,
+ .remove = wiimote_hid_remove,
+ .raw_event = wiimote_hid_event,
+};
+
+static int __init wiimote_init(void)
+{
+ int ret;
+
+ ret = hid_register_driver(&wiimote_hid_driver);
+ if (ret)
+ pr_err("Can't register wiimote hid driver\n");
+
+ return ret;
+}
+
+static void __exit wiimote_exit(void)
+{
+ hid_unregister_driver(&wiimote_hid_driver);
+}
+
+module_init(wiimote_init);
+module_exit(wiimote_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
+MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
+MODULE_VERSION(WIIMOTE_VERSION);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 38c261a40c7..ad978f5748d 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1191,6 +1191,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
if (intf->cur_altsetting->desc.bInterfaceProtocol ==
USB_INTERFACE_PROTOCOL_MOUSE)
hid->type = HID_TYPE_USBMOUSE;
+ else if (intf->cur_altsetting->desc.bInterfaceProtocol == 0)
+ hid->type = HID_TYPE_USBNONE;
if (dev->manufacturer)
strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f9ba7d74dfc..9353992f9ee 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -302,7 +302,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
event->event == IB_EVENT_LID_CHANGE ||
event->event == IB_EVENT_PKEY_CHANGE ||
event->event == IB_EVENT_SM_CHANGE ||
- event->event == IB_EVENT_CLIENT_REREGISTER) {
+ event->event == IB_EVENT_CLIENT_REREGISTER ||
+ event->event == IB_EVENT_GID_CHANGE) {
work = kmalloc(sizeof *work, GFP_ATOMIC);
if (work) {
INIT_WORK(&work->work, ib_cache_task);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b6a33b3c516..ca4c5dcd713 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -359,6 +359,10 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+ if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
mutex_lock(&lock);
iboe_addr_get_sgid(dev_addr, &iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
@@ -406,11 +410,6 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
return 0;
}
-static int cma_has_cm_dev(struct rdma_id_private *id_priv)
-{
- return (id_priv->id.device && id_priv->cm_id.ib);
-}
-
struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
void *context, enum rdma_port_space ps,
enum ib_qp_type qp_type)
@@ -920,11 +919,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
if (id_priv->cma_dev) {
switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
case RDMA_TRANSPORT_IB:
- if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+ if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
break;
case RDMA_TRANSPORT_IWARP:
- if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+ if (id_priv->cm_id.iw)
iw_destroy_cm_id(id_priv->cm_id.iw);
break;
default:
@@ -1085,12 +1084,12 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (cma_get_net_info(ib_event->private_data, listen_id->ps,
&ip_ver, &port, &src, &dst))
- goto err;
+ return NULL;
id = rdma_create_id(listen_id->event_handler, listen_id->context,
listen_id->ps, ib_event->param.req_rcvd.qp_type);
if (IS_ERR(id))
- goto err;
+ return NULL;
cma_save_net_info(&id->route.addr, &listen_id->route.addr,
ip_ver, port, src, dst);
@@ -1100,7 +1099,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
GFP_KERNEL);
if (!rt->path_rec)
- goto destroy_id;
+ goto err;
rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
if (rt->num_paths == 2)
@@ -1114,7 +1113,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
&rt->addr.dev_addr);
if (ret)
- goto destroy_id;
+ goto err;
}
rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
@@ -1122,9 +1121,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
id_priv->state = RDMA_CM_CONNECT;
return id_priv;
-destroy_id:
- rdma_destroy_id(id);
err:
+ rdma_destroy_id(id);
return NULL;
}
@@ -1468,13 +1466,15 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
{
struct ib_cm_compare_data compare_data;
struct sockaddr *addr;
+ struct ib_cm_id *id;
__be64 svc_id;
int ret;
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.ib))
- return PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
+ if (IS_ERR(id))
+ return PTR_ERR(id);
+
+ id_priv->cm_id.ib = id;
addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
svc_id = cma_get_service_id(id_priv->id.ps, addr);
@@ -1497,12 +1497,15 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
int ret;
struct sockaddr_in *sin;
+ struct iw_cm_id *id;
+
+ id = iw_create_cm_id(id_priv->id.device,
+ iw_conn_req_handler,
+ id_priv);
+ if (IS_ERR(id))
+ return PTR_ERR(id);
- id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
- iw_conn_req_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.iw))
- return PTR_ERR(id_priv->cm_id.iw);
+ id_priv->cm_id.iw = id;
sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
id_priv->cm_id.iw->local_addr = *sin;
@@ -2484,6 +2487,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
{
struct ib_cm_sidr_req_param req;
struct rdma_route *route;
+ struct ib_cm_id *id;
int ret;
req.private_data_len = sizeof(struct cma_hdr) +
@@ -2501,12 +2505,13 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
if (ret)
goto out;
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
- cma_sidr_rep_handler, id_priv);
- if (IS_ERR(id_priv->cm_id.ib)) {
- ret = PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
+ id_priv);
+ if (IS_ERR(id)) {
+ ret = PTR_ERR(id);
goto out;
}
+ id_priv->cm_id.ib = id;
req.path = route->path_rec;
req.service_id = cma_get_service_id(id_priv->id.ps,
@@ -2530,6 +2535,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct ib_cm_req_param req;
struct rdma_route *route;
void *private_data;
+ struct ib_cm_id *id;
int offset, ret;
memset(&req, 0, sizeof req);
@@ -2543,12 +2549,12 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memcpy(private_data + offset, conn_param->private_data,
conn_param->private_data_len);
- id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
- id_priv);
- if (IS_ERR(id_priv->cm_id.ib)) {
- ret = PTR_ERR(id_priv->cm_id.ib);
+ id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
+ if (IS_ERR(id)) {
+ ret = PTR_ERR(id);
goto out;
}
+ id_priv->cm_id.ib = id;
route = &id_priv->id.route;
ret = cma_format_hdr(private_data, id_priv->id.ps, route);
@@ -2577,8 +2583,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
out:
- if (ret && !IS_ERR(id_priv->cm_id.ib)) {
- ib_destroy_cm_id(id_priv->cm_id.ib);
+ if (ret && !IS_ERR(id)) {
+ ib_destroy_cm_id(id);
id_priv->cm_id.ib = NULL;
}
@@ -2595,10 +2601,8 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
struct iw_cm_conn_param iw_param;
cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
- if (IS_ERR(cm_id)) {
- ret = PTR_ERR(cm_id);
- goto out;
- }
+ if (IS_ERR(cm_id))
+ return PTR_ERR(cm_id);
id_priv->cm_id.iw = cm_id;
@@ -2622,7 +2626,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
iw_param.qpn = conn_param->qp_num;
ret = iw_cm_connect(cm_id, &iw_param);
out:
- if (ret && !IS_ERR(cm_id)) {
+ if (ret) {
iw_destroy_cm_id(cm_id);
id_priv->cm_id.iw = NULL;
}
@@ -2795,7 +2799,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (id->device->node_type) {
@@ -2817,7 +2821,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2848,7 +2852,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
- if (!cma_has_cm_dev(id_priv))
+ if (!id_priv->cm_id.ib)
return -EINVAL;
switch (rdma_node_get_transport(id->device->node_type)) {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 4007f721d25..e711de400a0 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -627,6 +627,9 @@ int ib_modify_device(struct ib_device *device,
int device_modify_mask,
struct ib_device_modify *device_modify)
{
+ if (!device->modify_device)
+ return -ENOSYS;
+
return device->modify_device(device, device_modify_mask,
device_modify);
}
@@ -647,6 +650,9 @@ int ib_modify_port(struct ib_device *device,
u8 port_num, int port_modify_mask,
struct ib_port_modify *port_modify)
{
+ if (!device->modify_port)
+ return -ENOSYS;
+
if (port_num < start_port(device) || port_num > end_port(device))
return -EINVAL;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index aeebc4d37e3..f101bb73be6 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -99,14 +99,6 @@ static int c2_query_port(struct ib_device *ibdev,
return 0;
}
-static int c2_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- pr_debug("%s:%u\n", __func__, __LINE__);
- return 0;
-}
-
static int c2_query_pkey(struct ib_device *ibdev,
u8 port, u16 index, u16 * pkey)
{
@@ -817,7 +809,6 @@ int c2_register_device(struct c2_dev *dev)
dev->ibdev.dma_device = &dev->pcidev->dev;
dev->ibdev.query_device = c2_query_device;
dev->ibdev.query_port = c2_query_port;
- dev->ibdev.modify_port = c2_modify_port;
dev->ibdev.query_pkey = c2_query_pkey;
dev->ibdev.query_gid = c2_query_gid;
dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2e2741307af..c7d9411f295 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,13 +61,6 @@
#include "iwch_user.h"
#include "common.h"
-static int iwch_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
@@ -1392,7 +1385,6 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
dev->ibdev.query_device = iwch_query_device;
dev->ibdev.query_port = iwch_query_port;
- dev->ibdev.modify_port = iwch_modify_port;
dev->ibdev.query_pkey = iwch_query_pkey;
dev->ibdev.query_gid = iwch_query_gid;
dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 5b9e4220ca0..247fe706e7f 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,13 +58,6 @@ static int fastreg_support = 1;
module_param(fastreg_support, int, 0644);
MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
-static int c4iw_modify_port(struct ib_device *ibdev,
- u8 port, int port_modify_mask,
- struct ib_port_modify *props)
-{
- return -ENOSYS;
-}
-
static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
struct ib_ah_attr *ah_attr)
{
@@ -456,7 +449,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
dev->ibdev.query_device = c4iw_query_device;
dev->ibdev.query_port = c4iw_query_port;
- dev->ibdev.modify_port = c4iw_modify_port;
dev->ibdev.query_pkey = c4iw_query_pkey;
dev->ibdev.query_gid = c4iw_query_gid;
dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 4fb50d58b49..407ff392415 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -37,6 +37,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
+#include <linux/ratelimit.h>
#include "iw_cxgb4.h"
#define RANDOM_SIZE 16
@@ -311,8 +312,8 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
- if (!addr && printk_ratelimit())
- printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
+ if (!addr)
+ printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
pci_name(rdev->lldi.pdev));
return (u32)addr;
}
@@ -373,8 +374,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
- if (!addr && printk_ratelimit())
- printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
+ if (!addr)
+ printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev));
return (u32)addr;
}
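Unlike the global printk_ratelimit() test it replaces, each expansion of
printk_ratelimited() carries its own static ratelimit state, so a noisy
PBL-pool caller no longer silences the RQT-pool warning and vice versa.
A drop-in usage sketch:

#include <linux/ratelimit.h>

/* Sketch: per-call-site throttling; this site's state is independent
 * of every other printk_ratelimited() in the kernel. */
static void warn_pool_empty(const char *dev, const char *pool)
{
	printk_ratelimited(KERN_WARNING "%s: Out of %s memory\n", dev, pool);
}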
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index ee79a2d97b1..8697eca1435 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/jiffies.h>
+#include <linux/cpu.h>
#include <asm/pgtable.h>
#include "ipath_kernel.h"
@@ -1684,17 +1685,19 @@ static int find_best_unit(struct file *fp,
* information. There may be some issues with dual core numbering
* as well. This needs more work prior to release.
*/
- if (!cpumask_empty(&current->cpus_allowed) &&
- !cpumask_full(&current->cpus_allowed)) {
+ if (!cpumask_empty(tsk_cpus_allowed(current)) &&
+ !cpumask_full(tsk_cpus_allowed(current))) {
int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
- for (i = 0; i < ncpus; i++)
- if (cpumask_test_cpu(i, &current->cpus_allowed)) {
+ get_online_cpus();
+ for_each_online_cpu(i)
+ if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
ipath_cdbg(PROC, "%s[%u] affinity set for "
"cpu %d/%d\n", current->comm,
current->pid, i, ncpus);
curcpu = i;
nset++;
}
+ put_online_cpus();
if (curcpu != -1 && nset != ncpus) {
if (npresent) {
prefunit = curcpu / (ncpus / npresent);
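
Note: the ipath hunk above stops indexing CPUs 0..ncpus-1 directly and
instead walks for_each_online_cpu() between get_online_cpus() and
put_online_cpus(), so sparse CPU numbering and concurrent hotplug are
handled; tsk_cpus_allowed() replaces direct &current->cpus_allowed access.
A minimal sketch of the same idiom in isolation:

	#include <linux/cpu.h>
	#include <linux/cpumask.h>
	#include <linux/sched.h>

	/* Count the online CPUs the current task may run on; the
	 * get/put_online_cpus() pair pins the online mask against
	 * hotplug while we walk it. */
	static int count_allowed_online_cpus(void)
	{
		int cpu, nset = 0;

		get_online_cpus();
		for_each_online_cpu(cpu)
			if (cpumask_test_cpu(cpu, tsk_cpus_allowed(current)))
				nset++;
		put_online_cpus();

		return nset;
	}
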
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index ceb98ee7866..43f2d0424d4 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -32,6 +32,7 @@
*/
#include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
#include "ipath_kernel.h"
#include "ipath_verbs.h"
@@ -789,151 +790,18 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
return recv_subn_get_pkeytable(smp, ibdev);
}
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
-
-struct ib_perf {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 unused;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- u8 reserved[40];
- u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
- u8 opcode;
- u8 port_select;
- u8 tick;
- u8 counter_width; /* only lower 3 bits */
- __be32 counter_mask0_9; /* 2, 10 * 3, bits */
- __be16 counter_mask10_14; /* 1, 5 * 3, bits */
- u8 sample_mechanisms;
- u8 sample_status; /* only lower 2 bits */
- __be64 option_mask;
- __be64 vendor_mask;
- __be32 sample_start;
- __be32 sample_interval;
- __be16 tag;
- __be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 extended_width; /* only upper 2 bits */
- __be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved1;
- u8 lli_ebor_errors; /* 4, 4, bits */
- __be16 reserved2;
- __be16 vl15_dropped;
- __be32 port_xmit_data;
- __be32 port_rcv_data;
- __be32 port_xmit_packets;
- __be32 port_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
-
-struct ib_pma_portcounters_ext {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be32 reserved1;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_unicast_xmit_packets;
- __be64 port_unicast_rcv_packets;
- __be64 port_multicast_xmit_packets;
- __be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
-
-static int recv_pma_get_classportinfo(struct ib_perf *pmp)
+static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
{
- struct ib_pma_classportinfo *p =
- (struct ib_pma_classportinfo *)pmp->data;
+ struct ib_class_port_info *p =
+ (struct ib_class_port_info *)pmp->data;
memset(pmp->data, 0, sizeof(pmp->data));
- if (pmp->attr_mod != 0)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/* Indicate AllPortSelect is valid (only one port anyway) */
- p->cap_mask = cpu_to_be16(1 << 8);
+ p->capability_mask = cpu_to_be16(1 << 8);
p->base_version = 1;
p->class_version = 1;
/*
@@ -957,7 +825,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
-static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -970,9 +838,9 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(port_select != port && port_select != 0xFF))
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/*
* Ticks are 10x the link transfer period which for 2.5Gbs is 4
* nsec. 0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec. Sample
@@ -1006,7 +874,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1017,9 +885,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
u8 status;
int ret;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(p->port_select != port && p->port_select != 0xFF)) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1093,7 +961,7 @@ static u64 get_counter(struct ipath_ibdev *dev,
return ret;
}
-static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
struct ib_pma_portsamplesresult *p =
@@ -1118,7 +986,7 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
struct ib_pma_portsamplesresult_ext *p =
@@ -1145,7 +1013,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_get_portcounters(struct ib_perf *pmp,
+static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1179,9 +1047,9 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(port_select != port && port_select != 0xFF))
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
if (cntrs.symbol_error_counter > 0xFFFFUL)
p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1216,7 +1084,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1244,7 +1112,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters_ext *p =
@@ -1265,9 +1133,9 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 ||
+ if (pmp->mad_hdr.attr_mod != 0 ||
(port_select != port && port_select != 0xFF))
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
p->port_xmit_data = cpu_to_be64(swords);
p->port_rcv_data = cpu_to_be64(rwords);
@@ -1281,7 +1149,7 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int recv_pma_set_portcounters(struct ib_perf *pmp,
+static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1344,7 +1212,7 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
return recv_pma_get_portcounters(pmp, ibdev, port);
}
-static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1518,19 +1386,19 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
- struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
int ret;
*out_mad = *in_mad;
- if (pmp->class_version != 1) {
- pmp->status |= IB_SMP_UNSUP_VERSION;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
- switch (pmp->method) {
+ switch (pmp->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_CLASS_PORT_INFO:
ret = recv_pma_get_classportinfo(pmp);
goto bail;
@@ -1554,13 +1422,13 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
port_num);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
case IB_MGMT_METHOD_SET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_PORT_SAMPLES_CONTROL:
ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
port_num);
@@ -1574,7 +1442,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
port_num);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1588,7 +1456,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
ret = IB_MAD_RESULT_SUCCESS;
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METHOD;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_smp *) pmp);
}
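
Note: the ipath_mad.c change deletes the driver-private struct ib_perf and
the duplicated PMA attribute/struct definitions in favor of the shared
<rdma/ib_pma.h> declarations; MAD header fields are now reached through
pmp->mad_hdr. A minimal sketch of a handler written against the common
layout (validation only, reply path elided):

	#include <linux/errno.h>
	#include <rdma/ib_pma.h>
	#include <rdma/ib_smi.h>

	/* Reject a ClassPortInfo request with a nonzero attribute
	 * modifier, flagging the MAD status field as the code above does. */
	static int check_classportinfo(struct ib_pma_mad *pmp)
	{
		if (pmp->mad_hdr.attr_mod != 0) {
			pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
			return -EINVAL;
		}
		return 0;
	}
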
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57ffa50f509..f36da994a85 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -35,6 +35,7 @@
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
+#include <rdma/ib_pma.h>
#include "mlx4_ib.h"
@@ -232,7 +233,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
}
}
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct ib_wc *in_wc, struct ib_grh *in_grh,
struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -302,6 +303,71 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
+static void edit_counter(struct mlx4_counter *cnt,
+ struct ib_pma_portcounters *pma_cnt)
+{
+ pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes)>>2));
+ pma_cnt->port_rcv_data = cpu_to_be32((be64_to_cpu(cnt->rx_bytes)>>2));
+ pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+ pma_cnt->port_rcv_packets = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+}
+
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_ib_dev *dev = to_mdev(ibdev);
+ int err;
+ u32 inmod = dev->counters[port_num - 1] & 0xffff;
+ u8 mode;
+
+ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+ return -EINVAL;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+ if (IS_ERR(mailbox))
+ return IB_MAD_RESULT_FAILURE;
+
+ err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+ if (err)
+ err = IB_MAD_RESULT_FAILURE;
+ else {
+ memset(out_mad->data, 0, sizeof out_mad->data);
+ mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
+ switch (mode & 0xf) {
+ case 0:
+ edit_counter(mailbox->buf,
+ (void *)(out_mad->data + 40));
+ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ break;
+ default:
+ err = IB_MAD_RESULT_FAILURE;
+ }
+ }
+
+ mlx4_free_cmd_mailbox(dev->dev, mailbox);
+
+ return err;
+}
+
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ struct ib_wc *in_wc, struct ib_grh *in_grh,
+ struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+ switch (rdma_port_get_link_layer(ibdev, port_num)) {
+ case IB_LINK_LAYER_INFINIBAND:
+ return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+ case IB_LINK_LAYER_ETHERNET:
+ return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+ in_grh, in_mad, out_mad);
+ default:
+ return -EINVAL;
+ }
+}
+
static void send_handler(struct ib_mad_agent *agent,
struct ib_mad_send_wc *mad_send_wc)
{
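
Note: mlx4_ib_process_mad() above becomes a dispatcher on
rdma_port_get_link_layer(): InfiniBand ports keep the original MAD path,
while IBoE (Ethernet) ports answer performance queries from the hardware
counter read in iboe_process_mad(). A minimal sketch of the dispatch shape,
with hypothetical handlers standing in for the two paths:

	#include <linux/errno.h>
	#include <rdma/ib_verbs.h>

	/* Hypothetical stand-ins for the per-transport handlers. */
	static int handle_ib_mad(struct ib_device *ibdev, u8 port)
	{ return 0; }
	static int handle_iboe_mad(struct ib_device *ibdev, u8 port)
	{ return 0; }

	static int process_by_link_layer(struct ib_device *ibdev, u8 port)
	{
		switch (rdma_port_get_link_layer(ibdev, port)) {
		case IB_LINK_LAYER_INFINIBAND:
			return handle_ib_mad(ibdev, port);
		case IB_LINK_LAYER_ETHERNET:
			return handle_iboe_mad(ibdev, port);
		default:
			return -EINVAL;		/* unknown link layer */
		}
	}
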
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fbe1973f77b..fa643f4f4e2 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -816,7 +816,7 @@ static void update_gids_task(struct work_struct *work)
memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
event.device = &gw->dev->ib_dev;
event.element.port_num = gw->port;
- event.event = IB_EVENT_LID_CHANGE;
+ event.event = IB_EVENT_GID_CHANGE;
ib_dispatch_event(&event);
}
@@ -1098,11 +1098,21 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (init_node_data(ibdev))
goto err_map;
+ for (i = 0; i < ibdev->num_ports; ++i) {
+ if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+ if (err)
+ ibdev->counters[i] = -1;
+ } else
+ ibdev->counters[i] = -1;
+ }
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
if (ib_register_device(&ibdev->ib_dev, NULL))
- goto err_map;
+ goto err_counter;
if (mlx4_ib_mad_init(ibdev))
goto err_reg;
@@ -1132,6 +1142,10 @@ err_notif:
err_reg:
ib_unregister_device(&ibdev->ib_dev);
+err_counter:
+ for (; i; --i)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+
err_map:
iounmap(ibdev->uar_map);
@@ -1160,7 +1174,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
ibdev->iboe.nb.notifier_call = NULL;
}
iounmap(ibdev->uar_map);
-
+ for (p = 0; p < ibdev->num_ports; ++p)
+ mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);
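
Note: the main.c hunks allocate one hardware counter per Ethernet port at
add time (storing -1 when allocation fails or the port is IB), add an
err_counter unwind label, and free the counters again on remove. A minimal
sketch of the "unwind only what was set up" idiom, with hypothetical
alloc_one()/free_one() helpers:

	/* Hypothetical resource helpers for illustration. */
	static int alloc_one(int *out) { *out = 1; return 0; }
	static void free_one(int res) { (void)res; }

	static int setup_all(int *res, int n)
	{
		int i, err = 0;

		for (i = 0; i < n; ++i) {
			err = alloc_one(&res[i]);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		for (; i; --i)		/* same shape as err_counter above */
			free_one(res[i - 1]);
		return err;
	}
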
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 2a322f21049..e4bf2cff866 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -193,6 +193,7 @@ struct mlx4_ib_dev {
struct mutex cap_mask_mutex;
bool ib_active;
struct mlx4_ib_iboe iboe;
+ int counters[MLX4_MAX_PORTS];
};
static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2001f20a436..3a91d9d8dc5 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -893,7 +893,6 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
--path->static_rate;
} else
path->static_rate = 0;
- path->counter_index = 0xff;
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
@@ -1034,6 +1033,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
}
}
+ if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ if (dev->counters[qp->port - 1] != -1) {
+ context->pri_path.counter_index =
+ dev->counters[qp->port - 1];
+ optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
+ } else
+ context->pri_path.counter_index = 0xff;
+ }
+
if (attr_mask & IB_QP_PKEY_INDEX) {
context->pri_path.pkey_index = attr->pkey_index;
optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
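
Note: the qp.c hunk moves counter binding to the INIT->RTR transition: when
a counter was allocated for the QP's port, its index goes into the primary
path and MLX4_QP_OPTPAR_COUNTER_INDEX is set; otherwise 0xff (no counter)
is written, replacing the unconditional 0xff in mlx4_set_path(). A minimal
sketch of the selection, assuming the counters[] array added to
struct mlx4_ib_dev:

	#include <linux/types.h>

	/* -1 in counters[] means "no counter allocated for this port";
	 * 0xff is the hardware's "no counter" index. */
	static u8 pick_counter_index(const int *counters, int port)
	{
		int idx = counters[port - 1];

		return idx != -1 ? (u8)idx : 0xff;
	}
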
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 7bfa2a16495..3082b3b3d62 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -301,6 +301,38 @@ static int mthca_cmd_post(struct mthca_dev *dev,
return err;
}
+
+static int mthca_status_to_errno(u8 status)
+{
+ static const int trans_table[] = {
+ [MTHCA_CMD_STAT_INTERNAL_ERR] = -EIO,
+ [MTHCA_CMD_STAT_BAD_OP] = -EPERM,
+ [MTHCA_CMD_STAT_BAD_PARAM] = -EINVAL,
+ [MTHCA_CMD_STAT_BAD_SYS_STATE] = -ENXIO,
+ [MTHCA_CMD_STAT_BAD_RESOURCE] = -EBADF,
+ [MTHCA_CMD_STAT_RESOURCE_BUSY] = -EBUSY,
+ [MTHCA_CMD_STAT_DDR_MEM_ERR] = -ENOMEM,
+ [MTHCA_CMD_STAT_EXCEED_LIM] = -ENOMEM,
+ [MTHCA_CMD_STAT_BAD_RES_STATE] = -EBADF,
+ [MTHCA_CMD_STAT_BAD_INDEX] = -EBADF,
+ [MTHCA_CMD_STAT_BAD_NVMEM] = -EFAULT,
+ [MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL,
+ [MTHCA_CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
+ [MTHCA_CMD_STAT_REG_BOUND] = -EBUSY,
+ [MTHCA_CMD_STAT_LAM_NOT_PRE] = -EAGAIN,
+ [MTHCA_CMD_STAT_BAD_PKT] = -EBADMSG,
+ [MTHCA_CMD_STAT_BAD_SIZE] = -ENOMEM,
+ };
+
+ if (status >= ARRAY_SIZE(trans_table) ||
+ (status != MTHCA_CMD_STAT_OK
+ && trans_table[status] == 0))
+ return -EINVAL;
+
+ return trans_table[status];
+}
+
+
static int mthca_cmd_poll(struct mthca_dev *dev,
u64 in_param,
u64 *out_param,
@@ -308,11 +340,11 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
int err = 0;
unsigned long end;
+ u8 status;
down(&dev->cmd.poll_sem);
@@ -341,7 +373,12 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
(u64) be32_to_cpu((__force __be32)
__raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
- *status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+ status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+ if (status) {
+ mthca_dbg(dev, "Command %02x completed with status %02x\n",
+ op, status);
+ err = mthca_status_to_errno(status);
+ }
out:
up(&dev->cmd.poll_sem);
@@ -374,8 +411,7 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
int err = 0;
struct mthca_cmd_context *context;
@@ -407,10 +443,11 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
if (err)
goto out;
- *status = context->status;
- if (*status)
+ if (context->status) {
mthca_dbg(dev, "Command %02x completed with status %02x\n",
- op, *status);
+ op, context->status);
+ err = mthca_status_to_errno(context->status);
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -432,17 +469,16 @@ static int mthca_cmd_box(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
return mthca_cmd_wait(dev, in_param, &out_param, 0,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
else
return mthca_cmd_poll(dev, in_param, &out_param, 0,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
}
/* Invoke a command with no output parameter */
@@ -451,11 +487,10 @@ static int mthca_cmd(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
return mthca_cmd_box(dev, in_param, 0, in_modifier,
- op_modifier, op, timeout, status);
+ op_modifier, op, timeout);
}
/*
@@ -469,17 +504,16 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
u32 in_modifier,
u8 op_modifier,
u16 op,
- unsigned long timeout,
- u8 *status)
+ unsigned long timeout)
{
if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
return mthca_cmd_wait(dev, in_param, out_param, 1,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
else
return mthca_cmd_poll(dev, in_param, out_param, 1,
in_modifier, op_modifier, op,
- timeout, status);
+ timeout);
}
int mthca_cmd_init(struct mthca_dev *dev)
@@ -596,14 +630,14 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
kfree(mailbox);
}
-int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
+int mthca_SYS_EN(struct mthca_dev *dev)
{
u64 out;
int ret;
- ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D, status);
+ ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
- if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR)
+ if (ret == -ENOMEM)
mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
"sladdr=%d, SPD source=%s\n",
(int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
@@ -612,13 +646,13 @@ int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
return ret;
}
-int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status)
+int mthca_SYS_DIS(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
}
static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
- u64 virt, u8 *status)
+ u64 virt)
{
struct mthca_mailbox *mailbox;
struct mthca_icm_iter iter;
@@ -666,8 +700,8 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
if (++nent == MTHCA_MAILBOX_SIZE / 16) {
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
- CMD_TIME_CLASS_B, status);
- if (err || *status)
+ CMD_TIME_CLASS_B);
+ if (err)
goto out;
nent = 0;
}
@@ -676,7 +710,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
if (nent)
err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
switch (op) {
case CMD_MAP_FA:
@@ -696,19 +730,19 @@ out:
return err;
}
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm)
{
- return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status);
+ return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1);
}
-int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status)
+int mthca_UNMAP_FA(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B);
}
-int mthca_RUN_FW(struct mthca_dev *dev, u8 *status)
+int mthca_RUN_FW(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A);
}
static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
@@ -737,7 +771,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
}
-int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
+int mthca_QUERY_FW(struct mthca_dev *dev)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
@@ -771,7 +805,7 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -843,7 +877,7 @@ out:
return err;
}
-int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
+int mthca_ENABLE_LAM(struct mthca_dev *dev)
{
struct mthca_mailbox *mailbox;
u8 info;
@@ -864,14 +898,11 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
- CMD_TIME_CLASS_C, status);
+ CMD_TIME_CLASS_C);
if (err)
goto out;
- if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
- goto out;
-
MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET);
MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET);
@@ -896,12 +927,12 @@ out:
return err;
}
-int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status)
+int mthca_DISABLE_LAM(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
}
-int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
+int mthca_QUERY_DDR(struct mthca_dev *dev)
{
struct mthca_mailbox *mailbox;
u8 info;
@@ -922,7 +953,7 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -952,7 +983,7 @@ out:
}
int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
- struct mthca_dev_lim *dev_lim, u8 *status)
+ struct mthca_dev_lim *dev_lim)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
@@ -1028,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -1232,7 +1263,7 @@ static void get_board_id(void *vsd, char *board_id)
}
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
- struct mthca_adapter *adapter, u8 *status)
+ struct mthca_adapter *adapter)
{
struct mthca_mailbox *mailbox;
u32 *outbox;
@@ -1251,7 +1282,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
outbox = mailbox->buf;
err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
if (err)
goto out;
@@ -1275,8 +1306,7 @@ out:
}
int mthca_INIT_HCA(struct mthca_dev *dev,
- struct mthca_init_hca_param *param,
- u8 *status)
+ struct mthca_init_hca_param *param)
{
struct mthca_mailbox *mailbox;
__be32 *inbox;
@@ -1393,7 +1423,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
}
- err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, CMD_TIME_CLASS_D, status);
+ err = mthca_cmd(dev, mailbox->dma, 0, 0,
+ CMD_INIT_HCA, CMD_TIME_CLASS_D);
mthca_free_mailbox(dev, mailbox);
return err;
@@ -1401,7 +1432,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
int mthca_INIT_IB(struct mthca_dev *dev,
struct mthca_init_ib_param *param,
- int port, u8 *status)
+ int port)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
@@ -1445,24 +1476,24 @@ int mthca_INIT_IB(struct mthca_dev *dev,
MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
mthca_free_mailbox(dev, mailbox);
return err;
}
-int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status)
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port)
{
- return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A, status);
+ return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A);
}
-int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status)
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic)
{
- return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C, status);
+ return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C);
}
int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
- int port, u8 *status)
+ int port)
{
struct mthca_mailbox *mailbox;
u32 *inbox;
@@ -1491,18 +1522,18 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
mthca_free_mailbox(dev, mailbox);
return err;
}
-int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status)
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt)
{
- return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status);
+ return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt);
}
-int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status)
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt)
{
struct mthca_mailbox *mailbox;
__be64 *inbox;
@@ -1517,7 +1548,7 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
inbox[1] = cpu_to_be64(dma_addr);
err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
mthca_free_mailbox(dev, mailbox);
@@ -1528,31 +1559,31 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
return err;
}
-int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status)
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count)
{
mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
page_count, (unsigned long long) virt);
- return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, virt, page_count, 0,
+ CMD_UNMAP_ICM, CMD_TIME_CLASS_B);
}
-int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status)
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm)
{
- return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status);
+ return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1);
}
-int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status)
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B);
}
-int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
- u8 *status)
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages)
{
- int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE,
- CMD_TIME_CLASS_A, status);
+ int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0,
+ 0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A);
- if (ret || status)
+ if (ret)
return ret;
/*
@@ -1566,74 +1597,73 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
}
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status)
+ int mpt_index)
{
return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status)
+ int mpt_index)
{
return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
!mailbox, CMD_HW2SW_MPT,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int num_mtt, u8 *status)
+ int num_mtt)
{
return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
-int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status)
+int mthca_SYNC_TPT(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status);
+ return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B);
}
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
- int eq_num, u8 *status)
+ int eq_num)
{
mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
unmap ? "Clearing" : "Setting",
(unsigned long long) event_mask, eq_num);
return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status);
+ 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
}
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status)
+ int eq_num)
{
return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status)
+ int eq_num)
{
return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
CMD_HW2SW_EQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status)
+ int cq_num)
{
return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status)
+ int cq_num)
{
return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
CMD_HW2SW_CQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
-int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
- u8 *status)
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size)
{
struct mthca_mailbox *mailbox;
__be32 *inbox;
@@ -1657,44 +1687,43 @@ int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET);
err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
mthca_free_mailbox(dev, mailbox);
return err;
}
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status)
+ int srq_num)
{
return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status)
+ int srq_num)
{
return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
CMD_HW2SW_SRQ,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
- CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status);
+ CMD_QUERY_SRQ, CMD_TIME_CLASS_A);
}
-int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status)
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit)
{
return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
enum ib_qp_state next, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u32 optmask,
- u8 *status)
+ struct mthca_mailbox *mailbox, u32 optmask)
{
static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
[IB_QPS_RESET] = {
@@ -1755,7 +1784,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
(!!is_ee << 24) | num, op_mod,
- op[cur][next], CMD_TIME_CLASS_C, status);
+ op[cur][next], CMD_TIME_CLASS_C);
if (0 && mailbox) {
int i;
@@ -1789,21 +1818,20 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
}
err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
- op_mod, op[cur][next], CMD_TIME_CLASS_C, status);
+ op_mod, op[cur][next], CMD_TIME_CLASS_C);
}
return err;
}
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
- CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status);
+ CMD_QUERY_QPEE, CMD_TIME_CLASS_A);
}
-int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
- u8 *status)
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
{
u8 op_mod;
@@ -1825,12 +1853,12 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
}
return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
- CMD_TIME_CLASS_B, status);
+ CMD_TIME_CLASS_B);
}
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad, u8 *status)
+ void *in_mad, void *response_mad)
{
struct mthca_mailbox *inmailbox, *outmailbox;
void *inbox;
@@ -1897,9 +1925,9 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
- CMD_MAD_IFC, CMD_TIME_CLASS_C, status);
+ CMD_MAD_IFC, CMD_TIME_CLASS_C);
- if (!err && !*status)
+ if (!err)
memcpy(response_mad, outmailbox->buf, 256);
mthca_free_mailbox(dev, inmailbox);
@@ -1908,33 +1936,33 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
}
int mthca_READ_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
- CMD_READ_MGM, CMD_TIME_CLASS_A, status);
+ CMD_READ_MGM, CMD_TIME_CLASS_A);
}
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status)
+ struct mthca_mailbox *mailbox)
{
return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
}
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- u16 *hash, u8 *status)
+ u16 *hash)
{
u64 imm;
int err;
err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
- CMD_TIME_CLASS_A, status);
+ CMD_TIME_CLASS_A);
*hash = imm;
return err;
}
-int mthca_NOP(struct mthca_dev *dev, u8 *status)
+int mthca_NOP(struct mthca_dev *dev)
{
- return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100), status);
+ return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100));
}
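
Note: the mthca_cmd.c rework drops the u8 *status out-parameter from every
firmware command wrapper; mthca_status_to_errno() now maps hardware status
codes to negative errnos inside mthca_cmd_poll()/mthca_cmd_wait(), so
callers check a single return value (ENABLE_LAM's "LAM not pre-loaded"
status, for instance, surfaces as -EAGAIN). A minimal sketch of the sparse
lookup-table idiom, with hypothetical status codes:

	#include <linux/errno.h>
	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Hypothetical firmware status codes for illustration. */
	enum { FW_STAT_OK = 0, FW_STAT_BUSY = 1, FW_STAT_BAD_PARAM = 3 };

	static int fw_status_to_errno(u8 status)
	{
		static const int table[] = {
			[FW_STAT_BUSY]		= -EBUSY,
			[FW_STAT_BAD_PARAM]	= -EINVAL,
		};

		/* Unset entries are 0; distinguish them from the real
		 * FW_STAT_OK (also 0) before trusting the table. */
		if (status >= ARRAY_SIZE(table) ||
		    (status != FW_STAT_OK && table[status] == 0))
			return -EINVAL;

		return table[status];
	}
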
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 6efd3265f24..f952244c54d 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -252,79 +252,74 @@ struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
gfp_t gfp_mask);
void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
-int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
-int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status);
-int mthca_RUN_FW(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status);
-int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status);
+int mthca_SYS_EN(struct mthca_dev *dev);
+int mthca_SYS_DIS(struct mthca_dev *dev);
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_FA(struct mthca_dev *dev);
+int mthca_RUN_FW(struct mthca_dev *dev);
+int mthca_QUERY_FW(struct mthca_dev *dev);
+int mthca_ENABLE_LAM(struct mthca_dev *dev);
+int mthca_DISABLE_LAM(struct mthca_dev *dev);
+int mthca_QUERY_DDR(struct mthca_dev *dev);
int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
- struct mthca_dev_lim *dev_lim, u8 *status);
+ struct mthca_dev_lim *dev_lim);
int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
- struct mthca_adapter *adapter, u8 *status);
+ struct mthca_adapter *adapter);
int mthca_INIT_HCA(struct mthca_dev *dev,
- struct mthca_init_hca_param *param,
- u8 *status);
+ struct mthca_init_hca_param *param);
int mthca_INIT_IB(struct mthca_dev *dev,
struct mthca_init_ib_param *param,
- int port, u8 *status);
-int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status);
-int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status);
+ int port);
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port);
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic);
int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
- int port, u8 *status);
-int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status);
-int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status);
-int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status);
-int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
-int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
- u8 *status);
+ int port);
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt);
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt);
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count);
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev);
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages);
int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status);
+ int mpt_index);
int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int mpt_index, u8 *status);
+ int mpt_index);
int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int num_mtt, u8 *status);
-int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
+ int num_mtt);
+int mthca_SYNC_TPT(struct mthca_dev *dev);
int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
- int eq_num, u8 *status);
+ int eq_num);
int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status);
+ int eq_num);
int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int eq_num, u8 *status);
+ int eq_num);
int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status);
+ int cq_num);
int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int cq_num, u8 *status);
-int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
- u8 *status);
+ int cq_num);
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size);
int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status);
+ int srq_num);
int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- int srq_num, u8 *status);
+ int srq_num);
int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
- struct mthca_mailbox *mailbox, u8 *status);
-int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
+ struct mthca_mailbox *mailbox);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit);
int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
enum ib_qp_state next, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u32 optmask,
- u8 *status);
+ struct mthca_mailbox *mailbox, u32 optmask);
int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
- struct mthca_mailbox *mailbox, u8 *status);
-int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
- u8 *status);
+ struct mthca_mailbox *mailbox);
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn);
int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
- void *in_mad, void *response_mad, u8 *status);
+ void *in_mad, void *response_mad);
int mthca_READ_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status);
+ struct mthca_mailbox *mailbox);
int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
- struct mthca_mailbox *mailbox, u8 *status);
+ struct mthca_mailbox *mailbox);
int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
- u16 *hash, u8 *status);
-int mthca_NOP(struct mthca_dev *dev, u8 *status);
+ u16 *hash);
+int mthca_NOP(struct mthca_dev *dev);
#endif /* MTHCA_CMD_H */
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 18ee3fa4b88..53157b86a1b 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -779,7 +779,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
struct mthca_mailbox *mailbox;
struct mthca_cq_context *cq_context;
int err = -ENOMEM;
- u8 status;
cq->ibcq.cqe = nent - 1;
cq->is_kernel = !ctx;
@@ -847,19 +846,12 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
cq_context->state_db = cpu_to_be32(cq->arm_db_index);
}
- err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
+ err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
if (err) {
mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
goto err_out_free_mr;
}
- if (status) {
- mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_free_mr;
- }
-
spin_lock_irq(&dev->cq_table.lock);
if (mthca_array_set(&dev->cq_table.cq,
cq->cqn & (dev->limits.num_cqs - 1),
@@ -915,7 +907,6 @@ void mthca_free_cq(struct mthca_dev *dev,
{
struct mthca_mailbox *mailbox;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
@@ -923,11 +914,9 @@ void mthca_free_cq(struct mthca_dev *dev,
return;
}
- err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
+ err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
if (err)
mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
if (0) {
__be32 *ctx = mailbox->buf;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 76785c653c1..7c9d35f39d7 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -474,7 +474,6 @@ static int mthca_create_eq(struct mthca_dev *dev,
struct mthca_eq_context *eq_context;
int err = -ENOMEM;
int i;
- u8 status;
eq->dev = dev;
eq->nent = roundup_pow_of_two(max(nent, 2));
@@ -543,15 +542,9 @@ static int mthca_create_eq(struct mthca_dev *dev,
eq_context->intr = intr;
eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
- err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
+ err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
if (err) {
- mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
- goto err_out_free_mr;
- }
- if (status) {
- mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
- status);
- err = -EINVAL;
+ mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
goto err_out_free_mr;
}
@@ -597,7 +590,6 @@ static void mthca_free_eq(struct mthca_dev *dev,
{
struct mthca_mailbox *mailbox;
int err;
- u8 status;
int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
PAGE_SIZE;
int i;
@@ -606,11 +598,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
if (IS_ERR(mailbox))
return;
- err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
+ err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
if (err)
- mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
- if (status)
- mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
+ mthca_warn(dev, "HW2SW_EQ returned %d\n", err);
dev->eq_table.arm_mask &= ~eq->eqn_mask;
@@ -738,7 +728,6 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
{
int ret;
- u8 status;
/*
* We assume that mapping one page is enough for the whole EQ
@@ -757,9 +746,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
return -ENOMEM;
}
- ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
- if (!ret && status)
- ret = -EINVAL;
+ ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
if (ret) {
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
@@ -771,9 +758,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
void mthca_unmap_eq_icm(struct mthca_dev *dev)
{
- u8 status;
-
- mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
+ mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
__free_page(dev->eq_table.icm_page);
@@ -782,7 +767,6 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
int mthca_init_eq_table(struct mthca_dev *dev)
{
int err;
- u8 status;
u8 intr;
int i;
@@ -864,22 +848,16 @@ int mthca_init_eq_table(struct mthca_dev *dev)
}
err = mthca_MAP_EQ(dev, async_mask(dev),
- 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+ 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
if (err)
mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
- if (status)
- mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
- dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
- 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+ 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
if (err)
mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
- if (status)
- mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
- dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
if (mthca_is_memfree(dev))
@@ -909,15 +887,14 @@ err_out_free:
void mthca_cleanup_eq_table(struct mthca_dev *dev)
{
- u8 status;
int i;
mthca_free_irqs(dev);
mthca_MAP_EQ(dev, async_mask(dev),
- 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+ 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
- 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+ 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
for (i = 0; i < MTHCA_NUM_EQ; ++i)
mthca_free_eq(dev, &dev->eq_table.eq[i]);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 03a59534f59..b6f7f457fc5 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -201,7 +201,6 @@ int mthca_process_mad(struct ib_device *ibdev,
struct ib_mad *out_mad)
{
int err;
- u8 status;
u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
u16 prev_lid = 0;
struct ib_port_attr pattr;
@@ -252,17 +251,11 @@ int mthca_process_mad(struct ib_device *ibdev,
err = mthca_MAD_IFC(to_mdev(ibdev),
mad_flags & IB_MAD_IGNORE_MKEY,
mad_flags & IB_MAD_IGNORE_BKEY,
- port_num, in_wc, in_grh, in_mad, out_mad,
- &status);
- if (err) {
- mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
- return IB_MAD_RESULT_FAILURE;
- }
- if (status == MTHCA_CMD_STAT_BAD_PKT)
+ port_num, in_wc, in_grh, in_mad, out_mad);
+ if (err == -EBADMSG)
return IB_MAD_RESULT_SUCCESS;
- if (status) {
- mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
- status);
+ else if (err) {
+ mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
return IB_MAD_RESULT_FAILURE;
}
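
Note: mthca_process_mad() above leans on the new mapping: the former
MTHCA_CMD_STAT_BAD_PKT status arrives as -EBADMSG, which is deliberately
treated as success so a malformed MAD is dropped without a reply, while any
other error still fails the operation. A minimal sketch of that triage,
with a hypothetical command wrapper:

	#include <linux/errno.h>

	/* Hypothetical stand-ins for IB_MAD_RESULT_* and MAD_IFC. */
	enum { MAD_OK = 0, MAD_FAIL = 1 };
	static int fw_mad_ifc(void) { return 0; }

	static int triage_mad_ifc(void)
	{
		int err = fw_mad_ifc();

		if (err == -EBADMSG)	/* bad packet: consume silently */
			return MAD_OK;
		if (err)
			return MAD_FAIL;
		return MAD_OK;
	}
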
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f24b79b805f..aa12a533ae9 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -149,7 +149,7 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
- if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
+ if (pci_is_pcie(mdev->pdev)) {
if (pcie_set_readrq(mdev->pdev, 4096)) {
mthca_err(mdev, "Couldn't write PCI Express read request, "
"aborting.\n");
@@ -165,19 +165,14 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
{
int err;
- u8 status;
mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
- err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+ err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
if (err) {
- mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
+ ", aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
if (dev_lim->min_page_sz > PAGE_SIZE) {
mthca_err(mdev, "HCA minimum page size of %d bigger than "
"kernel PAGE_SIZE of %ld, aborting.\n",
@@ -293,49 +288,32 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
static int mthca_init_tavor(struct mthca_dev *mdev)
{
s64 size;
- u8 status;
int err;
struct mthca_dev_lim dev_lim;
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
- err = mthca_SYS_EN(mdev, &status);
+ err = mthca_SYS_EN(mdev);
if (err) {
- mthca_err(mdev, "SYS_EN command failed, aborting.\n");
+ mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "SYS_EN returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
- err = mthca_QUERY_FW(mdev, &status);
+ err = mthca_QUERY_FW(mdev);
if (err) {
- mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
- goto err_disable;
- }
- if (status) {
- mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "QUERY_FW command returned %d,"
+ " aborting.\n", err);
goto err_disable;
}
- err = mthca_QUERY_DDR(mdev, &status);
+ err = mthca_QUERY_DDR(mdev);
if (err) {
- mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
- goto err_disable;
- }
- if (status) {
- mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
goto err_disable;
}
err = mthca_dev_lim(mdev, &dev_lim);
if (err) {
- mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
goto err_disable;
}
@@ -351,29 +329,22 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
goto err_disable;
}
- err = mthca_INIT_HCA(mdev, &init_hca, &status);
+ err = mthca_INIT_HCA(mdev, &init_hca);
if (err) {
- mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
- goto err_disable;
- }
- if (status) {
- mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
goto err_disable;
}
return 0;
err_disable:
- mthca_SYS_DIS(mdev, &status);
+ mthca_SYS_DIS(mdev);
return err;
}
static int mthca_load_fw(struct mthca_dev *mdev)
{
- u8 status;
int err;
/* FIXME: use HCA-attached memory for FW if present */
@@ -386,31 +357,21 @@ static int mthca_load_fw(struct mthca_dev *mdev)
return -ENOMEM;
}
- err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
+ err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
if (err) {
- mthca_err(mdev, "MAP_FA command failed, aborting.\n");
- goto err_free;
- }
- if (status) {
- mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
goto err_free;
}
- err = mthca_RUN_FW(mdev, &status);
+ err = mthca_RUN_FW(mdev);
if (err) {
- mthca_err(mdev, "RUN_FW command failed, aborting.\n");
- goto err_unmap_fa;
- }
- if (status) {
- mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
goto err_unmap_fa;
}
return 0;
err_unmap_fa:
- mthca_UNMAP_FA(mdev, &status);
+ mthca_UNMAP_FA(mdev);
err_free:
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
@@ -423,19 +384,13 @@ static int mthca_init_icm(struct mthca_dev *mdev,
u64 icm_size)
{
u64 aux_pages;
- u8 status;
int err;
- err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
+ err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
if (err) {
- mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
+ mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
(unsigned long long) icm_size >> 10,
@@ -448,14 +403,9 @@ static int mthca_init_icm(struct mthca_dev *mdev,
return -ENOMEM;
}
- err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
+ err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
if (err) {
- mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
- goto err_free_aux;
- }
- if (status) {
- mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
goto err_free_aux;
}
@@ -596,7 +546,7 @@ err_unmap_eq:
mthca_unmap_eq_icm(mdev);
err_unmap_aux:
- mthca_UNMAP_ICM_AUX(mdev, &status);
+ mthca_UNMAP_ICM_AUX(mdev);
err_free_aux:
mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
@@ -606,7 +556,6 @@ err_free_aux:
static void mthca_free_icms(struct mthca_dev *mdev)
{
- u8 status;
mthca_free_icm_table(mdev, mdev->mcg_table.table);
if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
@@ -619,7 +568,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
mthca_unmap_eq_icm(mdev);
- mthca_UNMAP_ICM_AUX(mdev, &status);
+ mthca_UNMAP_ICM_AUX(mdev);
mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
@@ -629,43 +578,32 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
struct mthca_profile profile;
struct mthca_init_hca_param init_hca;
s64 icm_size;
- u8 status;
int err;
- err = mthca_QUERY_FW(mdev, &status);
+ err = mthca_QUERY_FW(mdev);
if (err) {
- mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
return err;
}
- if (status) {
- mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
- }
- err = mthca_ENABLE_LAM(mdev, &status);
- if (err) {
- mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
- return err;
- }
- if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
+ err = mthca_ENABLE_LAM(mdev);
+ if (err == -EAGAIN) {
mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
- } else if (status) {
- mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
- "aborting.\n", status);
- return -EINVAL;
+ } else if (err) {
+ mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
+ return err;
}
err = mthca_load_fw(mdev);
if (err) {
- mthca_err(mdev, "Failed to start FW, aborting.\n");
+ mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
goto err_disable;
}
err = mthca_dev_lim(mdev, &dev_lim);
if (err) {
- mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+ mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
goto err_stop_fw;
}
@@ -685,15 +623,9 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
if (err)
goto err_stop_fw;
- err = mthca_INIT_HCA(mdev, &init_hca, &status);
+ err = mthca_INIT_HCA(mdev, &init_hca);
if (err) {
- mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
- }
- if (status) {
- mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
goto err_free_icm;
}
@@ -703,37 +635,34 @@ err_free_icm:
mthca_free_icms(mdev);
err_stop_fw:
- mthca_UNMAP_FA(mdev, &status);
+ mthca_UNMAP_FA(mdev);
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
err_disable:
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
- mthca_DISABLE_LAM(mdev, &status);
+ mthca_DISABLE_LAM(mdev);
return err;
}
static void mthca_close_hca(struct mthca_dev *mdev)
{
- u8 status;
-
- mthca_CLOSE_HCA(mdev, 0, &status);
+ mthca_CLOSE_HCA(mdev, 0);
if (mthca_is_memfree(mdev)) {
mthca_free_icms(mdev);
- mthca_UNMAP_FA(mdev, &status);
+ mthca_UNMAP_FA(mdev);
mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
- mthca_DISABLE_LAM(mdev, &status);
+ mthca_DISABLE_LAM(mdev);
} else
- mthca_SYS_DIS(mdev, &status);
+ mthca_SYS_DIS(mdev);
}
static int mthca_init_hca(struct mthca_dev *mdev)
{
- u8 status;
int err;
struct mthca_adapter adapter;
@@ -745,15 +674,9 @@ static int mthca_init_hca(struct mthca_dev *mdev)
if (err)
return err;
- err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
+ err = mthca_QUERY_ADAPTER(mdev, &adapter);
if (err) {
- mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
- goto err_close;
- }
- if (status) {
- mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
- "aborting.\n", status);
- err = -EINVAL;
+ mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
goto err_close;
}
@@ -772,7 +695,6 @@ err_close:
static int mthca_setup_hca(struct mthca_dev *dev)
{
int err;
- u8 status;
MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);
@@ -833,8 +755,8 @@ static int mthca_setup_hca(struct mthca_dev *dev)
goto err_eq_table_free;
}
- err = mthca_NOP(dev, &status);
- if (err || status) {
+ err = mthca_NOP(dev);
+ if (err) {
if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
mthca_warn(dev, "NOP command failed to generate interrupt "
"(IRQ %d).\n",
@@ -1166,7 +1088,6 @@ err_disable_pdev:
static void __mthca_remove_one(struct pci_dev *pdev)
{
struct mthca_dev *mdev = pci_get_drvdata(pdev);
- u8 status;
int p;
if (mdev) {
@@ -1174,7 +1095,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
mthca_unregister_device(mdev);
for (p = 1; p <= mdev->limits.num_ports; ++p)
- mthca_CLOSE_IB(mdev, p, &status);
+ mthca_CLOSE_IB(mdev, p);
mthca_cleanup_mcg_table(mdev);
mthca_cleanup_av_table(mdev);
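
The mthca hunks above all apply one conversion: firmware commands used to report failure through two channels, a negative errno for mailbox/transport problems plus a u8 status byte for command-level rejection, and every caller had to check both. The command wrappers now fold the status byte into the errno, so callers check a single value. A minimal before/after sketch (illustrative only; the real status-to-errno mapping lives in the command layer, and SYS_EN stands in for any command):

	/* old convention: two failure channels to check */
	err = mthca_SYS_EN(mdev, &status);
	if (err)		/* mailbox/transport failure */
		return err;
	if (status)		/* firmware rejected the command */
		return -EINVAL;

	/* new convention: one errno covers both cases; distinct statuses
	 * map to distinct errnos, e.g. ENABLE_LAM's "LAM not pre-loaded"
	 * status surfaces as -EAGAIN, which is what lets mthca_init_arbel()
	 * above fall back to MemFree mode on err == -EAGAIN.
	 */
	err = mthca_SYS_EN(mdev);
	if (err)
		return err;
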
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 515790a606e..6304ae8f4a6 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -68,7 +68,6 @@ static int find_mgm(struct mthca_dev *dev,
struct mthca_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -77,12 +76,9 @@ static int find_mgm(struct mthca_dev *dev,
memcpy(mgid, gid, 16);
- err = mthca_MGID_HASH(dev, mailbox, hash, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "MGID_HASH returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_MGID_HASH(dev, mailbox, hash);
+ if (err) {
+ mthca_err(dev, "MGID_HASH failed (%d)\n", err);
goto out;
}
@@ -93,12 +89,9 @@ static int find_mgm(struct mthca_dev *dev,
*prev = -1;
do {
- err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, *index, mgm_mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM failed (%d)\n", err);
goto out;
}
@@ -134,7 +127,6 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int link = 0;
int i;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -160,12 +152,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- err = mthca_READ_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM failed (%d)\n", err);
goto out;
}
memset(mgm, 0, sizeof *mgm);
@@ -189,11 +178,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
goto out;
}
- err = mthca_WRITE_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
+ err = mthca_WRITE_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM failed %d\n", err);
err = -EINVAL;
goto out;
}
@@ -201,24 +188,17 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
if (!link)
goto out;
- err = mthca_READ_MGM(dev, prev, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, prev, mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM failed %d\n", err);
goto out;
}
mgm->next_gid_index = cpu_to_be32(index << 6);
- err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
+ err = mthca_WRITE_MGM(dev, prev, mailbox);
if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
- }
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
out:
if (err && link && index != -1) {
@@ -240,7 +220,6 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int prev, index;
int i, loc;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
@@ -275,12 +254,9 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
mgm->qp[loc] = mgm->qp[i - 1];
mgm->qp[i - 1] = 0;
- err = mthca_WRITE_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_WRITE_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
goto out;
}
@@ -292,24 +268,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
if (amgm_index_to_free) {
err = mthca_READ_MGM(dev, amgm_index_to_free,
- mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n",
- status);
- err = -EINVAL;
+ mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM returned %d\n", err);
goto out;
}
} else
memset(mgm->gid, 0, 16);
- err = mthca_WRITE_MGM(dev, index, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_WRITE_MGM(dev, index, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
goto out;
}
if (amgm_index_to_free) {
@@ -319,23 +288,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
} else {
/* Remove entry from AMGM */
int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
- err = mthca_READ_MGM(dev, prev, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "READ_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_READ_MGM(dev, prev, mailbox);
+ if (err) {
+ mthca_err(dev, "READ_MGM returned %d\n", err);
goto out;
}
mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);
- err = mthca_WRITE_MGM(dev, prev, mailbox, &status);
- if (err)
- goto out;
- if (status) {
- mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_WRITE_MGM(dev, prev, mailbox);
+ if (err) {
+ mthca_err(dev, "WRITE_MGM returned %d\n", err);
goto out;
}
BUG_ON(index < dev->limits.num_mgms);
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 8c2a83732b5..7d2e42dd692 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -223,7 +223,6 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
{
int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
int ret = 0;
- u8 status;
mutex_lock(&table->mutex);
@@ -240,8 +239,8 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
goto out;
}
- if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- &status) || status) {
+ if (mthca_MAP_ICM(dev, table->icm[i],
+ table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
ret = -ENOMEM;
@@ -258,7 +257,6 @@ out:
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
int i;
- u8 status;
if (!mthca_is_memfree(dev))
return;
@@ -269,8 +267,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
if (--table->icm[i]->refcount == 0) {
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
- &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
@@ -366,7 +363,6 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
int num_icm;
unsigned chunk_size;
int i;
- u8 status;
obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
@@ -396,8 +392,8 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
__GFP_NOWARN, use_coherent);
if (!table->icm[i])
goto err;
- if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
- &status) || status) {
+ if (mthca_MAP_ICM(dev, table->icm[i],
+ virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
goto err;
@@ -416,8 +412,7 @@ err:
for (i = 0; i < num_icm; ++i)
if (table->icm[i]) {
mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
- &status);
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
}
@@ -429,13 +424,12 @@ err:
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
int i;
- u8 status;
for (i = 0; i < table->num_icm; ++i)
if (table->icm[i]) {
- mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
- MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
- &status);
+ mthca_UNMAP_ICM(dev,
+ table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
+ MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
}
@@ -454,7 +448,6 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
{
struct page *pages[1];
int ret = 0;
- u8 status;
int i;
if (!mthca_is_memfree(dev))
@@ -494,9 +487,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
}
ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
- mthca_uarc_virt(dev, uar, i), &status);
- if (!ret && status)
- ret = -EINVAL;
+ mthca_uarc_virt(dev, uar, i));
if (ret) {
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
put_page(sg_page(&db_tab->page[i].mem));
@@ -557,14 +548,13 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
struct mthca_user_db_table *db_tab)
{
int i;
- u8 status;
if (!mthca_is_memfree(dev))
return;
for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
if (db_tab->page[i].uvirt) {
- mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+ mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
put_page(sg_page(&db_tab->page[i].mem));
}
@@ -581,7 +571,6 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
int i, j;
struct mthca_db_page *page;
int ret = 0;
- u8 status;
mutex_lock(&dev->db_tab->mutex);
@@ -644,9 +633,7 @@ alloc:
memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
ret = mthca_MAP_ICM_page(dev, page->mapping,
- mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
- if (!ret && status)
- ret = -EINVAL;
+ mthca_uarc_virt(dev, &dev->driver_uar, i));
if (ret) {
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping);
@@ -678,7 +665,6 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
int i, j;
struct mthca_db_page *page;
- u8 status;
i = db_index / MTHCA_DB_REC_PER_PAGE;
j = db_index % MTHCA_DB_REC_PER_PAGE;
@@ -694,7 +680,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
i >= dev->db_tab->max_group1 - 1) {
- mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+ mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping);
@@ -745,7 +731,6 @@ int mthca_init_db_tab(struct mthca_dev *dev)
void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
int i;
- u8 status;
if (!mthca_is_memfree(dev))
return;
@@ -763,7 +748,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
- mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
+ mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
dev->db_tab->page[i].db_rec,
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 44045c8846d..ab876f928a1 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -257,7 +257,6 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
struct mthca_mailbox *mailbox;
__be64 *mtt_entry;
int err = 0;
- u8 status;
int i;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -281,17 +280,11 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
if (i & 1)
mtt_entry[i + 2] = 0;
- err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
+ err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
if (err) {
mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
goto out;
}
- if (status) {
- mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto out;
- }
list_len -= i;
start_index += i;
@@ -441,7 +434,6 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
u32 key;
int i;
int err;
- u8 status;
WARN_ON(buffer_size_shift >= 32);
@@ -497,16 +489,10 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
}
err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1),
- &status);
+ key & (dev->limits.num_mpts - 1));
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_mailbox;
- } else if (status) {
- mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_mailbox;
}
mthca_free_mailbox(dev, mailbox);
@@ -567,17 +553,12 @@ static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
int err;
- u8 status;
err = mthca_HW2SW_MPT(dev, NULL,
key_to_hw_index(dev, mr->ibmr.lkey) &
- (dev->limits.num_mpts - 1),
- &status);
+ (dev->limits.num_mpts - 1));
if (err)
mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
- status);
mthca_free_region(dev, mr->ibmr.lkey);
mthca_free_mtt(dev, mr->mtt);
@@ -590,7 +571,6 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
struct mthca_mailbox *mailbox;
u64 mtt_seg;
u32 key, idx;
- u8 status;
int list_len = mr->attr.max_pages;
int err = -ENOMEM;
int i;
@@ -672,18 +652,11 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
}
err = mthca_SW2HW_MPT(dev, mailbox,
- key & (dev->limits.num_mpts - 1),
- &status);
+ key & (dev->limits.num_mpts - 1));
if (err) {
mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_out_mailbox_free;
}
- if (status) {
- mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_mailbox_free;
- }
mthca_free_mailbox(dev, mailbox);
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1e0b4b6074a..365fe0e1419 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -63,8 +63,6 @@ static int mthca_query_device(struct ib_device *ibdev,
int err = -ENOMEM;
struct mthca_dev *mdev = to_mdev(ibdev);
- u8 status;
-
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
if (!in_mad || !out_mad)
@@ -78,14 +76,9 @@ static int mthca_query_device(struct ib_device *ibdev,
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
- 1, NULL, NULL, in_mad, out_mad,
- &status);
+ 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
props->device_cap_flags = mdev->device_cap_flags;
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
@@ -141,7 +134,6 @@ static int mthca_query_port(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -155,14 +147,9 @@ static int mthca_query_port(struct ib_device *ibdev,
in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
props->lmc = out_mad->data[34] & 0x7;
@@ -214,7 +201,6 @@ static int mthca_modify_port(struct ib_device *ibdev,
struct mthca_set_ib_param set_ib;
struct ib_port_attr attr;
int err;
- u8 status;
if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
return -ERESTARTSYS;
@@ -229,14 +215,9 @@ static int mthca_modify_port(struct ib_device *ibdev,
set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
~props->clr_port_cap_mask;
- err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status);
+ err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
-
out:
mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
return err;
@@ -248,7 +229,6 @@ static int mthca_query_pkey(struct ib_device *ibdev,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -260,14 +240,9 @@ static int mthca_query_pkey(struct ib_device *ibdev,
in_mad->attr_mod = cpu_to_be32(index / 32);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
@@ -283,7 +258,6 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -295,14 +269,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
in_mad->attr_mod = cpu_to_be32(port);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
memcpy(gid->raw, out_mad->data + 8, 8);
@@ -311,14 +280,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
in_mad->attr_mod = cpu_to_be32(index / 8);
err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
- port, NULL, NULL, in_mad, out_mad,
- &status);
+ port, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
@@ -800,7 +764,6 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
struct mthca_cq *cq = to_mcq(ibcq);
struct mthca_resize_cq ucmd;
u32 lkey;
- u8 status;
int ret;
if (entries < 1 || entries > dev->limits.max_cqes)
@@ -827,9 +790,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
lkey = ucmd.lkey;
}
- ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status);
- if (status)
- ret = -EINVAL;
+ ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
if (ret) {
if (cq->resize_buf) {
@@ -1161,7 +1122,6 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
{
struct ib_fmr *fmr;
int err;
- u8 status;
struct mthca_dev *mdev = NULL;
list_for_each_entry(fmr, fmr_list, list) {
@@ -1182,12 +1142,8 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
list_for_each_entry(fmr, fmr_list, list)
mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
- err = mthca_SYNC_TPT(mdev, &status);
- if (err)
- return err;
- if (status)
- return -EINVAL;
- return 0;
+ err = mthca_SYNC_TPT(mdev);
+ return err;
}
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -1253,7 +1209,6 @@ static int mthca_init_node_data(struct mthca_dev *dev)
struct ib_smp *in_mad = NULL;
struct ib_smp *out_mad = NULL;
int err = -ENOMEM;
- u8 status;
in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -1264,28 +1219,18 @@ static int mthca_init_node_data(struct mthca_dev *dev)
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
- 1, NULL, NULL, in_mad, out_mad,
- &status);
+ 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(dev, 1, 1,
- 1, NULL, NULL, in_mad, out_mad,
- &status);
+ 1, NULL, NULL, in_mad, out_mad);
if (err)
goto out;
- if (status) {
- err = -EINVAL;
- goto out;
- }
if (mthca_is_memfree(dev))
dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index a34c9d38e82..9601049e14d 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -308,7 +308,6 @@ static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
static void init_port(struct mthca_dev *dev, int port)
{
int err;
- u8 status;
struct mthca_init_ib_param param;
memset(&param, 0, sizeof param);
@@ -319,11 +318,9 @@ static void init_port(struct mthca_dev *dev, int port)
param.gid_cap = dev->limits.gid_table_len;
param.pkey_cap = dev->limits.pkey_table_len;
- err = mthca_INIT_IB(dev, &param, port, &status);
+ err = mthca_INIT_IB(dev, &param, port);
if (err)
mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
- if (status)
- mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
@@ -433,7 +430,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
struct mthca_qp_param *qp_param;
struct mthca_qp_context *context;
int mthca_state;
- u8 status;
mutex_lock(&qp->mutex);
@@ -448,12 +444,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
goto out;
}
- err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
- if (err)
- goto out_mailbox;
- if (status) {
- mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
- err = -EINVAL;
+ err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
+ if (err) {
+ mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
goto out_mailbox;
}
@@ -555,7 +548,6 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
struct mthca_qp_param *qp_param;
struct mthca_qp_context *qp_context;
u32 sqd_event = 0;
- u8 status;
int err = -EINVAL;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -781,13 +773,10 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
sqd_event = 1 << 31;
err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
- mailbox, sqd_event, &status);
- if (err)
- goto out_mailbox;
- if (status) {
- mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
- cur_state, new_state, status);
- err = -EINVAL;
+ mailbox, sqd_event);
+ if (err) {
+ mthca_warn(dev, "modify QP %d->%d returned %d.\n",
+ cur_state, new_state, err);
goto out_mailbox;
}
@@ -817,7 +806,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
cur_state != IB_QPS_ERR &&
(new_state == IB_QPS_RESET ||
new_state == IB_QPS_ERR))
- mthca_CLOSE_IB(dev, qp->port, &status);
+ mthca_CLOSE_IB(dev, qp->port);
}
/*
@@ -1429,7 +1418,6 @@ static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
void mthca_free_qp(struct mthca_dev *dev,
struct mthca_qp *qp)
{
- u8 status;
struct mthca_cq *send_cq;
struct mthca_cq *recv_cq;
@@ -1454,7 +1442,7 @@ void mthca_free_qp(struct mthca_dev *dev,
if (qp->state != IB_QPS_RESET)
mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
- NULL, 0, &status);
+ NULL, 0);
/*
* If this is a userspace QP, the buffers, MR, CQs and so on
@@ -2263,7 +2251,6 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
int mthca_init_qp_table(struct mthca_dev *dev)
{
int err;
- u8 status;
int i;
spin_lock_init(&dev->qp_table.lock);
@@ -2290,15 +2277,10 @@ int mthca_init_qp_table(struct mthca_dev *dev)
for (i = 0; i < 2; ++i) {
err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
- dev->qp_table.sqp_start + i * 2,
- &status);
- if (err)
- goto err_out;
- if (status) {
+ dev->qp_table.sqp_start + i * 2);
+ if (err) {
mthca_warn(dev, "CONF_SPECIAL_QP returned "
- "status %02x, aborting.\n",
- status);
- err = -EINVAL;
+ "%d, aborting.\n", err);
goto err_out;
}
}
@@ -2306,7 +2288,7 @@ int mthca_init_qp_table(struct mthca_dev *dev)
err_out:
for (i = 0; i < 2; ++i)
- mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+ mthca_CONF_SPECIAL_QP(dev, i, 0);
mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
mthca_alloc_cleanup(&dev->qp_table.alloc);
@@ -2317,10 +2299,9 @@ int mthca_init_qp_table(struct mthca_dev *dev)
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
int i;
- u8 status;
for (i = 0; i < 2; ++i)
- mthca_CONF_SPECIAL_QP(dev, i, 0, &status);
+ mthca_CONF_SPECIAL_QP(dev, i, 0);
mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
mthca_alloc_cleanup(&dev->qp_table.alloc);
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 2a13a163d33..4fa3534ec23 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -113,7 +113,7 @@ int mthca_reset(struct mthca_dev *mdev)
}
hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
- hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP);
+ hca_pcie_cap = pci_pcie_cap(mdev->pdev);
if (bridge) {
bridge_header = kmalloc(256, GFP_KERNEL);
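
Aside from the command cleanup, mthca_reset.c stops re-walking PCI config space for the PCI Express capability: pci_pcie_cap() returns the offset the PCI core already cached in struct pci_dev during enumeration. The two forms are equivalent (illustrative sketch):

	int pos_walk   = pci_find_capability(pdev, PCI_CAP_ID_EXP); /* walks config space */
	int pos_cached = pci_pcie_cap(pdev); /* cached offset, no config reads */
	/* both return the PCIe capability offset, or 0 if the device has none */
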
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 4fabe62aab8..d22f970480c 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -200,7 +200,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
struct ib_srq_attr *attr, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
- u8 status;
int ds;
int err;
@@ -266,18 +265,12 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
else
mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
- err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status);
+ err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
if (err) {
mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
goto err_out_free_buf;
}
- if (status) {
- mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
- status);
- err = -EINVAL;
- goto err_out_free_buf;
- }
spin_lock_irq(&dev->srq_table.lock);
if (mthca_array_set(&dev->srq_table.srq,
@@ -299,11 +292,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
return 0;
err_out_free_srq:
- err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
if (err)
mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
err_out_free_buf:
if (!pd->ibpd.uobject)
@@ -340,7 +331,6 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
struct mthca_mailbox *mailbox;
int err;
- u8 status;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox)) {
@@ -348,11 +338,9 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
return;
}
- err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status);
+ err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
if (err)
mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
- else if (status)
- mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
spin_lock_irq(&dev->srq_table.lock);
mthca_array_clear(&dev->srq_table.srq,
@@ -378,8 +366,7 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
- int ret;
- u8 status;
+ int ret = 0;
/* We don't support resizing SRQs (yet?) */
if (attr_mask & IB_SRQ_MAX_WR)
@@ -391,16 +378,11 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
return -EINVAL;
mutex_lock(&srq->mutex);
- ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+ ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
mutex_unlock(&srq->mutex);
-
- if (ret)
- return ret;
- if (status)
- return -EINVAL;
}
- return 0;
+ return ret;
}
int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
@@ -410,14 +392,13 @@ int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
struct mthca_mailbox *mailbox;
struct mthca_arbel_srq_context *arbel_ctx;
struct mthca_tavor_srq_context *tavor_ctx;
- u8 status;
int err;
mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
- err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status);
+ err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
if (err)
goto out;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 95ca93ceeda..9f2f7d4b119 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -605,16 +605,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
/**
- * nes_modify_port
- */
-static int nes_modify_port(struct ib_device *ibdev, u8 port,
- int port_modify_mask, struct ib_port_modify *props)
-{
- return 0;
-}
-
-
-/**
* nes_query_pkey
*/
static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
@@ -3882,7 +3872,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
nesibdev->ibdev.query_device = nes_query_device;
nesibdev->ibdev.query_port = nes_query_port;
- nesibdev->ibdev.modify_port = nes_modify_port;
nesibdev->ibdev.query_pkey = nes_query_pkey;
nesibdev->ibdev.query_gid = nes_query_gid;
nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 769a1d9da4b..c9624ea8720 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1012,6 +1012,8 @@ struct qib_devdata {
u8 psxmitwait_supported;
/* cycle length of PS* counters in HW (in picoseconds) */
u16 psxmitwait_check_rate;
+ /* high-volume overflow errors deferred to tasklet */
+ struct tasklet_struct error_tasklet;
};
/* hol_state values */
@@ -1433,6 +1435,7 @@ extern struct mutex qib_mutex;
struct qib_hwerror_msgs {
u64 mask;
const char *msg;
+ size_t sz;
};
#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 406fca50d03..26253039d2c 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1527,6 +1527,7 @@ done_chk_sdma:
struct qib_filedata *fd = fp->private_data;
const struct qib_ctxtdata *rcd = fd->rcd;
const struct qib_devdata *dd = rcd->dd;
+ unsigned int weight;
if (dd->flags & QIB_HAS_SEND_DMA) {
fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
@@ -1545,8 +1546,8 @@ done_chk_sdma:
* it just means that sooner or later we don't recommend
* a cpu, and let the scheduler do its best.
*/
- if (!ret && cpus_weight(current->cpus_allowed) >=
- qib_cpulist_count) {
+ weight = cpumask_weight(tsk_cpus_allowed(current));
+ if (!ret && weight >= qib_cpulist_count) {
int cpu;
cpu = find_first_zero_bit(qib_cpulist,
qib_cpulist_count);
@@ -1554,13 +1555,13 @@ done_chk_sdma:
__set_bit(cpu, qib_cpulist);
fd->rec_cpu_num = cpu;
}
- } else if (cpus_weight(current->cpus_allowed) == 1 &&
- test_bit(first_cpu(current->cpus_allowed),
+ } else if (weight == 1 &&
+ test_bit(cpumask_first(tsk_cpus_allowed(current)),
qib_cpulist))
qib_devinfo(dd->pcidev, "%s PID %u affinity "
"set to cpu %d; already allocated\n",
current->comm, current->pid,
- first_cpu(current->cpus_allowed));
+ cpumask_first(tsk_cpus_allowed(current)));
}
mutex_unlock(&qib_mutex);
@@ -1904,8 +1905,9 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
struct qib_ctxtdata *rcd;
unsigned ctxt;
int ret = 0;
+ unsigned long flags;
- spin_lock(&ppd->dd->uctxt_lock);
+ spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
ctxt++) {
rcd = ppd->dd->rcd[ctxt];
@@ -1924,7 +1926,7 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
ret = 1;
break;
}
- spin_unlock(&ppd->dd->uctxt_lock);
+ spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
return ret;
}
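
Two independent fixes land in qib_file_ops.c: the CPU-affinity logic moves from the legacy by-value cpumask accessors to the pointer-based API (cpumask_weight()/cpumask_first() over tsk_cpus_allowed(current), which also works when cpumasks are allocated off-stack on large-NR_CPUS builds), and uctxt_lock is now taken with interrupts disabled, presumably because the lock can also be acquired from interrupt context and a plain spin_lock() here would risk deadlock. The cpumask change in isolation (sketch):

	/* legacy API: struct cpumask passed by value */
	w = cpus_weight(current->cpus_allowed);
	c = first_cpu(current->cpus_allowed);

	/* pointer-based API: operate on the task's allowed mask in place */
	w = cpumask_weight(tsk_cpus_allowed(current));
	c = cpumask_first(tsk_cpus_allowed(current));
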
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index c765a2eb04c..e1f947446c2 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2434,6 +2434,7 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
int lsb, ret = 0, setforce = 0;
u16 lcmd, licmd;
unsigned long flags;
+ u32 tmp = 0;
switch (which) {
case QIB_IB_CFG_LIDLMC:
@@ -2467,9 +2468,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
maskr = IBA7220_IBC_WIDTH_MASK;
lsb = IBA7220_IBC_WIDTH_SHIFT;
setforce = 1;
- spin_lock_irqsave(&ppd->lflags_lock, flags);
- ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
- spin_unlock_irqrestore(&ppd->lflags_lock, flags);
break;
case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
@@ -2643,6 +2641,28 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
goto bail;
}
qib_set_ib_7220_lstate(ppd, lcmd, licmd);
+
+ maskr = IBA7220_IBC_WIDTH_MASK;
+ lsb = IBA7220_IBC_WIDTH_SHIFT;
+ tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
+ /*
+ * If the width active on the chip does not match the
+ * width in the shadow register, write the new active
+ * width to the chip.
+ * We don't have to worry about speed as the speed is taken
+ * care of by set_7220_ibspeed_fast called by ib_updown.
+ */
+ if (ppd->link_width_enabled - 1 != tmp) {
+ ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
+ ppd->cpspec->ibcddrctrl |=
+ (((u64)(ppd->link_width_enabled - 1) & maskr) <<
+ lsb);
+ qib_write_kreg(dd, kr_ibcddrctrl,
+ ppd->cpspec->ibcddrctrl);
+ qib_write_kreg(dd, kr_scratch, 0);
+ spin_lock_irqsave(&ppd->lflags_lock, flags);
+ ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
+ spin_unlock_irqrestore(&ppd->lflags_lock, flags);
+ }
goto bail;
case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 8ec5237031a..5ea9ece23b3 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -114,6 +114,10 @@ static ushort qib_singleport;
module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
+static ushort qib_krcvq01_no_msi;
+module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
+MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
+
/*
* Receive header queue sizes
*/
@@ -397,7 +401,6 @@ MODULE_PARM_DESC(txselect, \
#define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
#define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
#define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
#define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
#define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
#define crp_txunderrun CREG_IDX(TxUnderrunCnt)
@@ -1107,9 +1110,9 @@ static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
#define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
#define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname , .sz = sizeof(#fldname) }
#define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
- fldname##Mask##_##port), .msg = #fldname }
+ fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
@@ -1127,14 +1130,16 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
HWE_AUTO(statusValidNoEop),
HWE_AUTO(LATriggered),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
#define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+ E_AUTO(RcvEgrFullErr),
+ E_AUTO(RcvHdrFullErr),
E_AUTO(ResetNegated),
E_AUTO(HardwareErr),
E_AUTO(InvalidAddrErr),
@@ -1147,9 +1152,7 @@ static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
E_AUTO(SendSpecialTriggerErr),
E_AUTO(SDmaWrongPortErr),
E_AUTO(SDmaBufMaskDuplicateErr),
- E_AUTO(RcvHdrFullErr),
- E_AUTO(RcvEgrFullErr),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
@@ -1159,7 +1162,8 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
/*
* SDmaHaltErr is not really an error, so make that clearer.
*/
- {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+ {.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
+ .sz = 11},
E_P_AUTO(SDmaDescAddrMisalignErr),
E_P_AUTO(SDmaUnexpDataErr),
E_P_AUTO(SDmaMissingDwErr),
@@ -1195,7 +1199,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
E_P_AUTO(RcvICRCErr),
E_P_AUTO(RcvVCRCErr),
E_P_AUTO(RcvFormatErr),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
/*
@@ -1203,17 +1207,17 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
* context
*/
#define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
- .msg = #fldname }
+ .msg = #fldname, .sz = sizeof(#fldname) }
/* Below generates "auto-message" for interrupts specific to a port */
#define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_0), \
SYM_LSB(IntMask, fldname##Mask##_1)), \
- .msg = #fldname "_P" }
+ .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/* For some reason, the SerDesTrimDone bits are reversed */
#define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##Mask##_1), \
SYM_LSB(IntMask, fldname##Mask##_0)), \
- .msg = #fldname "_P" }
+ .msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
/*
* Below generates "auto-message" for interrupts specific to a context,
* with ctxt-number appended
@@ -1221,7 +1225,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
#define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
SYM_LSB(IntMask, fldname##0IntMask), \
SYM_LSB(IntMask, fldname##17IntMask)), \
- .msg = #fldname "_C"}
+ .msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SDmaInt),
@@ -1235,11 +1239,12 @@ static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
INTR_AUTO_P(SendDoneInt),
INTR_AUTO(SendBufAvailInt),
INTR_AUTO_C(RcvAvail),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define TXSYMPTOM_AUTO_P(fldname) \
- { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+ { .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
+ .msg = #fldname, .sz = sizeof(#fldname) }
static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(NonKeyPacket),
TXSYMPTOM_AUTO_P(GRHFail),
@@ -1248,7 +1253,7 @@ static const struct qib_hwerror_msgs hdrchk_msgs[] = {
TXSYMPTOM_AUTO_P(SLIDFail),
TXSYMPTOM_AUTO_P(RawIPV6),
TXSYMPTOM_AUTO_P(PacketTooSmall),
- { .mask = 0 }
+ { .mask = 0, .sz = 0 }
};
#define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
@@ -1293,7 +1298,7 @@ static void err_decode(char *msg, size_t len, u64 errs,
u64 these, lmask;
int took, multi, n = 0;
- while (msp && msp->mask) {
+ while (errs && msp && msp->mask) {
multi = (msp->mask & (msp->mask - 1));
while (errs & msp->mask) {
these = (errs & msp->mask);
@@ -1304,9 +1309,14 @@ static void err_decode(char *msg, size_t len, u64 errs,
*msg++ = ',';
len--;
}
- took = scnprintf(msg, len, "%s", msp->msg);
+ BUG_ON(!msp->sz);
+ /* msp->sz counts the nul */
+ took = min_t(size_t, msp->sz - (size_t)1, len);
+ memcpy(msg, msp->msg, took);
len -= took;
msg += took;
+ if (len)
+ *msg = '\0';
}
errs &= ~lmask;
if (len && multi) {
@@ -1644,6 +1654,14 @@ done:
return;
}
+static void qib_error_tasklet(unsigned long data)
+{
+ struct qib_devdata *dd = (struct qib_devdata *)data;
+
+ handle_7322_errors(dd);
+ qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
static void reenable_chase(unsigned long opaque)
{
struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -2725,8 +2743,10 @@ static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
unknown_7322_ibits(dd, istat);
if (istat & QIB_I_GPIO)
unknown_7322_gpio_intr(dd);
- if (istat & QIB_I_C_ERROR)
- handle_7322_errors(dd);
+ if (istat & QIB_I_C_ERROR) {
+ qib_write_kreg(dd, kr_errmask, 0ULL);
+ tasklet_schedule(&dd->error_tasklet);
+ }
if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
handle_7322_p_errors(dd->rcd[0]->ppd);
if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
@@ -3125,6 +3145,8 @@ try_intx:
arg = dd->rcd[ctxt];
if (!arg)
continue;
+ if (qib_krcvq01_no_msi && ctxt < 2)
+ continue;
lsb = QIB_I_RCVAVAIL_LSB + ctxt;
handler = qib_7322pintr;
name = QIB_DRV_NAME " (kctx)";
@@ -3159,6 +3181,8 @@ try_intx:
for (i = 0; i < ARRAY_SIZE(redirect); i++)
qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
dd->cspec->main_int_mask = mask;
+ tasklet_init(&dd->error_tasklet, qib_error_tasklet,
+ (unsigned long)dd);
bail:;
}
@@ -6788,6 +6812,10 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
(i >= ARRAY_SIZE(irq_table) &&
dd->rcd[i - ARRAY_SIZE(irq_table)]))
actual_cnt++;
+ /* reduce by ctxts < 2 */
+ if (qib_krcvq01_no_msi)
+ actual_cnt -= dd->num_pports;
+
tabsize = actual_cnt;
dd->cspec->msix_entries = kmalloc(tabsize *
sizeof(struct msix_entry), GFP_KERNEL);
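
Two mechanisms cooperate in the qib_iba7322.c changes. First, the error-message tables gain a compile-time length: sizeof on a string literal includes the trailing NUL, which is why err_decode() copies msp->sz - 1 bytes with memcpy() rather than formatting through scnprintf() on every error. Second, chip-error handling moves out of hard-irq context: the interrupt handler masks further error interrupts and schedules a tasklet, and the tasklet does the heavy decode before restoring the mask, so a storm of overflow errors cannot monopolize the hard-irq path. The deferral pattern in isolation (sketch; names as in the driver above):

	/* hard-irq context: mask the source, defer the work */
	if (istat & QIB_I_C_ERROR) {
		qib_write_kreg(dd, kr_errmask, 0ULL);
		tasklet_schedule(&dd->error_tasklet);
	}

	/* tasklet (softirq) context: do the decode, then unmask */
	static void qib_error_tasklet(unsigned long data)
	{
		struct qib_devdata *dd = (struct qib_devdata *)data;

		handle_7322_errors(dd);
		qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
	}
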
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 8fd3df5bf04..3b3745f261f 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1125,22 +1125,22 @@ static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
}
-static int pma_get_classportinfo(struct ib_perf *pmp,
+static int pma_get_classportinfo(struct ib_pma_mad *pmp,
struct ib_device *ibdev)
{
- struct ib_pma_classportinfo *p =
- (struct ib_pma_classportinfo *)pmp->data;
+ struct ib_class_port_info *p =
+ (struct ib_class_port_info *)pmp->data;
struct qib_devdata *dd = dd_from_ibdev(ibdev);
memset(pmp->data, 0, sizeof(pmp->data));
- if (pmp->attr_mod != 0)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
/* Note that AllPortSelect is not valid */
p->base_version = 1;
p->class_version = 1;
- p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
/*
* Set the most significant bit of CM2 to indicate support for
* congestion statistics
@@ -1154,7 +1154,7 @@ static int pma_get_classportinfo(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1169,8 +1169,8 @@ static int pma_get_portsamplescontrol(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
spin_lock_irqsave(&ibp->lock, flags);
@@ -1192,7 +1192,7 @@ bail:
return reply((struct ib_smp *) pmp);
}
-static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplescontrol *p =
@@ -1205,8 +1205,8 @@ static int pma_set_portsamplescontrol(struct ib_perf *pmp,
u8 status, xmit_flags;
int ret;
- if (pmp->attr_mod != 0 || p->port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -1321,7 +1321,7 @@ static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
return ret;
}
-static int pma_get_portsamplesresult(struct ib_perf *pmp,
+static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplesresult *p =
@@ -1360,7 +1360,7 @@ static int pma_get_portsamplesresult(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portsamplesresult_ext *p =
@@ -1402,7 +1402,7 @@ static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portcounters(struct ib_perf *pmp,
+static int pma_get_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1436,8 +1436,8 @@ static int pma_get_portcounters(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
if (cntrs.symbol_error_counter > 0xFFFFUL)
p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1472,7 +1472,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1500,7 +1500,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
return reply((struct ib_smp *) pmp);
}
-static int pma_get_portcounters_cong(struct ib_perf *pmp,
+static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
/* Congestion PMA packets start at offset 24 not 64 */
@@ -1510,7 +1510,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
- u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+ u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
u64 xmit_wait_counter;
unsigned long flags;
@@ -1519,9 +1519,9 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
* SET method ends up calling this anyway.
*/
if (!dd->psxmitwait_supported)
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
if (port_select != port)
- pmp->status |= IB_SMP_INVALID_FIELD;
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
qib_get_counters(ppd, &cntrs);
spin_lock_irqsave(&ppd->ibport_data.lock, flags);
@@ -1603,7 +1603,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
cntrs.local_link_integrity_errors = 0xFUL;
if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
cntrs.excessive_buffer_overrun_errors = 0xFUL;
- p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+ p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
cntrs.excessive_buffer_overrun_errors;
if (cntrs.vl15_dropped > 0xFFFFUL)
p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1613,7 +1613,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
return reply((struct ib_smp *)pmp);
}
-static int pma_get_portcounters_ext(struct ib_perf *pmp,
+static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters_ext *p =
@@ -1626,8 +1626,8 @@ static int pma_get_portcounters_ext(struct ib_perf *pmp,
memset(pmp->data, 0, sizeof(pmp->data));
p->port_select = port_select;
- if (pmp->attr_mod != 0 || port_select != port) {
- pmp->status |= IB_SMP_INVALID_FIELD;
+ if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+ pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
goto bail;
}
@@ -1652,7 +1652,7 @@ bail:
return reply((struct ib_smp *) pmp);
}
-static int pma_set_portcounters(struct ib_perf *pmp,
+static int pma_set_portcounters(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1715,14 +1715,14 @@ static int pma_set_portcounters(struct ib_perf *pmp,
return pma_get_portcounters(pmp, ibdev, port);
}
-static int pma_set_portcounters_cong(struct ib_perf *pmp,
+static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct qib_ibport *ibp = to_iport(ibdev, port);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_devdata *dd = dd_from_ppd(ppd);
struct qib_verbs_counters cntrs;
- u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+ u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
int ret = 0;
unsigned long flags;
@@ -1766,7 +1766,7 @@ static int pma_set_portcounters_cong(struct ib_perf *pmp,
return ret;
}
-static int pma_set_portcounters_ext(struct ib_perf *pmp,
+static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
struct ib_device *ibdev, u8 port)
{
struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1959,19 +1959,19 @@ static int process_perf(struct ib_device *ibdev, u8 port,
struct ib_mad *in_mad,
struct ib_mad *out_mad)
{
- struct ib_perf *pmp = (struct ib_perf *)out_mad;
+ struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
int ret;
*out_mad = *in_mad;
- if (pmp->class_version != 1) {
- pmp->status |= IB_SMP_UNSUP_VERSION;
+ if (pmp->mad_hdr.class_version != 1) {
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
- switch (pmp->method) {
+ switch (pmp->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_CLASS_PORT_INFO:
ret = pma_get_classportinfo(pmp, ibdev);
goto bail;
@@ -1994,13 +1994,13 @@ static int process_perf(struct ib_device *ibdev, u8 port,
ret = pma_get_portcounters_cong(pmp, ibdev, port);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
case IB_MGMT_METHOD_SET:
- switch (pmp->attr_id) {
+ switch (pmp->mad_hdr.attr_id) {
case IB_PMA_PORT_SAMPLES_CONTROL:
ret = pma_set_portsamplescontrol(pmp, ibdev, port);
goto bail;
@@ -2014,7 +2014,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
ret = pma_set_portcounters_cong(pmp, ibdev, port);
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
ret = reply((struct ib_smp *) pmp);
goto bail;
}
@@ -2030,7 +2030,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
goto bail;
default:
- pmp->status |= IB_SMP_UNSUP_METHOD;
+ pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
ret = reply((struct ib_smp *) pmp);
}
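
The qib_mad.c rewrite retires the driver-local struct ib_perf, which flattened the MAD header fields directly into the PMA struct, in favor of the generic struct ib_pma_mad from <rdma/ib_pma.h>, which nests the common header -- hence the mechanical pmp->status -> pmp->mad_hdr.status renames above. The generic layout, for reference:

	struct ib_pma_mad {
		struct ib_mad_hdr mad_hdr;	/* versions, method, status, attr_id, attr_mod, ... */
		u8 reserved[40];
		u8 data[192];
	} __attribute__ ((packed));
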
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 7840ab593bc..ecc416cdbaa 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -32,6 +32,8 @@
* SOFTWARE.
*/
+#include <rdma/ib_pma.h>
+
#define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
#define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
@@ -180,109 +182,8 @@ struct ib_vl_weight_elem {
#define IB_VLARB_HIGHPRI_0_31 3
#define IB_VLARB_HIGHPRI_32_63 4
-/*
- * PMA class portinfo capability mask bits
- */
-#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
-#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
-#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
-
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
#define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
-struct ib_perf {
- u8 base_version;
- u8 mgmt_class;
- u8 class_version;
- u8 method;
- __be16 status;
- __be16 unused;
- __be64 tid;
- __be16 attr_id;
- __be16 resv;
- __be32 attr_mod;
- u8 reserved[40];
- u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
- u8 base_version;
- u8 class_version;
- __be16 cap_mask;
- u8 reserved[3];
- u8 resp_time_value; /* only lower 5 bits */
- union ib_gid redirect_gid;
- __be32 redirect_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 redirect_lid;
- __be16 redirect_pkey;
- __be32 redirect_qp; /* only lower 24 bits */
- __be32 redirect_qkey;
- union ib_gid trap_gid;
- __be32 trap_tc_sl_fl; /* 8, 4, 20 bits respectively */
- __be16 trap_lid;
- __be16 trap_pkey;
- __be32 trap_hl_qp; /* 8, 24 bits respectively */
- __be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
- u8 opcode;
- u8 port_select;
- u8 tick;
- u8 counter_width; /* only lower 3 bits */
- __be32 counter_mask0_9; /* 2, 10 * 3, bits */
- __be16 counter_mask10_14; /* 1, 5 * 3, bits */
- u8 sample_mechanisms;
- u8 sample_status; /* only lower 2 bits */
- __be64 option_mask;
- __be64 vendor_mask;
- __be32 sample_start;
- __be32 sample_interval;
- __be16 tag;
- __be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
- __be16 tag;
- __be16 sample_status; /* only lower 2 bits */
- __be32 extended_width; /* only upper 2 bits */
- __be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be16 symbol_error_counter;
- u8 link_error_recovery_counter;
- u8 link_downed_counter;
- __be16 port_rcv_errors;
- __be16 port_rcv_remphys_errors;
- __be16 port_rcv_switch_relay_errors;
- __be16 port_xmit_discards;
- u8 port_xmit_constraint_errors;
- u8 port_rcv_constraint_errors;
- u8 reserved1;
- u8 lli_ebor_errors; /* 4, 4, bits */
- __be16 reserved2;
- __be16 vl15_dropped;
- __be32 port_xmit_data;
- __be32 port_rcv_data;
- __be32 port_xmit_packets;
- __be32 port_rcv_packets;
-} __attribute__ ((packed));
-
struct ib_pma_portcounters_cong {
u8 reserved;
u8 reserved1;
@@ -297,7 +198,7 @@ struct ib_pma_portcounters_cong {
u8 port_xmit_constraint_errors;
u8 port_rcv_constraint_errors;
u8 reserved2;
- u8 lli_ebor_errors; /* 4, 4, bits */
+ u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
__be16 reserved3;
__be16 vl15_dropped;
__be64 port_xmit_data;
@@ -316,49 +217,11 @@ struct ib_pma_portcounters_cong {
/* number of 4nsec cycles equaling 2secs */
#define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
-#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
-
#define IB_PMA_SEL_CONG_ALL 0x01
#define IB_PMA_SEL_CONG_PORT_DATA 0x02
#define IB_PMA_SEL_CONG_XMIT 0x04
#define IB_PMA_SEL_CONG_ROUTING 0x08
-struct ib_pma_portcounters_ext {
- u8 reserved;
- u8 port_select;
- __be16 counter_select;
- __be32 reserved1;
- __be64 port_xmit_data;
- __be64 port_rcv_data;
- __be64 port_xmit_packets;
- __be64 port_rcv_packets;
- __be64 port_unicast_xmit_packets;
- __be64 port_unicast_rcv_packets;
- __be64 port_multicast_xmit_packets;
- __be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
-
/*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2.
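Given the "2, 10 * 3, bits" layout of counter_mask0_9 noted in the removed struct above, a helper along these lines would pull out the N'th 3-bit field (a sketch; placing field 0 just below the 2-bit header, at bits 29:27, is an assumption about the wire layout):

	/* Capability mask for counter n, 0 <= n <= 9 (hypothetical helper). */
	static u8 pma_counter_mask(__be32 counter_mask0_9, unsigned int n)
	{
		return (be32_to_cpu(counter_mask0_9) >> ((9 - n) * 3)) & 7;
	}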
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 891cc2ff5f0..4426782ad28 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -255,7 +255,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
u16 linkstat, speed;
int pos = 0, pose, ret = 1;
- pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ pose = pci_pcie_cap(dd->pcidev);
if (!pose) {
qib_dev_err(dd, "Can't find PCI Express capability!\n");
/* set up something... */
@@ -509,7 +509,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
return 1;
}
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
if (!ppos)
return 1;
if (parent->vendor != 0x8086)
@@ -578,14 +578,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
qib_devinfo(dd->pcidev, "Parent not root\n");
goto bail;
}
- ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+ ppos = pci_pcie_cap(parent);
if (ppos) {
pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
} else
goto bail;
/* Find out supported and configured values for endpoint (us) */
- epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+ epos = pci_pcie_cap(dd->pcidev);
if (epos) {
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
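The qib_pcie.c conversions above replace pci_find_capability(pdev, PCI_CAP_ID_EXP), which walks config space on every call, with pci_pcie_cap(pdev), which returns the PCI Express capability offset the PCI core cached at probe time. The usage pattern, as a minimal sketch:

	int pos = pci_pcie_cap(pdev);	/* cached offset, 0 if not PCIe */
	u16 devctl;

	if (!pos)
		return -EINVAL;		/* not a PCI Express device */
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &devctl);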
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d50a33fe8bb..14d129de432 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -507,6 +507,18 @@ static ssize_t show_nctxts(struct device *device,
dd->first_user_ctxt);
}
+static ssize_t show_nfreectxts(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct qib_ibdev *dev =
+ container_of(device, struct qib_ibdev, ibdev.dev);
+ struct qib_devdata *dd = dd_from_dev(dev);
+
+ /* Return the number of free user ports (contexts) available. */
+ return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+ dd->first_user_ctxt - (u32)qib_stats.sps_ctxts);
+}
+
static ssize_t show_serial(struct device *device,
struct device_attribute *attr, char *buf)
{
@@ -604,6 +616,7 @@ static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
@@ -617,6 +630,7 @@ static struct device_attribute *qib_attributes[] = {
&dev_attr_board_id,
&dev_attr_version,
&dev_attr_nctxts,
+ &dev_attr_nfreectxts,
&dev_attr_serial,
&dev_attr_boardversion,
&dev_attr_logged_errors,
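The new nfreectxts attribute reports cfgctxts - first_user_ctxt - sps_ctxts: the configured contexts minus those reserved for the kernel and those already claimed. A hypothetical userspace reader (the qib0 sysfs path is an assumption, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned int nfree;
		FILE *f = fopen("/sys/class/infiniband/qib0/nfreectxts", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &nfree) == 1)
			printf("free user contexts: %u\n", nfree);
		fclose(f);
		return 0;
	}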
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index ee165fdcb59..7d5109bbd1a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2127,6 +2127,8 @@ static ssize_t srp_create_target(struct device *dev,
return -ENOMEM;
target_host->transportt = ib_srp_transport_template;
+ target_host->max_channel = 0;
+ target_host->max_id = 1;
target_host->max_lun = SRP_MAX_LUN;
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 713d43b4e56..6c21c2986ca 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -92,7 +92,7 @@ config LEDS_NET48XX
config LEDS_NET5501
tristate "LED Support for Soekris net5501 series Error LED"
depends on LEDS_TRIGGERS
- depends on X86 && LEDS_GPIO_PLATFORM && GPIO_CS5535
+ depends on X86 && GPIO_CS5535
select LEDS_TRIGGER_DEFAULT_ON
default n
help
@@ -182,23 +182,6 @@ config LEDS_GPIO
defined as platform devices and/or OpenFirmware platform devices.
The code to use these bindings can be selected below.
-config LEDS_GPIO_PLATFORM
- bool "Platform device bindings for GPIO LEDs"
- depends on LEDS_GPIO
- default y
- help
- Let the leds-gpio driver drive LEDs which have been defined as
- platform devices. If you don't know what this means, say yes.
-
-config LEDS_GPIO_OF
- bool "OpenFirmware platform device bindings for GPIO LEDs"
- depends on LEDS_GPIO && OF_DEVICE
- default y
- help
- Let the leds-gpio driver drive LEDs which have been defined as
- of_platform devices. For instance, LEDs which are listed in a "dts"
- file.
-
config LEDS_LP3944
tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
depends on LEDS_CLASS
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index b0480c8fbcb..3d8bc327a68 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -165,7 +165,7 @@ static inline int sizeof_gpio_leds_priv(int num_leds)
}
/* Code to create from OpenFirmware platform devices */
-#ifdef CONFIG_LEDS_GPIO_OF
+#ifdef CONFIG_OF_GPIO
static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *child;
@@ -223,13 +223,13 @@ static const struct of_device_id of_gpio_leds_match[] = {
{ .compatible = "gpio-leds", },
{},
};
-#else
+#else /* CONFIG_OF_GPIO */
static struct gpio_leds_priv * __devinit gpio_leds_create_of(struct platform_device *pdev)
{
return NULL;
}
#define of_gpio_leds_match NULL
-#endif
+#endif /* CONFIG_OF_GPIO */
static int __devinit gpio_led_probe(struct platform_device *pdev)
diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
index efa202499e3..2535933c49f 100644
--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c
@@ -117,7 +117,7 @@ static __init int map_switcher(void)
/*
* Now the Switcher is mapped at the right address, we can't fail!
- * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
+ * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
*/
memcpy(switcher_vma->addr, start_switcher_text,
end_switcher_text - start_switcher_text);
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
index daaf8663164..28433a155d6 100644
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -375,11 +375,9 @@ static bool direct_trap(unsigned int num)
/*
* The Host needs to see page faults (for shadow paging and to save the
* fault address), general protection faults (in/out emulation) and
- * device not available (TS handling), invalid opcode fault (kvm hcall),
- * and of course, the hypercall trap.
+ * device not available (TS handling) and of course, the hypercall trap.
*/
- return num != 14 && num != 13 && num != 7 &&
- num != 6 && num != LGUEST_TRAP_ENTRY;
+ return num != 14 && num != 13 && num != 7 && num != LGUEST_TRAP_ENTRY;
}
/*:*/
@@ -429,8 +427,8 @@ void pin_stack_pages(struct lg_cpu *cpu)
/*
* Direct traps also mean that we need to know whenever the Guest wants to use
- * a different kernel stack, so we can change the IDT entries to use that
- * stack. The IDT entries expect a virtual address, so unlike most addresses
+ * a different kernel stack, so we can change the guest TSS to use that
+ * stack. The TSS entries expect a virtual address, so unlike most addresses
* the Guest gives us, the "esp" (stack pointer) value here is virtual, not
* physical.
*
diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h
index 9136411fadd..295df06e659 100644
--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h
@@ -59,6 +59,8 @@ struct lg_cpu {
struct lguest_pages *last_pages;
+ /* Initialization mode: linear map everything. */
+ bool linear_pages;
int cpu_pgd; /* Which pgd this cpu is currently using */
/* If a hypercall was asked for, this points to the arguments. */
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 69c84a1d88e..5289ffa2e50 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -109,6 +109,17 @@ static u32 lg_get_features(struct virtio_device *vdev)
}
/*
+ * To notify on reset or feature finalization, we (ab)use the NOTIFY
+ * hypercall, with the descriptor address of the device.
+ */
+static void status_notify(struct virtio_device *vdev)
+{
+ unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
+
+ hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
+}
+
+/*
* The virtio core takes the features the Host offers, and copies the ones
* supported by the driver into the vdev->features array. Once that's all
* sorted out, this routine is called so we can tell the Host which features we
@@ -135,6 +146,9 @@ static void lg_finalize_features(struct virtio_device *vdev)
if (test_bit(i, vdev->features))
out_features[i / 8] |= (1 << (i % 8));
}
+
+ /* Tell Host we've finished with this device's feature negotiation */
+ status_notify(vdev);
}
/* Once they've found a field, getting a copy of it is easy. */
@@ -168,28 +182,21 @@ static u8 lg_get_status(struct virtio_device *vdev)
return to_lgdev(vdev)->desc->status;
}
-/*
- * To notify on status updates, we (ab)use the NOTIFY hypercall, with the
- * descriptor address of the device. A zero status means "reset".
- */
-static void set_status(struct virtio_device *vdev, u8 status)
-{
- unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices;
-
- /* We set the status. */
- to_lgdev(vdev)->desc->status = status;
- hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0);
-}
-
static void lg_set_status(struct virtio_device *vdev, u8 status)
{
BUG_ON(!status);
- set_status(vdev, status);
+ to_lgdev(vdev)->desc->status = status;
+
+ /* Tell Host immediately if we failed. */
+ if (status & VIRTIO_CONFIG_S_FAILED)
+ status_notify(vdev);
}
static void lg_reset(struct virtio_device *vdev)
{
- set_status(vdev, 0);
+ /* 0 status means "reset" */
+ to_lgdev(vdev)->desc->status = 0;
+ status_notify(vdev);
}
/*
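As the new status_notify() above shows, reset and feature finalization are signalled with a NOTIFY hypercall whose argument is the descriptor's offset placed just past guest RAM. A worked example with assumed numbers: a 128 MB guest has max_pfn = 32768, so with 4 KiB pages the device page starts at 32768 << 12 = 0x8000000, and a descriptor 0x40 bytes into it notifies with:

	/* Illustration only; the values here are assumptions. */
	unsigned long offset = 0x40;			/* desc - lguest_devices */
	unsigned long arg = (32768UL << 12) + offset;	/* == 0x8000040 */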
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index 948c547b8e9..f97e625241a 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -1,8 +1,10 @@
-/*P:200 This contains all the /dev/lguest code, whereby the userspace launcher
- * controls and communicates with the Guest. For example, the first write will
- * tell us the Guest's memory layout and entry point. A read will run the
- * Guest until something happens, such as a signal or the Guest doing a NOTIFY
- * out to the Launcher.
+/*P:200 This contains all the /dev/lguest code, whereby the userspace
+ * launcher controls and communicates with the Guest. For example,
+ * the first write will tell us the Guest's memory layout and entry
+ * point. A read will run the Guest until something happens, such as
+ * a signal or the Guest doing a NOTIFY out to the Launcher. There is
+ * also a way for the Launcher to attach eventfds to particular NOTIFY
+ * values instead of returning from the read() call.
:*/
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
@@ -357,8 +359,8 @@ static int initialize(struct file *file, const unsigned long __user *input)
goto free_eventfds;
/*
- * Initialize the Guest's shadow page tables, using the toplevel
- * address the Launcher gave us. This allocates memory, so can fail.
+ * Initialize the Guest's shadow page tables. This allocates
+ * memory, so can fail.
*/
err = init_guest_pagetable(lg);
if (err)
@@ -516,6 +518,7 @@ static const struct file_operations lguest_fops = {
.read = read,
.llseek = default_llseek,
};
+/*:*/
/*
* This is a textbook example of a "misc" character device. Populate a "struct
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index d21578ee95d..3b62be160a6 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -17,7 +17,6 @@
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
-#include <asm/bootparam.h>
#include "lg.h"
/*M:008
@@ -156,7 +155,7 @@ static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
}
/*
- * These functions are just like the above two, except they access the Guest
+ * These functions are just like the above, except they access the Guest
* page tables. Hence they return a Guest address.
*/
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
@@ -196,7 +195,7 @@ static unsigned long gpte_addr(struct lg_cpu *cpu,
#endif
/*:*/
-/*M:014
+/*M:007
* get_pfn is slow: we could probably try to grab batches of pages here as
* an optimization (ie. pre-faulting).
:*/
@@ -325,10 +324,15 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
#endif
/* First step: get the top-level Guest page table entry. */
- gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
- /* Toplevel not present? We can't map it in. */
- if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
- return false;
+ if (unlikely(cpu->linear_pages)) {
+ /* Faking up a linear mapping. */
+ gpgd = __pgd(CHECK_GPGD_MASK);
+ } else {
+ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
+ /* Toplevel not present? We can't map it in. */
+ if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
+ return false;
+ }
/* Now look at the matching shadow entry. */
spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
@@ -353,10 +357,15 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
}
#ifdef CONFIG_X86_PAE
- gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
- /* Middle level not present? We can't map it in. */
- if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
- return false;
+ if (unlikely(cpu->linear_pages)) {
+ /* Faking up a linear mapping. */
+ gpmd = __pmd(_PAGE_TABLE);
+ } else {
+ gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
+ /* Middle level not present? We can't map it in. */
+ if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+ return false;
+ }
/* Now look at the matching shadow entry. */
spmd = spmd_addr(cpu, *spgd, vaddr);
@@ -397,8 +406,13 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif
- /* Read the actual PTE value. */
- gpte = lgread(cpu, gpte_ptr, pte_t);
+ if (unlikely(cpu->linear_pages)) {
+ /* Linear? Make up a PTE which points to the same page. */
+ gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
+ } else {
+ /* Read the actual PTE value. */
+ gpte = lgread(cpu, gpte_ptr, pte_t);
+ }
/* If this page isn't in the Guest page tables, we can't page it in. */
if (!(pte_flags(gpte) & _PAGE_PRESENT))
@@ -454,7 +468,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
* Finally, we write the Guest PTE entry back: we've set the
* _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
*/
- lgwrite(cpu, gpte_ptr, pte_t, gpte);
+ if (likely(!cpu->linear_pages))
+ lgwrite(cpu, gpte_ptr, pte_t, gpte);
/*
* The fault is fixed, the page table is populated, the mapping
@@ -612,6 +627,11 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
#ifdef CONFIG_X86_PAE
pmd_t gpmd;
#endif
+
+ /* Still not set up? Just map 1:1. */
+ if (unlikely(cpu->linear_pages))
+ return vaddr;
+
/* First step: get the top-level Guest page table entry. */
gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
/* Toplevel not present? We can't map it in. */
@@ -708,32 +728,6 @@ static unsigned int new_pgdir(struct lg_cpu *cpu,
return next;
}
-/*H:430
- * (iv) Switching page tables
- *
- * Now we've seen all the page table setting and manipulation, let's see
- * what happens when the Guest changes page tables (ie. changes the top-level
- * pgdir). This occurs on almost every context switch.
- */
-void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
-{
- int newpgdir, repin = 0;
-
- /* Look to see if we have this one already. */
- newpgdir = find_pgdir(cpu->lg, pgtable);
- /*
- * If not, we allocate or mug an existing one: if it's a fresh one,
- * repin gets set to 1.
- */
- if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
- newpgdir = new_pgdir(cpu, pgtable, &repin);
- /* Change the current pgd index to the new one. */
- cpu->cpu_pgd = newpgdir;
- /* If it was completely blank, we map in the Guest kernel stack */
- if (repin)
- pin_stack_pages(cpu);
-}
-
/*H:470
* Finally, a routine which throws away everything: all PGD entries in all
* the shadow page tables, including the Guest's kernel mappings. This is used
@@ -780,6 +774,44 @@ void guest_pagetable_clear_all(struct lg_cpu *cpu)
/* We need the Guest kernel stack mapped again. */
pin_stack_pages(cpu);
}
+
+/*H:430
+ * (iv) Switching page tables
+ *
+ * Now we've seen all the page table setting and manipulation, let's see
+ * what happens when the Guest changes page tables (ie. changes the top-level
+ * pgdir). This occurs on almost every context switch.
+ */
+void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
+{
+ int newpgdir, repin = 0;
+
+ /*
+ * The very first time they call this, we're actually running without
+ * any page tables; we've been making it up. Throw them away now.
+ */
+ if (unlikely(cpu->linear_pages)) {
+ release_all_pagetables(cpu->lg);
+ cpu->linear_pages = false;
+ /* Force allocation of a new pgdir. */
+ newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
+ } else {
+ /* Look to see if we have this one already. */
+ newpgdir = find_pgdir(cpu->lg, pgtable);
+ }
+
+ /*
+ * If not, we allocate or mug an existing one: if it's a fresh one,
+ * repin gets set to 1.
+ */
+ if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
+ newpgdir = new_pgdir(cpu, pgtable, &repin);
+ /* Change the current pgd index to the new one. */
+ cpu->cpu_pgd = newpgdir;
+ /* If it was completely blank, we map in the Guest kernel stack */
+ if (repin)
+ pin_stack_pages(cpu);
+}
/*:*/
/*M:009
@@ -919,168 +951,26 @@ void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
}
#endif
-/*H:505
- * To get through boot, we construct simple identity page mappings (which
- * set virtual == physical) and linear mappings which will get the Guest far
- * enough into the boot to create its own. The linear mapping means we
- * simplify the Guest boot, but it makes assumptions about their PAGE_OFFSET,
- * as you'll see.
- *
- * We lay them out of the way, just below the initrd (which is why we need to
- * know its size here).
- */
-static unsigned long setup_pagetables(struct lguest *lg,
- unsigned long mem,
- unsigned long initrd_size)
-{
- pgd_t __user *pgdir;
- pte_t __user *linear;
- unsigned long mem_base = (unsigned long)lg->mem_base;
- unsigned int mapped_pages, i, linear_pages;
-#ifdef CONFIG_X86_PAE
- pmd_t __user *pmds;
- unsigned int j;
- pgd_t pgd;
- pmd_t pmd;
-#else
- unsigned int phys_linear;
-#endif
-
- /*
- * We have mapped_pages frames to map, so we need linear_pages page
- * tables to map them.
- */
- mapped_pages = mem / PAGE_SIZE;
- linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
-
- /* We put the toplevel page directory page at the top of memory. */
- pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);
-
- /* Now we use the next linear_pages pages as pte pages */
- linear = (void *)pgdir - linear_pages * PAGE_SIZE;
-
-#ifdef CONFIG_X86_PAE
- /*
- * And the single mid page goes below that. We only use one, but
- * that's enough to map 1G, which definitely gets us through boot.
- */
- pmds = (void *)linear - PAGE_SIZE;
-#endif
- /*
- * Linear mapping is easy: put every page's address into the
- * mapping in order.
- */
- for (i = 0; i < mapped_pages; i++) {
- pte_t pte;
- pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
- if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
- return -EFAULT;
- }
-
-#ifdef CONFIG_X86_PAE
- /*
- * Make the Guest PMD entries point to the corresponding place in the
- * linear mapping (up to one page worth of PMD).
- */
- for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
- i += PTRS_PER_PTE, j++) {
- pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE,
- __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
-
- if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
- return -EFAULT;
- }
-
- /* One PGD entry, pointing to that PMD page. */
- pgd = __pgd(((unsigned long)pmds - mem_base) | _PAGE_PRESENT);
- /* Copy it in as the first PGD entry (ie. addresses 0-1G). */
- if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
- return -EFAULT;
- /*
- * And the other PGD entry to make the linear mapping at PAGE_OFFSET
- */
- if (copy_to_user(&pgdir[KERNEL_PGD_BOUNDARY], &pgd, sizeof(pgd)))
- return -EFAULT;
-#else
- /*
- * The top level points to the linear page table pages above.
- * We setup the identity and linear mappings here.
- */
- phys_linear = (unsigned long)linear - mem_base;
- for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
- pgd_t pgd;
- /*
- * Create a PGD entry which points to the right part of the
- * linear PTE pages.
- */
- pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
- (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));
-
- /*
- * Copy it into the PGD page at 0 and PAGE_OFFSET.
- */
- if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
- || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
- + i / PTRS_PER_PTE],
- &pgd, sizeof(pgd)))
- return -EFAULT;
- }
-#endif
-
- /*
- * We return the top level (guest-physical) address: we remember where
- * this is to write it into lguest_data when the Guest initializes.
- */
- return (unsigned long)pgdir - mem_base;
-}
-
/*H:500
* (vii) Setting up the page tables initially.
*
- * When a Guest is first created, the Launcher tells us where the toplevel of
- * its first page table is. We set some things up here:
+ * When a Guest is first created, we initialize a shadow page table which
+ * we will populate on future faults. The Guest doesn't have any actual
+ * pagetables yet, so we set linear_pages to tell demand_page() to fake it
+ * for the moment.
*/
int init_guest_pagetable(struct lguest *lg)
{
- u64 mem;
- u32 initrd_size;
- struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
-#ifdef CONFIG_X86_PAE
- pgd_t *pgd;
- pmd_t *pmd_table;
-#endif
- /*
- * Get the Guest memory size and the ramdisk size from the boot header
- * located at lg->mem_base (Guest address 0).
- */
- if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
- || get_user(initrd_size, &boot->hdr.ramdisk_size))
- return -EFAULT;
+ struct lg_cpu *cpu = &lg->cpus[0];
+ int allocated = 0;
- /*
- * We start on the first shadow page table, and give it a blank PGD
- * page.
- */
- lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
- if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
- return lg->pgdirs[0].gpgdir;
- lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
- if (!lg->pgdirs[0].pgdir)
+ /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
+ cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
+ if (!allocated)
return -ENOMEM;
-#ifdef CONFIG_X86_PAE
- /* For PAE, we also create the initial mid-level. */
- pgd = lg->pgdirs[0].pgdir;
- pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
- if (!pmd_table)
- return -ENOMEM;
-
- set_pgd(pgd + SWITCHER_PGD_INDEX,
- __pgd(__pa(pmd_table) | _PAGE_PRESENT));
-#endif
-
- /* This is the current page table. */
- lg->cpus[0].cpu_pgd = 0;
+ /* We start with a linear mapping until the Guest initializes its own. */
+ cpu->linear_pages = true;
return 0;
}
@@ -1095,10 +985,10 @@ void page_table_guest_data_init(struct lg_cpu *cpu)
* of virtual addresses used by the Switcher.
*/
|| put_user(RESERVE_MEM * 1024 * 1024,
- &cpu->lg->lguest_data->reserve_mem)
- || put_user(cpu->lg->pgdirs[0].gpgdir,
- &cpu->lg->lguest_data->pgdir))
+ &cpu->lg->lguest_data->reserve_mem)) {
kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
+ return;
+ }
/*
* In flush_user_mappings() we loop from 0 to
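The common thread in the page_tables.c hunks above: while cpu->linear_pages is set, demand_page() invents entries at every level instead of reading Guest memory, and the invented leaf entry simply maps each virtual page to itself, present and writable. Reduced to a sketch (the PTE bits match the hunk above):

	/* What demand_page() fakes per PTE while linear_pages is set. */
	static pte_t fake_linear_pte(unsigned long vaddr)
	{
		return __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
	}

The first real page-table switch in guest_new_pagetable() then discards the invented tables and drops back to the normal shadow-paging path.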
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 9f1659c3d1f..65af42f2d59 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -269,10 +269,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
static int emulate_insn(struct lg_cpu *cpu)
{
u8 insn;
- unsigned int insnlen = 0, in = 0, shift = 0;
+ unsigned int insnlen = 0, in = 0, small_operand = 0;
/*
* The eip contains the *virtual* address of the Guest's instruction:
- * guest_pa just subtracts the Guest's page_offset.
+ * walk the Guest's page tables to find the "physical" address.
*/
unsigned long physaddr = guest_pa(cpu, cpu->regs->eip);
@@ -300,11 +300,10 @@ static int emulate_insn(struct lg_cpu *cpu)
}
/*
- * 0x66 is an "operand prefix". It means it's using the upper 16 bits
- * of the eax register.
+ * 0x66 is an "operand prefix". It means a 16-bit, not a 32-bit, in/out.
*/
if (insn == 0x66) {
- shift = 16;
+ small_operand = 1;
/* The instruction is 1 byte so far, read the next byte. */
insnlen = 1;
insn = lgread(cpu, physaddr + insnlen, u8);
@@ -340,11 +339,14 @@ static int emulate_insn(struct lg_cpu *cpu)
* traditionally means "there's nothing there".
*/
if (in) {
- /* Lower bit tells is whether it's a 16 or 32 bit access */
- if (insn & 0x1)
- cpu->regs->eax = 0xFFFFFFFF;
- else
- cpu->regs->eax |= (0xFFFF << shift);
+ /* Lower bit means it's a 32/16 bit access */
+ if (insn & 0x1) {
+ if (small_operand)
+ cpu->regs->eax |= 0xFFFF;
+ else
+ cpu->regs->eax = 0xFFFFFFFF;
+ } else
+ cpu->regs->eax |= 0xFF;
}
/* Finally, we've "done" the instruction, so move past it. */
cpu->regs->eip += insnlen;
@@ -352,69 +354,6 @@ static int emulate_insn(struct lg_cpu *cpu)
return 1;
}
-/*
- * Our hypercalls mechanism used to be based on direct software interrupts.
- * After Anthony's "Refactor hypercall infrastructure" kvm patch, we decided to
- * change over to using kvm hypercalls.
- *
- * KVM_HYPERCALL is actually a "vmcall" instruction, which generates an invalid
- * opcode fault (fault 6) on non-VT cpus, so the easiest solution seemed to be
- * an *emulation approach*: if the fault was really produced by an hypercall
- * (is_hypercall() does exactly this check), we can just call the corresponding
- * hypercall host implementation function.
- *
- * But these invalid opcode faults are notably slower than software interrupts.
- * So we implemented the *patching (or rewriting) approach*: every time we hit
- * the KVM_HYPERCALL opcode in Guest code, we patch it to the old "int 0x1f"
- * opcode, so next time the Guest calls this hypercall it will use the
- * faster trap mechanism.
- *
- * Matias even benchmarked it to convince you: this shows the average cycle
- * cost of a hypercall. For each alternative solution mentioned above we've
- * made 5 runs of the benchmark:
- *
- * 1) direct software interrupt: 2915, 2789, 2764, 2721, 2898
- * 2) emulation technique: 3410, 3681, 3466, 3392, 3780
- * 3) patching (rewrite) technique: 2977, 2975, 2891, 2637, 2884
- *
- * One two-line function is worth a 20% hypercall speed boost!
- */
-static void rewrite_hypercall(struct lg_cpu *cpu)
-{
- /*
- * This are the opcodes we use to patch the Guest. The opcode for "int
- * $0x1f" is "0xcd 0x1f" but vmcall instruction is 3 bytes long, so we
- * complete the sequence with a NOP (0x90).
- */
- u8 insn[3] = {0xcd, 0x1f, 0x90};
-
- __lgwrite(cpu, guest_pa(cpu, cpu->regs->eip), insn, sizeof(insn));
- /*
- * The above write might have caused a copy of that page to be made
- * (if it was read-only). We need to make sure the Guest has
- * up-to-date pagetables. As this doesn't happen often, we can just
- * drop them all.
- */
- guest_pagetable_clear_all(cpu);
-}
-
-static bool is_hypercall(struct lg_cpu *cpu)
-{
- u8 insn[3];
-
- /*
- * This must be the Guest kernel trying to do something.
- * The bottom two bits of the CS segment register are the privilege
- * level.
- */
- if ((cpu->regs->cs & 3) != GUEST_PL)
- return false;
-
- /* Is it a vmcall? */
- __lgread(cpu, insn, guest_pa(cpu, cpu->regs->eip), sizeof(insn));
- return insn[0] == 0x0f && insn[1] == 0x01 && insn[2] == 0xc1;
-}
-
/*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */
void lguest_arch_handle_trap(struct lg_cpu *cpu)
{
@@ -429,20 +368,6 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
if (emulate_insn(cpu))
return;
}
- /*
- * If KVM is active, the vmcall instruction triggers a General
- * Protection Fault. Normally it triggers an invalid opcode
- * fault (6):
- */
- case 6:
- /*
- * We need to check if ring == GUEST_PL and faulting
- * instruction == vmcall.
- */
- if (is_hypercall(cpu)) {
- rewrite_hypercall(cpu);
- return;
- }
break;
case 14: /* We've intercepted a Page Fault. */
/*
@@ -486,7 +411,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu)
* These values mean a real interrupt occurred, in which case
* the Host handler has already been run. We just do a
* friendly check if another process should now be run, then
- * return to run the Guest again
+ * return to run the Guest again.
*/
cond_resched();
return;
@@ -536,7 +461,7 @@ void __init lguest_arch_host_init(void)
int i;
/*
- * Most of the i386/switcher.S doesn't care that it's been moved; on
+ * Most of the x86/switcher_32.S doesn't care that it's been moved; on
* Intel, jumps are relative, and it doesn't access any references to
* external code or data.
*
@@ -664,7 +589,7 @@ void __init lguest_arch_host_init(void)
clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
}
put_online_cpus();
-};
+}
/*:*/
void __exit lguest_arch_host_fini(void)
@@ -747,8 +672,6 @@ int lguest_arch_init_hypercalls(struct lg_cpu *cpu)
/*:*/
/*L:030
- * lguest_arch_setup_regs()
- *
* Most of the Guest's registers are left alone: we used get_zeroed_page() to
* allocate the structure, so they will be 0.
*/
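A decode cheat-sheet for the IN emulation above (real x86 opcode bytes, listed for illustration):

	/*   0xEC        IN  AL,DX  -> (insn & 1) == 0, eax |= 0xFF
	 *   0xED        IN EAX,DX  -> (insn & 1) == 1, eax  = 0xFFFFFFFF
	 *   0x66 0xED   IN  AX,DX  -> small_operand,   eax |= 0xFFFF
	 */

That is, the low opcode bit selects byte versus word/dword access, and the 0x66 prefix narrows the latter to 16 bits, which is the case the old shift-based code mishandled.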
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e4222455..1ff5486213f 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,16 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_RETRY,
+ MMC_BLK_RETRY_SINGLE,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_ABORT,
+};
+
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minor numbers to allocate per device");
@@ -427,14 +437,6 @@ static const struct block_device_operations mmc_bdops = {
#endif
};
-struct mmc_blk_request {
- struct mmc_request mrq;
- struct mmc_command sbc;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-};
-
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
@@ -525,7 +527,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
struct mmc_command cmd = {0};
int err;
@@ -534,11 +549,141 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+#define ERR_RETRY 2
+#define ERR_ABORT 1
+#define ERR_CONTINUE 0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ bool status_valid, u32 status)
+{
+ switch (error) {
+ case -EILSEQ:
+ /* response crc error, retry the r/w cmd */
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "response CRC error",
+ name, status);
+ return ERR_RETRY;
+
+ case -ETIMEDOUT:
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "timed out", name, status);
+
+ /* If the status cmd initially failed, retry the r/w cmd */
+ if (!status_valid)
+ return ERR_RETRY;
+
+ /*
+ * If it was a r/w cmd crc error, or illegal command
+ * (eg, issued in wrong state) then retry - we should
+ * have corrected the state problem above.
+ */
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ return ERR_RETRY;
+
+ /* Otherwise abort the command */
+ return ERR_ABORT;
+
+ default:
+ /* We don't understand the error code the driver gave us */
+ pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ req->rq_disk->disk_name, error, status);
+ return ERR_ABORT;
+ }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state. Essentially, we do this as follows:
+ * - Obtain card status. If the first attempt to obtain card status fails,
+ * the status word will reflect the failed status cmd, not the failed
+ * r/w cmd. If we fail to obtain card status, it suggests we can no
+ * longer communicate with the card.
+ * - Check the card state. If the card received the cmd but there was a
+ * transient problem with the response, it might still be in a data transfer
+ * mode. Try to send it a stop command. If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ * transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ * illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ struct mmc_blk_request *brq)
+{
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+ int err, retry;
+
+ /*
+ * Try to get card status which indicates both the card state
+ * and why there was no response. If the first attempt fails,
+ * we can't be sure the returned status is for the r/w command.
+ */
+ for (retry = 2; retry >= 0; retry--) {
+ err = get_card_status(card, &status, 0);
+ if (!err)
+ break;
+
+ prev_cmd_status_valid = false;
+ pr_err("%s: error %d sending status command, %sing\n",
+ req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+ }
+
+ /* We couldn't get a response from the card. Give up. */
if (err)
- printk(KERN_ERR "%s: error %d sending status command",
- req->rq_disk->disk_name, err);
- return cmd.resp[0];
+ return ERR_ABORT;
+
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+ R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+ err = send_stop(card, &stop_status);
+ if (err)
+ pr_err("%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, err);
+
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
+ if (err)
+ return ERR_ABORT;
+ }
+
+ /* Check for set block count errors */
+ if (brq->sbc.error)
+ return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+ prev_cmd_status_valid, status);
+
+ /* Check for r/w command errors */
+ if (brq->cmd.error)
+ return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+ prev_cmd_status_valid, status);
+
+ /* Now for stop errors. These aren't fatal to the transfer. */
+ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->stop.error,
+ brq->cmd.resp[0], status);
+
+ /*
+ * Substitute in our own stop status as this will give the error
+ * state which happened during the execution of the r/w command.
+ */
+ if (stop_status) {
+ brq->stop.resp[0] = stop_status;
+ brq->stop.error = 0;
+ }
+ return ERR_CONTINUE;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -669,240 +814,324 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS \
+ (R1_OUT_OF_RANGE | /* Command argument out of range */ \
+ R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
{
- struct mmc_blk_data *md = mq->data;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request brq;
- int ret = 1, disable_multi = 0;
+ enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+ struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
/*
- * Reliable writes are used to implement Forced Unit Access and
- * REQ_META accesses, and are supported only on MMCs.
+ * sbc.error indicates a problem with the set block count
+ * command. No data will have been transferred.
+ *
+ * cmd.error indicates a problem with the r/w command. No
+ * data will have been transferred.
+ *
+ * stop.error indicates a problem with the stop command. Data
+ * may have been transferred, or may still be transferring.
*/
- bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
- (req->cmd_flags & REQ_META)) &&
- (rq_data_dir(req) == WRITE) &&
- (md->flags & MMC_BLK_REL_WR);
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+ return MMC_BLK_ABORT;
+ case ERR_CONTINUE:
+ break;
+ }
+ }
- do {
- struct mmc_command cmd = {0};
- u32 readcmd, writecmd, status = 0;
-
- memset(&brq, 0, sizeof(struct mmc_blk_request));
- brq.mrq.cmd = &brq.cmd;
- brq.mrq.data = &brq.data;
-
- brq.cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq.cmd.arg <<= 9;
- brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz = 512;
- brq.stop.opcode = MMC_STOP_TRANSMISSION;
- brq.stop.arg = 0;
- brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = blk_rq_sectors(req);
+ /*
+ * Check for errors relating to the execution of the
+ * initial command - such as address errors. No data
+ * has been transferred.
+ */
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
+ pr_err("%s: r/w command failed, status = %#x\n",
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
+ return MMC_BLK_ABORT;
+ }
- /*
- * The block layer doesn't support all sector count
- * restrictions, so we need to be prepared for too big
- * requests.
- */
- if (brq.data.blocks > card->host->max_blk_count)
- brq.data.blocks = card->host->max_blk_count;
+ /*
+ * Everything else is either success, or a data error of some
+ * kind. If it was a write, we may have transitioned to
+ * program mode, and we have to wait for it to complete.
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ u32 status;
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ }
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq.data.blocks > 1)
- brq.data.blocks = 1;
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req),
+ brq->cmd.resp[0], brq->stop.resp[0]);
- if (brq.data.blocks > 1 || do_rel_wr) {
- /* SPI multiblock writes terminate using a special
- * token, not a STOP_TRANSMISSION request.
- */
- if (!mmc_host_is_spi(card->host) ||
- rq_data_dir(req) == READ)
- brq.mrq.stop = &brq.stop;
- readcmd = MMC_READ_MULTIPLE_BLOCK;
- writecmd = MMC_WRITE_MULTIPLE_BLOCK;
- } else {
- brq.mrq.stop = NULL;
- readcmd = MMC_READ_SINGLE_BLOCK;
- writecmd = MMC_WRITE_BLOCK;
- }
if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = readcmd;
- brq.data.flags |= MMC_DATA_READ;
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY_SINGLE;
+ }
+ return MMC_BLK_DATA_ERR;
} else {
- brq.cmd.opcode = writecmd;
- brq.data.flags |= MMC_DATA_WRITE;
+ return MMC_BLK_CMD_ERR;
}
+ }
- if (do_rel_wr)
- mmc_apply_rel_rw(&brq, card, req);
+ if (ret == MMC_BLK_SUCCESS &&
+ blk_rq_bytes(req) != brq->data.bytes_xfered)
+ ret = MMC_BLK_PARTIAL;
- /*
- * Pre-defined multi-block transfers are preferable to
- * open ended-ones (and necessary for reliable writes).
- * However, it is not sufficient to just send CMD23,
- * and avoid the final CMD12, as on an error condition
- * CMD12 (stop) needs to be sent anyway. This, coupled
- * with Auto-CMD23 enhancements provided by some
- * hosts, means that the complexity of dealing
- * with this is best left to the host. If CMD23 is
- * supported by card and host, we'll fill sbc in and let
- * the host deal with handling it correctly. This means
- * that for hosts that don't expose MMC_CAP_CMD23, no
- * change of behavior will be observed.
- *
- * N.B: Some MMC cards experience perf degradation.
- * We'll avoid using CMD23-bounded multiblock writes for
- * these, while retaining features like reliable writes.
- */
+ return ret;
+}
- if ((md->flags & MMC_BLK_CMD23) &&
- mmc_op_multi(brq.cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
- brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq.sbc.arg = brq.data.blocks |
- (do_rel_wr ? (1 << 31) : 0);
- brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
- brq.mrq.sbc = &brq.sbc;
- }
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
- mmc_set_data_timeout(&brq.data, card);
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
+ */
+ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+ (req->cmd_flags & REQ_META)) &&
+ (rq_data_dir(req) == WRITE) &&
+ (md->flags & MMC_BLK_REL_WR);
- brq.data.sg = mq->sg;
- brq.data.sg_len = mmc_queue_map_sg(mq);
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
- /*
- * Adjust the sg list so it is the same size as the
- * request.
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq->data.blocks = blk_rq_sectors(req);
+
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
+
+ /*
+ * After a read error, we redo the request one sector at a time
+ * in order to accurately determine which sectors can be read
+ * successfully.
+ */
+ if (disable_multi && brq->data.blocks > 1)
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
*/
- if (brq.data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq.data.blocks << 9;
- struct scatterlist *sg;
-
- for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
- data_size -= sg->length;
- if (data_size <= 0) {
- sg->length += data_size;
- i++;
- break;
- }
- }
- brq.data.sg_len = i;
- }
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ if (rq_data_dir(req) == READ) {
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
+ } else {
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
+ }
- mmc_queue_bounce_pre(mq);
+ if (do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
- mmc_wait_for_req(card->host, &brq.mrq);
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open-ended ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B: Some MMC cards experience perf degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
- mmc_queue_bounce_post(mq);
+ if ((md->flags & MMC_BLK_CMD23) &&
+ mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
- /*
- * Check for errors here, but don't jump to cmd_err
- * until later as we need to wait for the card to leave
- * programming mode even when things go wrong.
- */
- if (brq.sbc.error || brq.cmd.error ||
- brq.data.error || brq.stop.error) {
- if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
- /* Redo read one sector at a time */
- printk(KERN_WARNING "%s: retrying using single "
- "block read\n", req->rq_disk->disk_name);
- disable_multi = 1;
- continue;
- }
- status = get_card_status(card, req);
- }
+ mmc_set_data_timeout(&brq->data, card);
- if (brq.sbc.error) {
- printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.sbc.error,
- brq.sbc.resp[0], status);
- }
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
- if (brq.cmd.error) {
- printk(KERN_ERR "%s: error %d sending read/write "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.cmd.error,
- brq.cmd.resp[0], status);
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
}
+ brq->data.sg_len = i;
+ }
- if (brq.data.error) {
- if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
- /* 'Stop' response contains card status */
- status = brq.mrq.stop->resp[0];
- printk(KERN_ERR "%s: error %d transferring data,"
- " sector %u, nr %u, card status %#x\n",
- req->rq_disk->disk_name, brq.data.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req), status);
- }
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_err_check;
- if (brq.stop.error) {
- printk(KERN_ERR "%s: error %d sending stop command, "
- "response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.stop.error,
- brq.stop.resp[0], status);
- }
+ mmc_queue_bounce_pre(mqrq);
+}
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- do {
- int err;
-
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- goto cmd_err;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
- if (cmd.resp[0] & ~0x00000900)
- printk(KERN_ERR "%s: status = %08x\n",
- req->rq_disk->disk_name, cmd.resp[0]);
- if (mmc_decode_status(cmd.resp))
- goto cmd_err;
-#endif
- }
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ int ret = 1, disable_multi = 0, retry = 0;
+ enum mmc_blk_status status;
+ struct mmc_queue_req *mq_rq;
+ struct request *req;
+ struct mmc_async_req *areq;
+
+ if (!rqc && !mq->mqrq_prev->req)
+ return 0;
- if (brq.cmd.error || brq.stop.error || brq.data.error) {
- if (rq_data_dir(req) == READ) {
+ do {
+ if (rqc) {
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ areq = &mq->mqrq_cur->mmc_active;
+ } else
+ areq = NULL;
+ areq = mmc_start_req(card->host, areq, (int *) &status);
+ if (!areq)
+ return 0;
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ brq = &mq_rq->brq;
+ req = mq_rq->req;
+ mmc_queue_bounce_post(mq_rq);
+
+ switch (status) {
+ case MMC_BLK_SUCCESS:
+ case MMC_BLK_PARTIAL:
+ /*
+ * A block was successfully transferred.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0,
+ brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ if (status == MMC_BLK_SUCCESS && ret) {
/*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
+ * blk_end_request() returned non-zero even
+ * though all data was transferred and the
+ * host reported no errors.
+ * If this happens, it's a bug.
*/
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO, brq.data.blksz);
- spin_unlock_irq(&md->lock);
- continue;
+ printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+ __func__, blk_rq_bytes(req),
+ brq->data.bytes_xfered);
+ rqc = NULL;
+ goto cmd_abort;
}
+ break;
+ case MMC_BLK_CMD_ERR:
goto cmd_err;
+ case MMC_BLK_RETRY_SINGLE:
+ disable_multi = 1;
+ break;
+ case MMC_BLK_RETRY:
+ if (retry++ < 5)
+ break;
+ case MMC_BLK_ABORT:
+ goto cmd_abort;
+ case MMC_BLK_DATA_ERR:
+ /*
+ * After an error, we redo I/O one sector at a
+ * time, so we only reach here after trying to
+ * read a single sector.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, -EIO,
+ brq->data.blksz);
+ spin_unlock_irq(&md->lock);
+ if (!ret)
+ goto start_new_req;
+ break;
}
- /*
- * A block was successfully transferred.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ if (ret) {
+ /*
+ * In case of an incomplete request,
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ }
} while (ret);
return 1;
@@ -927,15 +1156,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
}
} else {
spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
}
+ cmd_abort:
spin_lock_irq(&md->lock);
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
+ start_new_req:
+ if (rqc) {
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+ }
+
return 0;
}
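
The rewritten mmc_blk_issue_rw_rq() above pipelines block requests: the next
request is prepared while the hardware is still busy with the previous one,
and the two only synchronize inside mmc_start_req(). A minimal standalone
sketch of that overlap, with stand-in names (prepare/submit/wait_done are
illustrative stubs, not MMC API):

	struct xfer { int id; /* stand-in for sg lists, DMA mappings, ... */ };

	static void prepare(struct xfer *x)   { /* map buffers, build descriptors */ }
	static void submit(struct xfer *x)    { /* kick off the hardware transfer */ }
	static void wait_done(struct xfer *x) { /* block until the IRQ completes it */ }

	static void pipeline(struct xfer *xs, int n)
	{
		struct xfer *prev = NULL;
		int i;

		for (i = 0; i < n; i++) {
			prepare(&xs[i]);	/* overlaps with prev's transfer */
			if (prev)
				wait_done(prev);	/* the only synchronization point */
			submit(&xs[i]);
			prev = &xs[i];
		}
		if (prev)
			wait_done(prev);	/* drain the final transfer */
	}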
@@ -945,26 +1181,37 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- mmc_claim_host(card->host);
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+ mmc_claim_host(card->host);
+
ret = mmc_blk_part_switch(card, md);
if (ret) {
ret = 0;
goto out;
}
- if (req->cmd_flags & REQ_DISCARD) {
+ if (req && req->cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
if (req->cmd_flags & REQ_SECURE)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req->cmd_flags & REQ_FLUSH) {
+ } else if (req && req->cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
- mmc_release_host(card->host);
+ if (!req)
+ /* release host only when there are no more requests */
+ mmc_release_host(card->host);
return ret;
}
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 233cdfae92f..006a5e9f8ab 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -148,6 +148,27 @@ struct mmc_test_card {
struct mmc_test_general_result *gr;
};
+enum mmc_test_prep_media {
+ MMC_TEST_PREP_NONE = 0,
+ MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+ MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+ unsigned int *sg_len;
+ unsigned int *bs;
+ unsigned int len;
+ unsigned int size;
+ bool do_write;
+ bool do_nonblock_req;
+ enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+ struct mmc_async_req areq;
+ struct mmc_test_card *test;
+};
+
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -367,21 +388,26 @@ out_free:
* Map memory into a scatterlist. Optionally allow the same memory to be
* mapped more than once.
*/
-static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
struct scatterlist *sglist, int repeat,
unsigned int max_segs, unsigned int max_seg_sz,
- unsigned int *sg_len)
+ unsigned int *sg_len, int min_sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
+ unsigned long sz = size;
sg_init_table(sglist, max_segs);
+ if (min_sg_len > max_segs)
+ min_sg_len = max_segs;
*sg_len = 0;
do {
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
+ if (min_sg_len && (size / min_sg_len < len))
+ len = ALIGN(size / min_sg_len, 512);
if (len > sz)
len = sz;
if (len > max_seg_sz)
@@ -554,11 +580,12 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
"%lu.%09lu seconds (%u kB/s, %u KiB/s, "
- "%u.%02u IOPS)\n",
+ "%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024, iops / 100, iops % 100);
+ rate / 1000, rate / 1024, iops / 100, iops % 100,
+ test->area.sg_len);
mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
@@ -661,7 +688,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
* Checks that a normal transfer didn't have any errors
*/
static int mmc_test_check_result(struct mmc_test_card *test,
- struct mmc_request *mrq)
+ struct mmc_request *mrq)
{
int ret;
@@ -685,6 +712,17 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
+static int mmc_test_check_result_async(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_test_async_req *test_async =
+ container_of(areq, struct mmc_test_async_req, areq);
+
+ mmc_test_wait_busy(test_async->test);
+
+ return mmc_test_check_result(test_async->test, areq->mrq);
+}
+
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -720,6 +758,85 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
}
/*
+ * Tests non-blocking transfers with certain parameters
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+ struct mmc_command *cmd,
+ struct mmc_command *stop,
+ struct mmc_data *data)
+{
+ memset(mrq, 0, sizeof(struct mmc_request));
+ memset(cmd, 0, sizeof(struct mmc_command));
+ memset(data, 0, sizeof(struct mmc_data));
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ mrq->cmd = cmd;
+ mrq->data = data;
+ mrq->stop = stop;
+}
+
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks,
+ unsigned blksz, int write, int count)
+{
+ struct mmc_request mrq1;
+ struct mmc_command cmd1;
+ struct mmc_command stop1;
+ struct mmc_data data1;
+
+ struct mmc_request mrq2;
+ struct mmc_command cmd2;
+ struct mmc_command stop2;
+ struct mmc_data data2;
+
+ struct mmc_test_async_req test_areq[2];
+ struct mmc_async_req *done_areq;
+ struct mmc_async_req *cur_areq = &test_areq[0].areq;
+ struct mmc_async_req *other_areq = &test_areq[1].areq;
+ int i;
+ int ret;
+
+ test_areq[0].test = test;
+ test_areq[1].test = test;
+
+ mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+ mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+
+ cur_areq->mrq = &mrq1;
+ cur_areq->err_check = mmc_test_check_result_async;
+ other_areq->mrq = &mrq2;
+ other_areq->err_check = mmc_test_check_result_async;
+
+ for (i = 0; i < count; i++) {
+ mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+
+ if (ret || (!done_areq && i > 0))
+ goto err;
+
+ if (done_areq) {
+ if (done_areq->mrq == &mrq2)
+ mmc_test_nonblock_reset(&mrq2, &cmd2,
+ &stop2, &data2);
+ else
+ mmc_test_nonblock_reset(&mrq1, &cmd1,
+ &stop1, &data1);
+ }
+ done_areq = cur_areq;
+ cur_areq = other_areq;
+ other_areq = done_areq;
+ dev_addr += blocks;
+ }
+
+ done_areq = mmc_start_req(test->card->host, NULL, &ret);
+
+ return ret;
+err:
+ return ret;
+}
+
+/*
* Tests a basic transfer with certain parameters
*/
static int mmc_test_simple_transfer(struct mmc_test_card *test,
@@ -1302,7 +1419,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
* Map sz bytes so that it can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
- int max_scatter)
+ int max_scatter, int min_sg_len)
{
struct mmc_test_area *t = &test->area;
int err;
@@ -1315,7 +1432,7 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
&t->sg_len);
} else {
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
- t->max_seg_sz, &t->sg_len);
+ t->max_seg_sz, &t->sg_len, min_sg_len);
}
if (err)
printk(KERN_INFO "%s: Failed to map sg list\n",
@@ -1336,14 +1453,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
}
/*
- * Map and transfer bytes.
+ * Map and transfer bytes for multiple transfers.
*/
-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
- unsigned int dev_addr, int write, int max_scatter,
- int timed)
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write,
+ int max_scatter, int timed, int count,
+ bool nonblock, int min_sg_len)
{
struct timespec ts1, ts2;
- int ret;
+ int ret = 0;
+ int i;
+ struct mmc_test_area *t = &test->area;
/*
* In the case of a maximally scattered transfer, the maximum transfer
@@ -1361,14 +1481,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
sz = max_tfr;
}
- ret = mmc_test_area_map(test, sz, max_scatter);
+ ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
if (ret)
return ret;
if (timed)
getnstimeofday(&ts1);
+ if (nonblock)
+ ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+ dev_addr, t->blocks, 512, write, count);
+ else
+ for (i = 0; i < count && ret == 0; i++) {
+ ret = mmc_test_area_transfer(test, dev_addr, write);
+ dev_addr += sz >> 9;
+ }
- ret = mmc_test_area_transfer(test, dev_addr, write);
if (ret)
return ret;
@@ -1376,11 +1503,19 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
getnstimeofday(&ts2);
if (timed)
- mmc_test_print_rate(test, sz, &ts1, &ts2);
+ mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
return 0;
}
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write, int max_scatter,
+ int timed)
+{
+ return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+ timed, 1, false, 0);
+}
+
/*
* Write the test area entirely.
*/
@@ -1954,6 +2089,245 @@ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
return mmc_test_large_seq_perf(test, 1);
}
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *tdata,
+ unsigned int reqsize, unsigned int size,
+ int min_sg_len)
+{
+ unsigned int dev_addr;
+ struct mmc_test_area *t = &test->area;
+ int ret = 0;
+
+ /* Set up test area */
+ if (size > mmc_test_capacity(test->card) / 2 * 512)
+ size = mmc_test_capacity(test->card) / 2 * 512;
+ if (reqsize > t->max_tfr)
+ reqsize = t->max_tfr;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if ((dev_addr & 0xffff0000))
+		dev_addr &= 0xffff0000; /* Round to 32MiB (0x10000-sector) boundary */
+ else
+ dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+ if (!dev_addr)
+ goto err;
+
+ if (reqsize > size)
+ return 0;
+
+ /* prepare test area */
+ if (mmc_can_erase(test->card) &&
+ tdata->prepare & MMC_TEST_PREP_ERASE) {
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_SECURE_ERASE_ARG);
+ if (ret)
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_ERASE_ARG);
+ if (ret)
+ goto err;
+ }
+
+ /* Run test */
+ ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+ tdata->do_write, 0, 1, size / reqsize,
+ tdata->do_nonblock_req, min_sg_len);
+ if (ret)
+ goto err;
+
+ return ret;
+ err:
+ printk(KERN_INFO "[%s] error\n", __func__);
+ return ret;
+}
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+ void *pre_req = test->card->host->ops->pre_req;
+ void *post_req = test->card->host->ops->post_req;
+
+ if (rw->do_nonblock_req &&
+ ((!pre_req && post_req) || (pre_req && !post_req))) {
+ printk(KERN_INFO "error: only one of pre/post is defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+ rw->sg_len[i]);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2221,6 +2595,61 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Write performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2445,6 +2874,32 @@ static const struct file_operations mmc_test_fops_test = {
.release = single_release,
};
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+ int i;
+
+ mutex_lock(&mmc_test_lock);
+
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+ seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+ .open = mtf_testlist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void mmc_test_free_file_test(struct mmc_card *card)
{
struct mmc_test_dbgfs_file *df, *dfs;
@@ -2476,7 +2931,18 @@ static int mmc_test_register_file_test(struct mmc_card *card)
if (IS_ERR_OR_NULL(file)) {
dev_err(&card->dev,
- "Can't create file. Perhaps debugfs is disabled.\n");
+ "Can't create test. Perhaps debugfs is disabled.\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ if (card->debugfs_root)
+ file = debugfs_create_file("testlist", S_IRUGO,
+ card->debugfs_root, card, &mmc_test_fops_testlist);
+
+ if (IS_ERR_OR_NULL(file)) {
+ dev_err(&card->dev,
+ "Can't create testlist. Perhaps debugfs is disabled.\n");
ret = -ENODEV;
goto err;
}
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa318d..45fb362e3f0 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,14 +52,18 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
+ struct mmc_queue_req *tmp;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
- mq->req = req;
+ mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
- if (!req) {
+ if (req || mq->mqrq_prev->req) {
+ set_current_state(TASK_RUNNING);
+ mq->issue_fn(mq, req);
+ } else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
@@ -67,11 +71,14 @@ static int mmc_queue_thread(void *d)
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
- continue;
}
- set_current_state(TASK_RUNNING);
- mq->issue_fn(mq, req);
+ /* Current request becomes previous request and vice versa. */
+ mq->mqrq_prev->brq.mrq.data = NULL;
+ mq->mqrq_prev->req = NULL;
+ tmp = mq->mqrq_prev;
+ mq->mqrq_prev = mq->mqrq_cur;
+ mq->mqrq_cur = tmp;
} while (1);
up(&mq->thread_sem);
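
The queue thread above no longer tracks a single mq->req; it ping-pongs
between two preallocated mmc_queue_req slots so one request can be prepared
while the other completes. A standalone sketch of the swap idiom (the struct
names here are hypothetical):

	struct slot { void *req; };

	struct queue_ctx {
		struct slot slots[2];
		struct slot *cur, *prev;	/* both point into slots[] */
	};

	static void retire_and_swap(struct queue_ctx *q)
	{
		struct slot *tmp = q->prev;

		q->prev->req = NULL;	/* the old request is finished */
		q->prev = q->cur;	/* current becomes previous */
		q->cur = tmp;		/* recycle the retired slot */
	}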
@@ -97,10 +104,46 @@ static void mmc_request(struct request_queue *q)
return;
}
- if (!mq->req)
+ if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
+struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ if (!sg)
+ *err = -ENOMEM;
+ else {
+ *err = 0;
+ sg_init_table(sg, sg_len);
+ }
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+ if (card->erased_byte == 0)
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = 0;
+ if (mmc_can_secure_erase_trim(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -116,6 +159,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = *mmc_dev(host)->dma_mask;
@@ -125,21 +170,16 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (!mq->queue)
return -ENOMEM;
+	memset(mqrq_cur, 0, sizeof(struct mmc_queue_req));
+	memset(mqrq_prev, 0, sizeof(struct mmc_queue_req));
+ mq->mqrq_cur = mqrq_cur;
+ mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
- mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- if (mmc_can_erase(card)) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
- mq->queue->limits.max_discard_sectors = UINT_MAX;
- if (card->erased_byte == 0)
- mq->queue->limits.discard_zeroes_data = 1;
- mq->queue->limits.discard_granularity = card->pref_erase << 9;
- if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
- mq->queue);
- }
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
@@ -155,53 +195,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
bouncesz = host->max_blk_count * 512;
if (bouncesz > 512) {
- mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mq->bounce_buf) {
+ mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_cur->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+ "allocate bounce cur buffer\n",
+ mmc_card_name(card));
+ }
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
printk(KERN_WARNING "%s: unable to "
- "allocate bounce buffer\n",
+ "allocate bounce prev buffer\n",
mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
}
}
- if (mq->bounce_buf) {
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kmalloc(sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, 1);
- mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
- bouncesz / 512, GFP_KERNEL);
- if (!mq->bounce_sg) {
- ret = -ENOMEM;
+ mqrq_cur->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
- if (!mq->bounce_buf) {
+ if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_segs, GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, host->max_segs);
}
sema_init(&mq->thread_sem, 1);
@@ -216,16 +267,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
return 0;
free_bounce_sg:
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
cleanup_queue:
- if (mq->sg)
- kfree(mq->sg);
- mq->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -234,6 +291,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -247,16 +306,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
- kfree(mq->sg);
- mq->sg = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
mq->card = NULL;
}
@@ -309,27 +375,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
- if (!mq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+ if (!mqrq->bounce_buf)
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
- BUG_ON(!mq->bounce_sg);
+ BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
- mq->bounce_sg_len = sg_len;
+ mqrq->bounce_sg_len = sg_len;
buflen = 0;
- for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, buflen);
+ sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
return 1;
}
@@ -338,31 +404,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != WRITE)
+ if (rq_data_dir(mqrq->req) != WRITE)
return;
- sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != READ)
+ if (rq_data_dir(mqrq->req) != READ)
return;
- sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
-
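
mmc_queue_bounce_pre()/mmc_queue_bounce_post() above encode a simple
direction rule: write data is staged into the contiguous bounce buffer before
the transfer, read data is copied back out afterwards. A standalone sketch of
that rule, assuming hypothetical names:

	#include <string.h>
	#include <stddef.h>

	enum dir { DIR_READ, DIR_WRITE };

	static void bounce_pre(enum dir d, void *bounce, const void *payload,
			       size_t len)
	{
		if (d == DIR_WRITE)
			memcpy(bounce, payload, len);	/* stage data for the host */
	}

	static void bounce_post(enum dir d, const void *bounce, void *payload,
				size_t len)
	{
		if (d == DIR_READ)
			memcpy(payload, bounce, len);	/* hand data back to the request */
	}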
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6223ef8dc9c..d2a1eb4b9f9 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,19 +4,35 @@
struct request;
struct task_struct;
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+struct mmc_queue_req {
+ struct request *req;
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+ struct mmc_async_req mmc_active;
+};
+
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
- struct request *req;
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
- struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
+ struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq_cur;
+ struct mmc_queue_req *mqrq_prev;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -25,8 +41,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
-extern void mmc_queue_bounce_pre(struct mmc_queue *);
-extern void mmc_queue_bounce_post(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 7843efe2235..f091b43d00c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -198,9 +198,109 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
static void mmc_wait_done(struct mmc_request *mrq)
{
- complete(mrq->done_data);
+ complete(&mrq->completion);
}
+static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ init_completion(&mrq->completion);
+ mrq->done = mmc_wait_done;
+ mmc_start_request(host, mrq);
+}
+
+static void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ wait_for_completion(&mrq->completion);
+}
+
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ * @is_first_req: true if there is no previously started request
+ *                that may run in parallel to this call, otherwise false
+ *
+ * mmc_pre_req() is called prior to mmc_start_req() to let the
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq, is_first_req);
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq, err);
+}
+
+/**
+ * mmc_start_req - start a non-blocking request
+ * @host: MMC host to start command
+ * @areq: async request to start
+ * @error: out parameter returns 0 for success, otherwise non zero
+ *
+ * Start a new MMC custom command request for a host.
+ * If there is an ongoing async request, wait for it to complete,
+ * then start the new request and return.
+ * Does not wait for the new request to complete.
+ *
+ * Returns the previously started request once it has completed, or
+ * NULL if there was no request in flight; in that case NULL is
+ * returned without waiting and is not an error condition.
+ */
+struct mmc_async_req *mmc_start_req(struct mmc_host *host,
+ struct mmc_async_req *areq, int *error)
+{
+ int err = 0;
+ struct mmc_async_req *data = host->areq;
+
+ /* Prepare a new request */
+ if (areq)
+ mmc_pre_req(host, areq->mrq, !host->areq);
+
+ if (host->areq) {
+ mmc_wait_for_req_done(host, host->areq->mrq);
+ err = host->areq->err_check(host->card, host->areq);
+ if (err) {
+ mmc_post_req(host, host->areq->mrq, 0);
+ if (areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ host->areq = NULL;
+ goto out;
+ }
+ }
+
+ if (areq)
+ __mmc_start_req(host, areq->mrq);
+
+ if (host->areq)
+ mmc_post_req(host, host->areq->mrq, 0);
+
+ host->areq = areq;
+ out:
+ if (error)
+ *error = err;
+ return data;
+}
+EXPORT_SYMBOL(mmc_start_req);
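
A sketch of how a caller can drive mmc_start_req() (the issue loop below is
hypothetical; only mmc_start_req() itself is the API added above). Each call
hands in the next request and gets back the previously completed one; a final
NULL argument flushes the pipeline:

	static int issue_all(struct mmc_host *host, struct mmc_async_req **areqs,
			     int n)
	{
		struct mmc_async_req *done;
		int err = 0, i;

		for (i = 0; i < n; i++) {
			done = mmc_start_req(host, areqs[i], &err);
			if (err)
				return err;	/* 'done' is the request that failed */
			/* 'done' is NULL on the first pass, else post-process it */
		}
		/* Passing NULL waits for the last outstanding request. */
		mmc_start_req(host, NULL, &err);
		return err;
	}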
+
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
@@ -212,16 +312,9 @@ static void mmc_wait_done(struct mmc_request *mrq)
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
- DECLARE_COMPLETION_ONSTACK(complete);
-
- mrq->done_data = &complete;
- mrq->done = mmc_wait_done;
-
- mmc_start_request(host, mrq);
-
- wait_for_completion(&complete);
+ __mmc_start_req(host, mrq);
+ mmc_wait_for_req_done(host, mrq);
}
-
EXPORT_SYMBOL(mmc_wait_for_req);
/**
@@ -1516,6 +1609,82 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
+static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
+ unsigned int arg)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
+ unsigned int last_timeout = 0;
+
+ if (card->erase_shift)
+ max_qty = UINT_MAX >> card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_qty = UINT_MAX;
+ else
+ max_qty = UINT_MAX / card->erase_size;
+
+ /* Find the largest qty with an OK timeout */
+ do {
+ y = 0;
+ for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
+ timeout = mmc_erase_timeout(card, arg, qty + x);
+ if (timeout > host->max_discard_to)
+ break;
+ if (timeout < last_timeout)
+ break;
+ last_timeout = timeout;
+ y = x;
+ }
+ qty += y;
+ } while (y);
+
+ if (!qty)
+ return 0;
+
+ if (qty == 1)
+ return 1;
+
+ /* Convert qty to sectors */
+ if (card->erase_shift)
+ max_discard = --qty << card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_discard = qty;
+ else
+ max_discard = --qty * card->erase_size;
+
+ return max_discard;
+}
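
The loop in mmc_do_calc_max_discard() above is a greedy doubling search:
starting from qty, it probes qty + 2^k for growing k while the erase timeout
still fits the host limit, then restarts from the best point found. A
standalone sketch under a generic monotone cost function (cost() is a stub
standing in for mmc_erase_timeout(); the timeout-wraparound guard is
omitted):

	static unsigned int cost(unsigned int qty)
	{
		return qty * 3;		/* stub: any non-decreasing cost works */
	}

	static unsigned int max_qty_within(unsigned int budget, unsigned int limit)
	{
		unsigned int qty = 0, x, y;

		do {
			y = 0;
			for (x = 1; x && x <= limit && limit - x >= qty; x <<= 1) {
				if (cost(qty + x) > budget)
					break;
				y = x;	/* largest step that still fits */
			}
			qty += y;
		} while (y);

		return qty;
	}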
+
+unsigned int mmc_calc_max_discard(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, max_trim;
+
+ if (!host->max_discard_to)
+ return UINT_MAX;
+
+ /*
+	 * Without erase_group_def set, the MMC erase timeout depends on the
+	 * clock frequency, which can change. In that case, the best choice is
+ * just the preferred erase size.
+ */
+ if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
+ return card->pref_erase;
+
+ max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
+ if (mmc_can_trim(card)) {
+ max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
+ if (max_trim < max_discard)
+ max_discard = max_trim;
+ } else if (max_discard < card->erase_size) {
+ max_discard = 0;
+ }
+ pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
+ mmc_hostname(host), max_discard, host->max_discard_to);
+ return max_discard;
+}
+EXPORT_SYMBOL(mmc_calc_max_discard);
+
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {0};
@@ -1663,6 +1832,10 @@ int mmc_power_save_host(struct mmc_host *host)
{
int ret = 0;
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__);
+#endif
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
@@ -1685,6 +1858,10 @@ int mmc_power_restore_host(struct mmc_host *host)
{
int ret;
+#ifdef CONFIG_MMC_DEBUG
+ pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__);
+#endif
+
mmc_bus_get(host);
if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) {
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ff2774128aa..633975ff2bb 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -409,52 +409,62 @@ out:
static int sd_select_driver_type(struct mmc_card *card, u8 *status)
{
- int host_drv_type = 0, card_drv_type = 0;
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
int err;
/*
* If the host doesn't support any of the Driver Types A,C or D,
- * default Driver Type B is used.
+	 * or there is no board-specific handler, then the default
+	 * Driver Type B is used.
*/
if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
| MMC_CAP_DRIVER_TYPE_D)))
return 0;
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
- host_drv_type = MMC_SET_DRIVER_TYPE_A;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
- card_drv_type = MMC_SET_DRIVER_TYPE_A;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
- card_drv_type = MMC_SET_DRIVER_TYPE_B;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
- host_drv_type = MMC_SET_DRIVER_TYPE_C;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
- /*
- * If we are here, that means only the default driver type
- * B is supported by the host.
- */
- host_drv_type = MMC_SET_DRIVER_TYPE_B;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
- card_drv_type = MMC_SET_DRIVER_TYPE_B;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- }
+ if (!card->host->ops->select_drive_strength)
+ return 0;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+	 * information and let the hardware-specific code
+	 * return what is possible given the options.
+ */
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
- err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
+ err = mmc_sd_switch(card, 1, 2, drive_strength, status);
if (err)
return err;
- if ((status[15] & 0xF) != card_drv_type) {
- printk(KERN_WARNING "%s: Problem setting driver strength!\n",
+ if ((status[15] & 0xF) != drive_strength) {
+ printk(KERN_WARNING "%s: Problem setting drive strength!\n",
mmc_hostname(card->host));
return 0;
}
- mmc_set_driver_type(card->host, host_drv_type);
+ mmc_set_driver_type(card->host, drive_strength);
return 0;
}
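
A sketch of what a board-specific ->select_drive_strength() handler might
look like, given the masks passed above. The selection policy here is
hypothetical (a real board would base it on trace impedance and signal
quality); it simply picks the strongest driver type both host and card
advertise, falling back to the mandatory Type B:

	static int board_select_drive_strength(unsigned int max_dtr,
					       int host_drv, int card_drv)
	{
		int common = host_drv & card_drv;

		if (common & SD_DRIVER_TYPE_A)
			return MMC_SET_DRIVER_TYPE_A;
		if (common & SD_DRIVER_TYPE_C)
			return MMC_SET_DRIVER_TYPE_C;
		return MMC_SET_DRIVER_TYPE_B;	/* Type B is always supported */
	}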
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d2565df8a7f..e4e6822d09e 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -167,11 +167,8 @@ static int sdio_bus_remove(struct device *dev)
int ret = 0;
/* Make sure card is powered before invoking ->remove() */
- if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
- ret = pm_runtime_get_sync(dev);
- if (ret < 0)
- goto out;
- }
+ if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
+ pm_runtime_get_sync(dev);
drv->remove(func);
@@ -191,7 +188,6 @@ static int sdio_bus_remove(struct device *dev)
if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
pm_runtime_put_sync(dev);
-out:
return ret;
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f6ad0..8c87096531e 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -81,28 +81,32 @@ config MMC_RICOH_MMC
If unsure, say Y.
-config MMC_SDHCI_OF
- tristate "SDHCI support on OpenFirmware platforms"
- depends on MMC_SDHCI && OF
+config MMC_SDHCI_PLTFM
+ tristate "SDHCI platform and OF driver helper"
+ depends on MMC_SDHCI
help
- This selects the OF support for Secure Digital Host Controller
- Interfaces.
+	  This selects common helper functions for Secure Digital Host
+	  Controller Interface based platform and OF drivers.
+
+ If you have a controller with this interface, say Y or M here.
If unsure, say N.
config MMC_SDHCI_OF_ESDHC
- bool "SDHCI OF support for the Freescale eSDHC controller"
- depends on MMC_SDHCI_OF
+ tristate "SDHCI OF support for the Freescale eSDHC controller"
+ depends on MMC_SDHCI_PLTFM
depends on PPC_OF
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
help
This selects the Freescale eSDHC controller support.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_OF_HLWD
- bool "SDHCI OF support for the Nintendo Wii SDHCI controllers"
- depends on MMC_SDHCI_OF
+ tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers"
+ depends on MMC_SDHCI_PLTFM
depends on PPC_OF
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
help
@@ -110,40 +114,36 @@ config MMC_SDHCI_OF_HLWD
found in the "Hollywood" chipset of the Nintendo Wii video game
console.
- If unsure, say N.
-
-config MMC_SDHCI_PLTFM
- tristate "SDHCI support on the platform specific bus"
- depends on MMC_SDHCI
- help
- This selects the platform specific bus support for Secure Digital Host
- Controller Interface.
-
If you have a controller with this interface, say Y or M here.
If unsure, say N.
config MMC_SDHCI_CNS3XXX
- bool "SDHCI support on the Cavium Networks CNS3xxx SoC"
+ tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"
depends on ARCH_CNS3XXX
depends on MMC_SDHCI_PLTFM
help
This selects the SDHCI support for CNS3xxx System-on-Chip devices.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_ESDHC_IMX
- bool "SDHCI platform support for the Freescale eSDHC i.MX controller"
- depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5)
+ tristate "SDHCI platform support for the Freescale eSDHC i.MX controller"
+ depends on ARCH_MX25 || ARCH_MX35 || ARCH_MX5
+ depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
This selects the Freescale eSDHC controller support on the platform
bus, found on platforms like mx35/51.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_DOVE
- bool "SDHCI support on Marvell's Dove SoC"
+ tristate "SDHCI support on Marvell's Dove SoC"
depends on ARCH_DOVE
depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
@@ -151,11 +151,14 @@ config MMC_SDHCI_DOVE
This selects the Secure Digital Host Controller Interface in
Marvell's Dove SoC.
+ If you have a controller with this interface, say Y or M here.
+
If unsure, say N.
config MMC_SDHCI_TEGRA
- bool "SDHCI platform support for the Tegra SD/MMC Controller"
- depends on MMC_SDHCI_PLTFM && ARCH_TEGRA
+ tristate "SDHCI platform support for the Tegra SD/MMC Controller"
+ depends on ARCH_TEGRA
+ depends on MMC_SDHCI_PLTFM
select MMC_SDHCI_IO_ACCESSORS
help
This selects the Tegra SD/MMC controller. If you have a Tegra
@@ -178,14 +181,28 @@ config MMC_SDHCI_S3C
If unsure, say N.
-config MMC_SDHCI_PXA
- tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support"
- depends on ARCH_PXA || ARCH_MMP
+config MMC_SDHCI_PXAV3
+ tristate "Marvell MMP2 SD Host Controller support (PXAV3)"
+ depends on CLKDEV_LOOKUP
select MMC_SDHCI
- select MMC_SDHCI_IO_ACCESSORS
+ select MMC_SDHCI_PLTFM
+ default CPU_MMP2
+ help
+ This selects the Marvell(R) PXAV3 SD Host Controller.
+ If you have a MMP2 platform with SD Host Controller
+ and a card slot, say Y or M here.
+
+ If unsure, say N.
+
+config MMC_SDHCI_PXAV2
+ tristate "Marvell PXA9XX SD Host Controller support (PXAV2)"
+ depends on CLKDEV_LOOKUP
+ select MMC_SDHCI
+ select MMC_SDHCI_PLTFM
+ default CPU_PXA910
help
- This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller.
- If you have a PXA168/PXA910/MMP2 platform with SD Host Controller
+ This selects the Marvell(R) PXAV2 SD Host Controller.
+ If you have a PXA9XX platform with SD Host Controller
and a card slot, say Y or M here.
If unsure, say N.
@@ -281,13 +298,12 @@ config MMC_ATMELMCI
endchoice
config MMC_ATMELMCI_DMA
- bool "Atmel MCI DMA support (EXPERIMENTAL)"
- depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL
+ bool "Atmel MCI DMA support"
+ depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE
help
Say Y here to have the Atmel MCI driver use a DMA engine to
do data transfers and thus increase the throughput and
- reduce the CPU utilization. Note that this is highly
- experimental and may cause the driver to lock up.
+ reduce the CPU utilization.
If unsure, say N.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf73d6e..b4b83f302e3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,7 +9,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
+obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
+obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
@@ -31,9 +32,7 @@ obj-$(CONFIG_MMC_SDRICOH_CS) += sdricoh_cs.o
obj-$(CONFIG_MMC_TMIO) += tmio_mmc.o
obj-$(CONFIG_MMC_TMIO_CORE) += tmio_mmc_core.o
tmio_mmc_core-y := tmio_mmc_pio.o
-ifneq ($(CONFIG_MMC_SDHI),n)
-tmio_mmc_core-y += tmio_mmc_dma.o
-endif
+tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI)) += tmio_mmc_dma.o
obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
@@ -44,17 +43,13 @@ obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
obj-$(CONFIG_MMC_USHC) += ushc.o
-obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-platform.o
-sdhci-platform-y := sdhci-pltfm.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
-sdhci-platform-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
-
-obj-$(CONFIG_MMC_SDHCI_OF) += sdhci-of.o
-sdhci-of-y := sdhci-of-core.o
-sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
-sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
+obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o
+obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o
+obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o
+obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o
+obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
+obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index d3e6a962f42..a4aa3af86fe 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -77,7 +77,8 @@
#include <mach/board.h>
#include <mach/cpu.h>
-#include <mach/at91_mci.h>
+
+#include "at91_mci.h"
#define DRIVER_NAME "at91_mci"
diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/drivers/mmc/host/at91_mci.h
index 02182c16a02..eec3a6b1c2b 100644
--- a/arch/arm/mach-at91/include/mach/at91_mci.h
+++ b/drivers/mmc/host/at91_mci.h
@@ -1,5 +1,5 @@
/*
- * arch/arm/mach-at91/include/mach/at91_mci.h
+ * drivers/mmc/host/at91_mci.h
*
* Copyright (C) 2005 Ivan Kokshaysky
* Copyright (C) SAN People
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index aa8039f473c..fa8cae1d700 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -203,6 +203,7 @@ struct atmel_mci_slot {
#define ATMCI_CARD_PRESENT 0
#define ATMCI_CARD_NEED_INIT 1
#define ATMCI_SHUTDOWN 2
+#define ATMCI_SUSPENDED 3
int detect_pin;
int wp_pin;
@@ -1878,10 +1879,72 @@ static int __exit atmci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int atmci_suspend(struct device *dev)
+{
+ struct atmel_mci *host = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ int ret;
+
+ if (!slot)
+ continue;
+ ret = mmc_suspend_host(slot->mmc);
+ if (ret < 0) {
+ while (--i >= 0) {
+ slot = host->slot[i];
+ if (slot
+ && test_bit(ATMCI_SUSPENDED, &slot->flags)) {
+ mmc_resume_host(host->slot[i]->mmc);
+ clear_bit(ATMCI_SUSPENDED, &slot->flags);
+ }
+ }
+ return ret;
+ } else {
+ set_bit(ATMCI_SUSPENDED, &slot->flags);
+ }
+ }
+
+ return 0;
+}
+
+static int atmci_resume(struct device *dev)
+{
+ struct atmel_mci *host = dev_get_drvdata(dev);
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) {
+ struct atmel_mci_slot *slot = host->slot[i];
+ int err;
+
+ if (!slot)
+ continue;
+ if (!test_bit(ATMCI_SUSPENDED, &slot->flags))
+ continue;
+ err = mmc_resume_host(slot->mmc);
+ if (err < 0)
+ ret = err;
+ else
+ clear_bit(ATMCI_SUSPENDED, &slot->flags);
+ }
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume);
+#define ATMCI_PM_OPS (&atmci_pm)
+#else
+#define ATMCI_PM_OPS NULL
+#endif
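
atmci_suspend() above uses the classic unwind-on-failure idiom: if suspending
slot i fails, every slot that was already suspended is resumed before the
error is returned. A standalone sketch (suspend_one/resume_one are stand-in
stubs):

	static int suspend_one(int i)  { return 0; /* stub */ }
	static void resume_one(int i)  { /* stub */ }

	static int suspend_all(int n)
	{
		int i, ret;

		for (i = 0; i < n; i++) {
			ret = suspend_one(i);
			if (ret < 0) {
				while (--i >= 0)
					resume_one(i);	/* undo partial progress */
				return ret;
			}
		}
		return 0;
	}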
+
static struct platform_driver atmci_driver = {
.remove = __exit_p(atmci_remove),
.driver = {
.name = "atmel_mci",
+ .pm = ATMCI_PM_OPS,
},
};
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 66dcddb9c20..0c839d3338d 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -33,6 +33,7 @@
#include <linux/mmc/dw_mmc.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
#include "dw_mmc.h"
@@ -100,6 +101,8 @@ struct dw_mci_slot {
int last_detect_state;
};
+static struct workqueue_struct *dw_mci_card_workqueue;
+
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -284,7 +287,7 @@ static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
- if (host->use_dma) {
+ if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
} else {
@@ -432,6 +435,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
unsigned int i, direction, sg_len;
u32 temp;
+ host->using_dma = 0;
+
/* If we don't have a channel, we can't do DMA */
if (!host->use_dma)
return -ENODEV;
@@ -451,6 +456,8 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
return -EINVAL;
}
+ host->using_dma = 1;
+
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
else
@@ -489,14 +496,18 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->sg = NULL;
host->data = data;
+ if (data->flags & MMC_DATA_READ)
+ host->dir_status = DW_MCI_RECV_STATUS;
+ else
+ host->dir_status = DW_MCI_SEND_STATUS;
+
if (dw_mci_submit_data_dma(host, data)) {
host->sg = data->sg;
host->pio_offset = 0;
- if (data->flags & MMC_DATA_READ)
- host->dir_status = DW_MCI_RECV_STATUS;
- else
- host->dir_status = DW_MCI_SEND_STATUS;
+ host->part_buf_start = 0;
+ host->part_buf_count = 0;
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
@@ -574,7 +585,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
}
/* Set the current slot bus width */
- mci_writel(host, CTYPE, slot->ctype);
+ mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
static void dw_mci_start_request(struct dw_mci *host,
@@ -624,13 +635,13 @@ static void dw_mci_start_request(struct dw_mci *host,
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
+/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
host->state);
- spin_lock_bh(&host->lock);
slot->mrq = mrq;
if (host->state == STATE_IDLE) {
@@ -639,8 +650,6 @@ static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
} else {
list_add_tail(&slot->queue_node, &host->queue);
}
-
- spin_unlock_bh(&host->lock);
}
static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -650,14 +659,23 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(slot->mrq);
+ /*
+ * The check for card presence and queueing of the request must be
+ * atomic, otherwise the card could be removed in between and the
+ * request wouldn't fail until another card was inserted.
+ */
+ spin_lock_bh(&host->lock);
+
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
+ spin_unlock_bh(&host->lock);
mrq->cmd->error = -ENOMEDIUM;
mmc_request_done(mmc, mrq);
return;
}
- /* We don't support multiple blocks of weird lengths. */
dw_mci_queue_request(host, slot, mrq);
+
+ spin_unlock_bh(&host->lock);
}
static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -831,7 +849,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
struct mmc_command *cmd;
enum dw_mci_state state;
enum dw_mci_state prev_state;
- u32 status;
+ u32 status, ctrl;
spin_lock(&host->lock);
@@ -891,13 +909,19 @@ static void dw_mci_tasklet_func(unsigned long priv)
if (status & DW_MCI_DATA_ERROR_FLAGS) {
if (status & SDMMC_INT_DTO) {
- dev_err(&host->pdev->dev,
- "data timeout error\n");
data->error = -ETIMEDOUT;
} else if (status & SDMMC_INT_DCRC) {
- dev_err(&host->pdev->dev,
- "data CRC error\n");
data->error = -EILSEQ;
+ } else if (status & SDMMC_INT_EBE &&
+ host->dir_status ==
+ DW_MCI_SEND_STATUS) {
+ /*
+ * No data CRC status was returned.
+ * The number of bytes transferred will
+ * be exaggerated in PIO mode.
+ */
+ data->bytes_xfered = 0;
+ data->error = -ETIMEDOUT;
} else {
dev_err(&host->pdev->dev,
"data FIFO error "
@@ -905,6 +929,16 @@ static void dw_mci_tasklet_func(unsigned long priv)
status);
data->error = -EIO;
}
+ /*
+ * After an error, there may be data lingering
+				 * generates a block interrupt, hence we set
+				 * the scatter-gather pointer to NULL first.
+ * the scatter-gather pointer to NULL.
+ */
+ host->sg = NULL;
+ ctrl = mci_readl(host, CTRL);
+ ctrl |= SDMMC_CTRL_FIFO_RESET;
+ mci_writel(host, CTRL, ctrl);
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
@@ -946,84 +980,278 @@ unlock:
}
-static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+/* push final bytes to part_buf, only use during push */
+static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
{
- u16 *pdata = (u16 *)buf;
+ memcpy((void *)&host->part_buf, buf, cnt);
+ host->part_buf_count = cnt;
+}
- WARN_ON(cnt % 2 != 0);
+/* append bytes to part_buf, only use during push */
+static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
+ memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
+ host->part_buf_count += cnt;
+ return cnt;
+}
- cnt = cnt >> 1;
- while (cnt > 0) {
- mci_writew(host, DATA, *pdata++);
- cnt--;
+/* pull first bytes from part_buf, only use during pull */
+static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
+{
+ cnt = min(cnt, (int)host->part_buf_count);
+ if (cnt) {
+ memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
+ cnt);
+ host->part_buf_count -= cnt;
+ host->part_buf_start += cnt;
}
+ return cnt;
}
-static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+/* pull final bytes from the part_buf, assuming it's just been filled */
+static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
{
- u16 *pdata = (u16 *)buf;
+ memcpy(buf, &host->part_buf, cnt);
+ host->part_buf_start = cnt;
+ host->part_buf_count = (1 << host->data_shift) - cnt;
+}
- WARN_ON(cnt % 2 != 0);
+static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
+{
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (!sg_next(host->sg) || host->part_buf_count == 2) {
+ mci_writew(host, DATA, host->part_buf16);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x1)) {
+ while (cnt >= 2) {
+ u16 aligned_buf[64];
+ int len = min(cnt & -2, (int)sizeof(aligned_buf));
+ int items = len >> 1;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_writew(host, DATA, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u16 *pdata = buf;
+ for (; cnt >= 2; cnt -= 2)
+ mci_writew(host, DATA, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ if (!sg_next(host->sg))
+ mci_writew(host, DATA, host->part_buf16);
+ }
+}
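
The push helpers above stage sub-word tail bytes in host->part_buf so that a scatterlist entry whose length is not a multiple of the FIFO word width can be glued to the head of the next entry. A minimal, driver-independent sketch of that staging logic (WORD_SIZE, struct stage and the emit callback are illustrative names, not part of the patch):

#include <string.h>

#define WORD_SIZE 2	/* 16-bit FIFO word, as in dw_mci_push_data16() */

struct stage {
	unsigned char buf[WORD_SIZE];	/* partial word carried between calls */
	int count;			/* valid bytes currently in buf */
};

/* Consume cnt bytes from p; emit whole words only, keep the tail staged. */
static void stage_push(struct stage *s, const unsigned char *p, int cnt,
		       void (*emit)(const unsigned char *word))
{
	while (cnt > 0) {
		int take = WORD_SIZE - s->count;

		if (take > cnt)
			take = cnt;
		memcpy(s->buf + s->count, p, take);
		s->count += take;
		p += take;
		cnt -= take;
		if (s->count == WORD_SIZE) {	/* full word: flush to FIFO */
			emit(s->buf);
			s->count = 0;
		}
	}
}

The driver adds one twist on top of this: when the current scatterlist entry is the last one (sg_next() returns NULL), the partial word is written out immediately, padding included, because no further bytes will arrive to complete it.
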
- cnt = cnt >> 1;
- while (cnt > 0) {
- *pdata++ = mci_readw(host, DATA);
- cnt--;
+static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x1)) {
+ while (cnt >= 2) {
+ /* pull data from fifo into aligned buffer */
+ u16 aligned_buf[64];
+ int len = min(cnt & -2, (int)sizeof(aligned_buf));
+ int items = len >> 1;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_readw(host, DATA);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u16 *pdata = buf;
+ for (; cnt >= 2; cnt -= 2)
+ *pdata++ = mci_readw(host, DATA);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf16 = mci_readw(host, DATA);
+ dw_mci_pull_final_bytes(host, buf, cnt);
}
}
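
On architectures without CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, the new code never dereferences a misaligned u16/u32/u64 pointer; it bounces data through a small, naturally aligned stack array and lets memcpy() deal with the misaligned side. The pattern in isolation (a sketch; fifo_read stands in for mci_readw(), and the 64-entry buffer mirrors the patch, though any aligned scratch size works):

#include <string.h>

/* Read cnt bytes of 16-bit FIFO data into a possibly unaligned buffer. */
static void pull_unaligned16(void *buf, int cnt,
			     unsigned short (*fifo_read)(void))
{
	unsigned short bounce[64];	/* naturally aligned scratch space */

	while (cnt >= 2) {
		int len = cnt & ~1;	/* whole 16-bit words only */
		int i, items;

		if (len > (int)sizeof(bounce))
			len = sizeof(bounce);
		items = len >> 1;
		for (i = 0; i < items; i++)
			bounce[i] = fifo_read();  /* aligned accesses only */
		memcpy(buf, bounce, len);	  /* memcpy tolerates any alignment */
		buf = (char *)buf + len;
		cnt -= len;
	}
}
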
static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
{
- u32 *pdata = (u32 *)buf;
-
- WARN_ON(cnt % 4 != 0);
- WARN_ON((unsigned long)pdata & 0x3);
-
- cnt = cnt >> 2;
- while (cnt > 0) {
- mci_writel(host, DATA, *pdata++);
- cnt--;
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (!sg_next(host->sg) || host->part_buf_count == 4) {
+ mci_writel(host, DATA, host->part_buf32);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x3)) {
+ while (cnt >= 4) {
+ u32 aligned_buf[32];
+ int len = min(cnt & -4, (int)sizeof(aligned_buf));
+ int items = len >> 2;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_writel(host, DATA, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u32 *pdata = buf;
+ for (; cnt >= 4; cnt -= 4)
+ mci_writel(host, DATA, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ if (!sg_next(host->sg))
+ mci_writel(host, DATA, host->part_buf32);
}
}
static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
{
- u32 *pdata = (u32 *)buf;
-
- WARN_ON(cnt % 4 != 0);
- WARN_ON((unsigned long)pdata & 0x3);
-
- cnt = cnt >> 2;
- while (cnt > 0) {
- *pdata++ = mci_readl(host, DATA);
- cnt--;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x3)) {
+ while (cnt >= 4) {
+ /* pull data from fifo into aligned buffer */
+ u32 aligned_buf[32];
+ int len = min(cnt & -4, (int)sizeof(aligned_buf));
+ int items = len >> 2;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_readl(host, DATA);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u32 *pdata = buf;
+ for (; cnt >= 4; cnt -= 4)
+ *pdata++ = mci_readl(host, DATA);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf32 = mci_readl(host, DATA);
+ dw_mci_pull_final_bytes(host, buf, cnt);
}
}
static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
{
- u64 *pdata = (u64 *)buf;
-
- WARN_ON(cnt % 8 != 0);
-
- cnt = cnt >> 3;
- while (cnt > 0) {
- mci_writeq(host, DATA, *pdata++);
- cnt--;
+ /* try and push anything in the part_buf */
+ if (unlikely(host->part_buf_count)) {
+ int len = dw_mci_push_part_bytes(host, buf, cnt);
+ buf += len;
+ cnt -= len;
+ if (!sg_next(host->sg) || host->part_buf_count == 8) {
+ mci_writeq(host, DATA, host->part_buf);
+ host->part_buf_count = 0;
+ }
+ }
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ /* memcpy from input buffer into aligned buffer */
+ memcpy(aligned_buf, buf, len);
+ buf += len;
+ cnt -= len;
+ /* push data from aligned buffer into fifo */
+ for (i = 0; i < items; ++i)
+ mci_writeq(host, DATA, aligned_buf[i]);
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+ for (; cnt >= 8; cnt -= 8)
+ mci_writeq(host, DATA, *pdata++);
+ buf = pdata;
+ }
+ /* put anything remaining in the part_buf */
+ if (cnt) {
+ dw_mci_set_part_bytes(host, buf, cnt);
+ if (!sg_next(host->sg))
+ mci_writeq(host, DATA, host->part_buf);
}
}
static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
{
- u64 *pdata = (u64 *)buf;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ if (unlikely((unsigned long)buf & 0x7)) {
+ while (cnt >= 8) {
+ /* pull data from fifo into aligned buffer */
+ u64 aligned_buf[16];
+ int len = min(cnt & -8, (int)sizeof(aligned_buf));
+ int items = len >> 3;
+ int i;
+ for (i = 0; i < items; ++i)
+ aligned_buf[i] = mci_readq(host, DATA);
+ /* memcpy from aligned buffer into output buffer */
+ memcpy(buf, aligned_buf, len);
+ buf += len;
+ cnt -= len;
+ }
+ } else
+#endif
+ {
+ u64 *pdata = buf;
+ for (; cnt >= 8; cnt -= 8)
+ *pdata++ = mci_readq(host, DATA);
+ buf = pdata;
+ }
+ if (cnt) {
+ host->part_buf = mci_readq(host, DATA);
+ dw_mci_pull_final_bytes(host, buf, cnt);
+ }
+}
- WARN_ON(cnt % 8 != 0);
+static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
+{
+ int len;
- cnt = cnt >> 3;
- while (cnt > 0) {
- *pdata++ = mci_readq(host, DATA);
- cnt--;
- }
+ /* get remaining partial bytes */
+ len = dw_mci_pull_part_bytes(host, buf, cnt);
+ if (unlikely(len == cnt))
+ return;
+ buf += len;
+ cnt -= len;
+
+ /* get the rest of the data */
+ host->pull_data(host, buf, cnt);
}
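
dw_mci_pull_data() drains whatever is still staged in part_buf before delegating to the width-specific pull function, and the PIO read loop below now has to count those staged bytes when it sizes a transfer. The accounting, pulled out as a sketch (the helper name is illustrative; SDMMC_GET_FCNT and data_shift are the driver's own):

/*
 * Bytes the PIO reader may consume in one pass: bytes already staged
 * in part_buf plus whole FIFO words scaled to bytes by the data width.
 */
static int pio_bytes_ready(struct dw_mci *host)
{
	u32 fifo_words = SDMMC_GET_FCNT(mci_readl(host, STATUS));

	return host->part_buf_count + (fifo_words << host->data_shift);
}

The write path is the mirror image: free FIFO space is (fifo_depth - FCNT) << data_shift, minus the bytes already parked in part_buf.
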
static void dw_mci_read_data_pio(struct dw_mci *host)
@@ -1037,9 +1265,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
unsigned int nbytes = 0, len;
do {
- len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+ len = host->part_buf_count +
+ (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
if (offset + len <= sg->length) {
- host->pull_data(host, (void *)(buf + offset), len);
+ dw_mci_pull_data(host, (void *)(buf + offset), len);
offset += len;
nbytes += len;
@@ -1055,8 +1284,8 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
}
} else {
unsigned int remaining = sg->length - offset;
- host->pull_data(host, (void *)(buf + offset),
- remaining);
+ dw_mci_pull_data(host, (void *)(buf + offset),
+ remaining);
nbytes += remaining;
flush_dcache_page(sg_page(sg));
@@ -1066,7 +1295,7 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
offset = len - remaining;
buf = sg_virt(sg);
- host->pull_data(host, buf, offset);
+ dw_mci_pull_data(host, buf, offset);
nbytes += offset;
}
@@ -1083,7 +1312,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
- len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
host->pio_offset = offset;
data->bytes_xfered += nbytes;
return;
@@ -1105,8 +1333,9 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
unsigned int nbytes = 0, len;
do {
- len = SDMMC_FIFO_SZ -
- (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
+ len = ((host->fifo_depth -
+ SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
+ - host->part_buf_count;
if (offset + len <= sg->length) {
host->push_data(host, (void *)(buf + offset), len);
@@ -1151,10 +1380,8 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
-
host->pio_offset = offset;
data->bytes_xfered += nbytes;
-
return;
done:
@@ -1202,7 +1429,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -1211,7 +1437,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
host->data_status = status;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
+ SDMMC_INT_SBE | SDMMC_INT_EBE)))
+ tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
@@ -1229,13 +1457,13 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_RXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
- if (host->sg)
+ if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
dw_mci_read_data_pio(host);
}
if (pending & SDMMC_INT_TXDR) {
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
- if (host->sg)
+ if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
dw_mci_write_data_pio(host);
}
@@ -1246,7 +1474,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CD) {
mci_writel(host, RINTSTS, SDMMC_INT_CD);
- tasklet_schedule(&host->card_tasklet);
+ queue_work(dw_mci_card_workqueue, &host->card_work);
}
} while (pass_count++ < 5);
@@ -1265,9 +1493,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void dw_mci_tasklet_card(unsigned long data)
+static void dw_mci_work_routine_card(struct work_struct *work)
{
- struct dw_mci *host = (struct dw_mci *)data;
+ struct dw_mci *host = container_of(work, struct dw_mci, card_work);
int i;
for (i = 0; i < host->num_slots; i++) {
@@ -1279,22 +1507,21 @@ static void dw_mci_tasklet_card(unsigned long data)
present = dw_mci_get_cd(mmc);
while (present != slot->last_detect_state) {
- spin_lock(&host->lock);
-
dev_dbg(&slot->mmc->class_dev, "card %s\n",
present ? "inserted" : "removed");
+ /* Power up slot (before spin_lock, may sleep) */
+ if (present != 0 && host->pdata->setpower)
+ host->pdata->setpower(slot->id, mmc->ocr_avail);
+
+ spin_lock_bh(&host->lock);
+
/* Card change detected */
slot->last_detect_state = present;
- /* Power up slot */
- if (present != 0) {
- if (host->pdata->setpower)
- host->pdata->setpower(slot->id,
- mmc->ocr_avail);
-
+ /* Mark card as present if applicable */
+ if (present != 0)
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
- }
/* Clean up queue if present */
mrq = slot->mrq;
@@ -1344,8 +1571,6 @@ static void dw_mci_tasklet_card(unsigned long data)
/* Power down slot */
if (present == 0) {
- if (host->pdata->setpower)
- host->pdata->setpower(slot->id, 0);
clear_bit(DW_MMC_CARD_PRESENT, &slot->flags);
/*
@@ -1367,7 +1592,12 @@ static void dw_mci_tasklet_card(unsigned long data)
}
- spin_unlock(&host->lock);
+ spin_unlock_bh(&host->lock);
+
+ /* Power down slot (after spin_unlock, may sleep) */
+ if (present == 0 && host->pdata->setpower)
+ host->pdata->setpower(slot->id, 0);
+
present = dw_mci_get_cd(mmc);
}
@@ -1467,7 +1697,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
* Card may have been plugged in prior to boot so we
* need to run the detect tasklet
*/
- tasklet_schedule(&host->card_tasklet);
+ queue_work(dw_mci_card_workqueue, &host->card_work);
return 0;
}
@@ -1645,8 +1875,19 @@ static int dw_mci_probe(struct platform_device *pdev)
* FIFO threshold settings RxMark = fifo_size / 2 - 1,
* Tx Mark = fifo_size / 2 DMA Size = 8
*/
- fifo_size = mci_readl(host, FIFOTH);
- fifo_size = (fifo_size >> 16) & 0x7ff;
+ if (!host->pdata->fifo_depth) {
+ /*
+ * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
+ * have been overwritten by the bootloader, just like we're
+ * about to do, so if you know the value for your hardware, you
+ * should put it in the platform data.
+ */
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
+ } else {
+ fifo_size = host->pdata->fifo_depth;
+ }
+ host->fifo_depth = fifo_size;
host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
((fifo_size/2) << 0));
mci_writel(host, FIFOTH, host->fifoth_val);
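
Probe now derives the watermarks from the actual FIFO depth instead of the removed SDMMC_FIFO_SZ constant. Assuming the FIFOTH layout used here (MSize in bits 30:28, RX_WMark in 26:16, TX_WMark in 11:0), the computation looks like this for a couple of plausible depths (a worked sketch, not driver code):

/* FIFOTH = (MSize << 28) | (RX_WMark << 16) | TX_WMark */
static u32 make_fifoth(unsigned int fifo_depth)
{
	return (0x2 << 28) |			/* DMA multiple-transaction size 8 */
	       ((fifo_depth / 2 - 1) << 16) |	/* RX watermark */
	       (fifo_depth / 2);		/* TX watermark */
}

/*
 * fifo_depth = 32 -> RX_WMark = 15, TX_WMark = 16
 * fifo_depth = 64 -> RX_WMark = 31, TX_WMark = 32
 */
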
@@ -1656,12 +1897,15 @@ static int dw_mci_probe(struct platform_device *pdev)
mci_writel(host, CLKSRC, 0);
tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
- tasklet_init(&host->card_tasklet,
- dw_mci_tasklet_card, (unsigned long)host);
+ dw_mci_card_workqueue = alloc_workqueue("dw-mci-card",
+ WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
+ if (!dw_mci_card_workqueue)
+ goto err_dmaunmap;
+ INIT_WORK(&host->card_work, dw_mci_work_routine_card);
ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host);
if (ret)
- goto err_dmaunmap;
+ goto err_workqueue;
platform_set_drvdata(pdev, host);
@@ -1690,7 +1934,9 @@ static int dw_mci_probe(struct platform_device *pdev)
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
dev_info(&pdev->dev, "DW MMC controller at irq %d, "
- "%d bit host data width\n", irq, width);
+ "%d bit host data width, "
+ "%u deep fifo\n",
+ irq, width, fifo_size);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
@@ -1705,6 +1951,9 @@ err_init_slot:
}
free_irq(irq, host);
+err_workqueue:
+ destroy_workqueue(dw_mci_card_workqueue);
+
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
@@ -1744,6 +1993,7 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
mci_writel(host, CLKSRC, 0);
free_irq(platform_get_irq(pdev, 0), host);
+ destroy_workqueue(dw_mci_card_workqueue);
dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 23c662af561..027d3773539 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -118,7 +118,6 @@
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
-#define SDMMC_FIFO_SZ 32
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -134,22 +133,22 @@
/* Register access macros */
#define mci_readl(dev, reg) \
- __raw_readl(dev->regs + SDMMC_##reg)
+ __raw_readl((dev)->regs + SDMMC_##reg)
#define mci_writel(dev, reg, value) \
- __raw_writel((value), dev->regs + SDMMC_##reg)
+ __raw_writel((value), (dev)->regs + SDMMC_##reg)
/* 16-bit FIFO access macros */
#define mci_readw(dev, reg) \
- __raw_readw(dev->regs + SDMMC_##reg)
+ __raw_readw((dev)->regs + SDMMC_##reg)
#define mci_writew(dev, reg, value) \
- __raw_writew((value), dev->regs + SDMMC_##reg)
+ __raw_writew((value), (dev)->regs + SDMMC_##reg)
/* 64-bit FIFO access macros */
#ifdef readq
#define mci_readq(dev, reg) \
- __raw_readq(dev->regs + SDMMC_##reg)
+ __raw_readq((dev)->regs + SDMMC_##reg)
#define mci_writeq(dev, reg, value) \
- __raw_writeq((value), dev->regs + SDMMC_##reg)
+ __raw_writeq((value), (dev)->regs + SDMMC_##reg)
#else
/*
* Dummy readq implementation for architectures that don't define it.
@@ -160,9 +159,9 @@
* rest of the code free from ifdefs.
*/
#define mci_readq(dev, reg) \
- (*(volatile u64 __force *)(dev->regs + SDMMC_##reg))
+ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg))
#define mci_writeq(dev, reg, value) \
- (*(volatile u64 __force *)(dev->regs + SDMMC_##reg) = value)
+ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value))
#endif
#endif /* _DW_MMC_H_ */
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index fe140724a02..fef7140eb1d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -226,6 +226,9 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
return;
}
+ /* initialize pre request cookie */
+ host->next_data.cookie = 1;
+
/* Try to acquire a generic DMA engine slave channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -335,7 +338,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dir = DMA_FROM_DEVICE;
}
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ if (!data->host_cookie)
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
/*
* Use of DMA with scatter-gather is impossible.
@@ -353,7 +357,8 @@ static void mmci_dma_data_error(struct mmci_host *host)
dmaengine_terminate_all(host->dma_current);
}
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct mmci_host_next *next)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -364,13 +369,20 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
};
- struct mmc_data *data = host->data;
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
int nr_sg;
- host->dma_current = NULL;
+ /* Check if next job is already prepared */
+ if (data->host_cookie && !next &&
+ host->dma_current && host->dma_desc_current)
+ return 0;
+
+ if (!next) {
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ }
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_FROM_DEVICE;
@@ -385,7 +397,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
return -EINVAL;
/* If less than or equal to the fifo size, don't bother with DMA */
- if (host->size <= variant->fifosize)
+ if (data->blksz * data->blocks <= variant->fifosize)
return -EINVAL;
device = chan->device;
@@ -399,14 +411,38 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
if (!desc)
goto unmap_exit;
- /* Okay, go for it. */
- host->dma_current = chan;
+ if (next) {
+ next->dma_chan = chan;
+ next->dma_desc = desc;
+ } else {
+ host->dma_current = chan;
+ host->dma_desc_current = desc;
+ }
+
+ return 0;
+ unmap_exit:
+ if (!next)
+ dmaengine_terminate_all(chan);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ return -ENOMEM;
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+ int ret;
+ struct mmc_data *data = host->data;
+
+ ret = mmci_dma_prep_data(host, host->data, NULL);
+ if (ret)
+ return ret;
+
+ /* Okay, go for it. */
dev_vdbg(mmc_dev(host->mmc),
"Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
data->sg_len, data->blksz, data->blocks, data->flags);
- dmaengine_submit(desc);
- dma_async_issue_pending(chan);
+ dmaengine_submit(host->dma_desc_current);
+ dma_async_issue_pending(host->dma_current);
datactrl |= MCI_DPSM_DMAENABLE;
@@ -421,14 +457,90 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
host->base + MMCIMASK0);
return 0;
+}
-unmap_exit:
- dmaengine_terminate_all(chan);
- dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
- return -ENOMEM;
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+ struct mmci_host_next *next = &host->next_data;
+
+ if (data->host_cookie && data->host_cookie != next->cookie) {
+ printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+ " host->next_data.cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ if (!data->host_cookie)
+ return;
+
+ host->dma_desc_current = next->dma_desc;
+ host->dma_current = next->dma_chan;
+
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct mmci_host_next *nd = &host->next_data;
+
+ if (!data)
+ return;
+
+ if (data->host_cookie) {
+ data->host_cookie = 0;
+ return;
+ }
+
+ /* prepare only if a DMA channel is configured for this direction */
+ if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
+ ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
+ if (mmci_dma_prep_data(host, data, nd))
+ data->host_cookie = 0;
+ else
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+ }
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+
+ if (!data)
+ return;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ /* only act if a DMA channel is in use */
+ if (chan) {
+ if (err)
+ dmaengine_terminate_all(chan);
+ if (err || data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, dir);
+ mrq->data->host_cookie = 0;
+ }
+}
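
Together, pre_req/post_req implement the mmc core's asynchronous request interface: the core can DMA-map the next request's scatterlist while the current one is still on the bus, and data->host_cookie records whether that early mapping happened (0 means "not prepared"). A sketch of the calling convention from the core's side, assuming completion is signalled elsewhere (issue_with_prefetch is an illustrative name, not a real kernel function):

static void issue_with_prefetch(struct mmc_host *mmc,
				struct mmc_request *cur,
				struct mmc_request *next)
{
	if (mmc->ops->pre_req && next)
		mmc->ops->pre_req(mmc, next, false);	/* map next's sglist now */

	mmc->ops->request(mmc, cur);			/* start the current I/O */
	/* ... wait for cur to complete ... */

	if (mmc->ops->post_req)
		mmc->ops->post_req(mmc, cur, 0);	/* unmap; cookie back to 0 */
}

The cookie validation in mmci_get_next_data() then catches the case where a request arrives claiming to be prepared but the host's bookkeeping disagrees.
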
+
#else
/* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
static inline void mmci_dma_setup(struct mmci_host *host)
{
}
@@ -449,6 +561,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datac
{
return -ENOSYS;
}
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
#endif
static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@ -872,6 +988,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq = mrq;
+ if (mrq->data)
+ mmci_get_next_data(host, mrq->data);
+
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
mmci_start_data(host, mrq->data);
@@ -986,6 +1105,8 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
static const struct mmc_host_ops mmci_ops = {
.request = mmci_request,
+ .pre_req = mmci_pre_request,
+ .post_req = mmci_post_request,
.set_ios = mmci_set_ios,
.get_ro = mmci_get_ro,
.get_cd = mmci_get_cd,
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 2164e8c6476..79e4143ab9d 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -166,6 +166,12 @@ struct clk;
struct variant_data;
struct dma_chan;
+struct mmci_host_next {
+ struct dma_async_tx_descriptor *dma_desc;
+ struct dma_chan *dma_chan;
+ s32 cookie;
+};
+
struct mmci_host {
phys_addr_t phybase;
void __iomem *base;
@@ -203,6 +209,8 @@ struct mmci_host {
struct dma_chan *dma_current;
struct dma_chan *dma_rx_channel;
struct dma_chan *dma_tx_channel;
+ struct dma_async_tx_descriptor *dma_desc_current;
+ struct mmci_host_next next_data;
#define dma_inprogress(host) ((host)->dma_current)
#else
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 99d39a6a103..d513d47364d 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -564,40 +564,38 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
{
- unsigned int ssp_rate, bit_rate;
- u32 div1, div2;
+ unsigned int ssp_clk, ssp_sck;
+ u32 clock_divide, clock_rate;
u32 val;
- ssp_rate = clk_get_rate(host->clk);
+ ssp_clk = clk_get_rate(host->clk);
- for (div1 = 2; div1 < 254; div1 += 2) {
- div2 = ssp_rate / rate / div1;
- if (div2 < 0x100)
+ for (clock_divide = 2; clock_divide <= 254; clock_divide += 2) {
+ clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide);
+ clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0;
+ if (clock_rate <= 255)
break;
}
- if (div1 >= 254) {
+ if (clock_divide > 254) {
dev_err(mmc_dev(host->mmc),
"%s: cannot set clock to %d\n", __func__, rate);
return;
}
- if (div2 == 0)
- bit_rate = ssp_rate / div1;
- else
- bit_rate = ssp_rate / div1 / div2;
+ ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
val = readl(host->base + HW_SSP_TIMING);
val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
- val |= BF_SSP(div1, TIMING_CLOCK_DIVIDE);
- val |= BF_SSP(div2 - 1, TIMING_CLOCK_RATE);
+ val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
+ val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
writel(val, host->base + HW_SSP_TIMING);
- host->clk_rate = bit_rate;
+ host->clk_rate = ssp_sck;
dev_dbg(mmc_dev(host->mmc),
- "%s: div1 %d, div2 %d, ssp %d, bit %d, rate %d\n",
- __func__, div1, div2, ssp_rate, bit_rate, rate);
+ "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n",
+ __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate);
}
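
The rewritten loop searches for the smallest even CLOCK_DIVIDE in 2..254 whose companion CLOCK_RATE value fits in 8 bits, giving SSP_SCK = SSP_CLK / (CLOCK_DIVIDE * (CLOCK_RATE + 1)) and, thanks to DIV_ROUND_UP, never overshooting the requested rate. The same search as a standalone sketch (DIV_ROUND_UP expanded; the function name is illustrative):

/* Pick a divider pair for the requested rate, given the SSP source clock. */
static int ssp_pick_dividers(unsigned int ssp_clk, unsigned int rate,
			     unsigned int *divide, unsigned int *rate_field)
{
	unsigned int d, r;

	for (d = 2; d <= 254; d += 2) {
		r = (ssp_clk + rate * d - 1) / (rate * d);  /* DIV_ROUND_UP */
		r = r ? r - 1 : 0;
		if (r <= 255) {
			*divide = d;
			*rate_field = r;
			return 0;  /* actual SCK = ssp_clk / (d * (r + 1)) */
		}
	}
	return -1;  /* rate unreachable even at maximum division */
}
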
static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index dedf3dab8a3..21e4a799df4 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
@@ -33,6 +34,7 @@
#include <linux/semaphore.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
@@ -116,15 +118,13 @@
#define OMAP_MMC4_DEVID 3
#define OMAP_MMC5_DEVID 4
+#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20
#define OMAP_MMC_MASTER_CLOCK 96000000
+#define OMAP_MMC_MIN_CLOCK 400000
+#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
-/* Timeouts for entering power saving states on inactivity, msec */
-#define OMAP_MMC_DISABLED_TIMEOUT 100
-#define OMAP_MMC_SLEEP_TIMEOUT 1000
-#define OMAP_MMC_OFF_TIMEOUT 8000
-
/*
* One controller can have multiple slots, like on some omap boards using
* omap.c controller driver. Luckily this is not currently done on any known
@@ -141,6 +141,11 @@
#define OMAP_HSMMC_WRITE(base, reg, val) \
__raw_writel((val), (base) + OMAP_HSMMC_##reg)
+struct omap_hsmmc_next {
+ unsigned int dma_len;
+ s32 cookie;
+};
+
struct omap_hsmmc_host {
struct device *dev;
struct mmc_host *mmc;
@@ -148,7 +153,6 @@ struct omap_hsmmc_host {
struct mmc_command *cmd;
struct mmc_data *data;
struct clk *fclk;
- struct clk *iclk;
struct clk *dbclk;
/*
* vcc == configured supply
@@ -184,6 +188,7 @@ struct omap_hsmmc_host {
int reqs_blocked;
int use_reg;
int req_in_progress;
+ struct omap_hsmmc_next next_data;
struct omap_mmc_platform_data *pdata;
};
@@ -548,6 +553,15 @@ static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)
}
/*
+ * Start clock to the card
+ */
+static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host)
+{
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+}
+
+/*
* Stop clock to the card
*/
static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)
@@ -584,6 +598,81 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);
}
+/* Calculate divisor for the given clock frequency */
+static u16 calc_divisor(struct mmc_ios *ios)
+{
+ u16 dsor = 0;
+
+ if (ios->clock) {
+ dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock);
+ if (dsor > 250)
+ dsor = 250;
+ }
+
+ return dsor;
+}
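
With the 96 MHz master clock defined earlier, rounding the divisor up guarantees the bus never runs faster than requested; two representative values (worked arithmetic, not driver output):

/*
 * OMAP_MMC_MASTER_CLOCK = 96000000:
 *   ios->clock = 400 kHz -> DIV_ROUND_UP(96000000, 400000)   = 240 (exactly 400 kHz)
 *   ios->clock = 52 MHz  -> DIV_ROUND_UP(96000000, 52000000) = 2   (bus runs at 48 MHz)
 */
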
+
+static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ unsigned long regval;
+ unsigned long timeout;
+
+ dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock);
+
+ omap_hsmmc_stop_clock(host);
+
+ regval = OMAP_HSMMC_READ(host->base, SYSCTL);
+ regval = regval & ~(CLKD_MASK | DTO_MASK);
+ regval = regval | (calc_divisor(ios) << 6) | (DTO << 16);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
+ OMAP_HSMMC_WRITE(host->base, SYSCTL,
+ OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+
+ /* Wait till the ICS bit is set */
+ timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
+ while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
+ && time_before(jiffies, timeout))
+ cpu_relax();
+
+ omap_hsmmc_start_clock(host);
+}
+
+static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
+ break;
+ case MMC_BUS_WIDTH_4:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
+ break;
+ case MMC_BUS_WIDTH_1:
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
+ break;
+ }
+}
+
+static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host)
+{
+ struct mmc_ios *ios = &host->mmc->ios;
+ u32 con;
+
+ con = OMAP_HSMMC_READ(host->base, CON);
+ if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ OMAP_HSMMC_WRITE(host->base, CON, con | OD);
+ else
+ OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+}
+
#ifdef CONFIG_PM
/*
@@ -595,8 +684,7 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
struct mmc_ios *ios = &host->mmc->ios;
struct omap_mmc_platform_data *pdata = host->pdata;
int context_loss = 0;
- u32 hctl, capa, con;
- u16 dsor = 0;
+ u32 hctl, capa;
unsigned long timeout;
if (pdata->get_context_loss_count) {
@@ -658,54 +746,12 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
if (host->power_mode == MMC_POWER_OFF)
goto out;
- con = OMAP_HSMMC_READ(host->base, CON);
- switch (ios->bus_width) {
- case MMC_BUS_WIDTH_8:
- OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
- break;
- case MMC_BUS_WIDTH_4:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
- break;
- case MMC_BUS_WIDTH_1:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
- break;
- }
-
- if (ios->clock) {
- dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
- if (dsor < 1)
- dsor = 1;
-
- if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
- dsor++;
-
- if (dsor > 250)
- dsor = 250;
- }
-
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);
- OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16));
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
+ omap_hsmmc_set_bus_width(host);
- timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
- && time_before(jiffies, timeout))
- ;
+ omap_hsmmc_set_clock(host);
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+ omap_hsmmc_set_bus_mode(host);
- con = OMAP_HSMMC_READ(host->base, CON);
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
- OMAP_HSMMC_WRITE(host->base, CON, con | OD);
- else
- OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
out:
host->context_loss = context_loss;
@@ -973,14 +1019,14 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
* Readable error output
*/
#ifdef CONFIG_MMC_DEBUG
-static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
+static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)
{
/* --- means reserved bit without definition at documentation */
static const char *omap_hsmmc_status_bits[] = {
- "CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ",
- "OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC",
- "CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---",
- "---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---"
+ "CC" , "TC" , "BGE", "---", "BWR" , "BRR" , "---" , "---" ,
+ "CIRQ", "OBI" , "---", "---", "---" , "---" , "---" , "ERRI",
+ "CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" ,
+ "ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"
};
char res[256];
char *buf = res;
@@ -997,6 +1043,11 @@ static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)
dev_dbg(mmc_dev(host->mmc), "%s\n", res);
}
+#else
+static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host,
+ u32 status)
+{
+}
#endif /* CONFIG_MMC_DEBUG */
/*
@@ -1055,9 +1106,7 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status);
if (status & ERR) {
-#ifdef CONFIG_MMC_DEBUG
- omap_hsmmc_report_irq(host, status);
-#endif
+ omap_hsmmc_dbg_report_irq(host, status);
if ((status & CMD_TIMEOUT) ||
(status & CMD_CRC)) {
if (host->cmd) {
@@ -1155,8 +1204,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
int ret;
/* Disable the clocks */
- clk_disable(host->fclk);
- clk_disable(host->iclk);
+ pm_runtime_put_sync(host->dev);
if (host->got_dbclk)
clk_disable(host->dbclk);
@@ -1167,8 +1215,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
if (!ret)
ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
vdd);
- clk_enable(host->iclk);
- clk_enable(host->fclk);
+ pm_runtime_get_sync(host->dev);
if (host->got_dbclk)
clk_enable(host->dbclk);
@@ -1322,7 +1369,7 @@ static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host,
static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
{
struct omap_hsmmc_host *host = cb_data;
- struct mmc_data *data = host->mrq->data;
+ struct mmc_data *data;
int dma_ch, req_in_progress;
if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
@@ -1337,6 +1384,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
+ data = host->mrq->data;
host->dma_sg_idx++;
if (host->dma_sg_idx < host->dma_len) {
/* Fire up the next transfer. */
@@ -1346,8 +1394,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
return;
}
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (!data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
req_in_progress = host->req_in_progress;
dma_ch = host->dma_ch;
@@ -1365,6 +1414,45 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
}
}
+static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host,
+ struct mmc_data *data,
+ struct omap_hsmmc_next *next)
+{
+ int dma_len;
+
+ if (!next && data->host_cookie &&
+ data->host_cookie != host->next_data.cookie) {
+ printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+ " host->next_data.cookie %d\n",
+ __func__, data->host_cookie, host->next_data.cookie);
+ data->host_cookie = 0;
+ }
+
+ /* Check if next job is already prepared */
+ if (next ||
+ (!next && data->host_cookie != host->next_data.cookie)) {
+ dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+
+ } else {
+ dma_len = host->next_data.dma_len;
+ host->next_data.dma_len = 0;
+ }
+
+ if (dma_len == 0)
+ return -EINVAL;
+
+ if (next) {
+ next->dma_len = dma_len;
+ data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+ } else
+ host->dma_len = dma_len;
+
+ return 0;
+}
+
/*
* Routine to configure and start DMA for the MMC card
*/
@@ -1398,9 +1486,10 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,
mmc_hostname(host->mmc), ret);
return ret;
}
+ ret = omap_hsmmc_pre_dma_transfer(host, data, NULL);
+ if (ret)
+ return ret;
- host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, omap_hsmmc_get_dma_dir(host, data));
host->dma_ch = dma_ch;
host->dma_sg_idx = 0;
@@ -1480,6 +1569,35 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)
return 0;
}
+static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ int err)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (host->use_dma) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
+ data->host_cookie = 0;
+ }
+}
+
+static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct omap_hsmmc_host *host = mmc_priv(mmc);
+
+ if (mrq->data->host_cookie) {
+ mrq->data->host_cookie = 0;
+ return;
+ }
+
+ if (host->use_dma)
+ if (omap_hsmmc_pre_dma_transfer(host, mrq->data,
+ &host->next_data))
+ mrq->data->host_cookie = 0;
+}
+
/*
* Request function. for read/write operation
*/
@@ -1528,13 +1646,9 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)
static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- u16 dsor = 0;
- unsigned long regval;
- unsigned long timeout;
- u32 con;
int do_send_init_stream = 0;
- mmc_host_enable(host->mmc);
+ pm_runtime_get_sync(host->dev);
if (ios->power_mode != host->power_mode) {
switch (ios->power_mode) {
@@ -1557,22 +1671,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* FIXME: set registers based only on changes to ios */
- con = OMAP_HSMMC_READ(host->base, CON);
- switch (mmc->ios.bus_width) {
- case MMC_BUS_WIDTH_8:
- OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
- break;
- case MMC_BUS_WIDTH_4:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT);
- break;
- case MMC_BUS_WIDTH_1:
- OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT);
- break;
- }
+ omap_hsmmc_set_bus_width(host);
if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {
/* Only MMC1 can interface at 3V without some flavor
@@ -1592,47 +1691,14 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
- if (ios->clock) {
- dsor = OMAP_MMC_MASTER_CLOCK / ios->clock;
- if (dsor < 1)
- dsor = 1;
-
- if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock)
- dsor++;
-
- if (dsor > 250)
- dsor = 250;
- }
- omap_hsmmc_stop_clock(host);
- regval = OMAP_HSMMC_READ(host->base, SYSCTL);
- regval = regval & ~(CLKD_MASK);
- regval = regval | (dsor << 6) | (DTO << 16);
- OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
-
- /* Wait till the ICS bit is set */
- timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS);
- while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS
- && time_before(jiffies, timeout))
- msleep(1);
-
- OMAP_HSMMC_WRITE(host->base, SYSCTL,
- OMAP_HSMMC_READ(host->base, SYSCTL) | CEN);
+ omap_hsmmc_set_clock(host);
if (do_send_init_stream)
send_init_stream(host);
- con = OMAP_HSMMC_READ(host->base, CON);
- if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN)
- OMAP_HSMMC_WRITE(host->base, CON, con | OD);
- else
- OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);
+ omap_hsmmc_set_bus_mode(host);
- if (host->power_mode == MMC_POWER_OFF)
- mmc_host_disable(host->mmc);
- else
- mmc_host_lazy_disable(host->mmc);
+ pm_runtime_put_autosuspend(host->dev);
}
static int omap_hsmmc_get_cd(struct mmc_host *mmc)
@@ -1688,230 +1754,12 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)
set_sd_bus_power(host);
}
-/*
- * Dynamic power saving handling, FSM:
- * ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF
- * ^___________| | |
- * |______________________|______________________|
- *
- * ENABLED: mmc host is fully functional
- * DISABLED: fclk is off
- * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep
- * REGSLEEP: fclk is off, voltage regulator is asleep
- * OFF: fclk is off, voltage regulator is off
- *
- * Transition handlers return the timeout for the next state transition
- * or negative error.
- */
-
-enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF};
-
-/* Handler for [ENABLED -> DISABLED] transition */
-static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
-{
- omap_hsmmc_context_save(host);
- clk_disable(host->fclk);
- host->dpm_state = DISABLED;
-
- dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n");
-
- if (host->power_mode == MMC_POWER_OFF)
- return 0;
-
- return OMAP_MMC_SLEEP_TIMEOUT;
-}
-
-/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
-static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
-{
- int err, new_state;
-
- if (!mmc_try_claim_host(host->mmc))
- return 0;
-
- clk_enable(host->fclk);
- omap_hsmmc_context_restore(host);
- if (mmc_card_can_sleep(host->mmc)) {
- err = mmc_card_sleep(host->mmc);
- if (err < 0) {
- clk_disable(host->fclk);
- mmc_release_host(host->mmc);
- return err;
- }
- new_state = CARDSLEEP;
- } else {
- new_state = REGSLEEP;
- }
- if (mmc_slot(host).set_sleep)
- mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0,
- new_state == CARDSLEEP);
- /* FIXME: turn off bus power and perhaps interrupts too */
- clk_disable(host->fclk);
- host->dpm_state = new_state;
-
- mmc_release_host(host->mmc);
-
- dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
- host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
-
- if (mmc_slot(host).no_off)
- return 0;
-
- if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
- mmc_slot(host).card_detect ||
- (mmc_slot(host).get_cover_state &&
- mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
- return OMAP_MMC_OFF_TIMEOUT;
-
- return 0;
-}
-
-/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */
-static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
-{
- if (!mmc_try_claim_host(host->mmc))
- return 0;
-
- if (mmc_slot(host).no_off)
- return 0;
-
- if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
- mmc_slot(host).card_detect ||
- (mmc_slot(host).get_cover_state &&
- mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) {
- mmc_release_host(host->mmc);
- return 0;
- }
-
- mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);
- host->vdd = 0;
- host->power_mode = MMC_POWER_OFF;
-
- dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n",
- host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
-
- host->dpm_state = OFF;
-
- mmc_release_host(host->mmc);
-
- return 0;
-}
-
-/* Handler for [DISABLED -> ENABLED] transition */
-static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host)
-{
- int err;
-
- err = clk_enable(host->fclk);
- if (err < 0)
- return err;
-
- omap_hsmmc_context_restore(host);
- host->dpm_state = ENABLED;
-
- dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n");
-
- return 0;
-}
-
-/* Handler for [SLEEP -> ENABLED] transition */
-static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host)
-{
- if (!mmc_try_claim_host(host->mmc))
- return 0;
-
- clk_enable(host->fclk);
- omap_hsmmc_context_restore(host);
- if (mmc_slot(host).set_sleep)
- mmc_slot(host).set_sleep(host->dev, host->slot_id, 0,
- host->vdd, host->dpm_state == CARDSLEEP);
- if (mmc_card_can_sleep(host->mmc))
- mmc_card_awake(host->mmc);
-
- dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n",
- host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
-
- host->dpm_state = ENABLED;
-
- mmc_release_host(host->mmc);
-
- return 0;
-}
-
-/* Handler for [OFF -> ENABLED] transition */
-static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host)
-{
- clk_enable(host->fclk);
-
- omap_hsmmc_context_restore(host);
- omap_hsmmc_conf_bus_power(host);
- mmc_power_restore_host(host->mmc);
-
- host->dpm_state = ENABLED;
-
- dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n");
-
- return 0;
-}
-
-/*
- * Bring MMC host to ENABLED from any other PM state.
- */
-static int omap_hsmmc_enable(struct mmc_host *mmc)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- switch (host->dpm_state) {
- case DISABLED:
- return omap_hsmmc_disabled_to_enabled(host);
- case CARDSLEEP:
- case REGSLEEP:
- return omap_hsmmc_sleep_to_enabled(host);
- case OFF:
- return omap_hsmmc_off_to_enabled(host);
- default:
- dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
- return -EINVAL;
- }
-}
-
-/*
- * Bring MMC host in PM state (one level deeper).
- */
-static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy)
-{
- struct omap_hsmmc_host *host = mmc_priv(mmc);
-
- switch (host->dpm_state) {
- case ENABLED: {
- int delay;
-
- delay = omap_hsmmc_enabled_to_disabled(host);
- if (lazy || delay < 0)
- return delay;
- return 0;
- }
- case DISABLED:
- return omap_hsmmc_disabled_to_sleep(host);
- case CARDSLEEP:
- case REGSLEEP:
- return omap_hsmmc_sleep_to_off(host);
- default:
- dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n");
- return -EINVAL;
- }
-}
-
static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- int err;
- err = clk_enable(host->fclk);
- if (err)
- return err;
- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n");
- omap_hsmmc_context_restore(host);
+ pm_runtime_get_sync(host->dev);
+
return 0;
}
@@ -1919,26 +1767,17 @@ static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy)
{
struct omap_hsmmc_host *host = mmc_priv(mmc);
- omap_hsmmc_context_save(host);
- clk_disable(host->fclk);
- dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n");
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
+
return 0;
}
static const struct mmc_host_ops omap_hsmmc_ops = {
.enable = omap_hsmmc_enable_fclk,
.disable = omap_hsmmc_disable_fclk,
- .request = omap_hsmmc_request,
- .set_ios = omap_hsmmc_set_ios,
- .get_cd = omap_hsmmc_get_cd,
- .get_ro = omap_hsmmc_get_ro,
- .init_card = omap_hsmmc_init_card,
- /* NYET -- enable_sdio_irq */
-};
-
-static const struct mmc_host_ops omap_hsmmc_ps_ops = {
- .enable = omap_hsmmc_enable,
- .disable = omap_hsmmc_disable,
+ .post_req = omap_hsmmc_post_req,
+ .pre_req = omap_hsmmc_pre_req,
.request = omap_hsmmc_request,
.set_ios = omap_hsmmc_set_ios,
.get_cd = omap_hsmmc_get_cd,
@@ -1968,15 +1807,12 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
host->dpm_state, mmc->nesting_cnt,
host->context_loss, context_loss);
- if (host->suspended || host->dpm_state == OFF) {
+ if (host->suspended) {
seq_printf(s, "host suspended, can't read registers\n");
return 0;
}
- if (clk_enable(host->fclk) != 0) {
- seq_printf(s, "can't read the regs\n");
- return 0;
- }
+ pm_runtime_get_sync(host->dev);
seq_printf(s, "SYSCONFIG:\t0x%08x\n",
OMAP_HSMMC_READ(host->base, SYSCONFIG));
@@ -1993,7 +1829,8 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)
seq_printf(s, "CAPA:\t\t0x%08x\n",
OMAP_HSMMC_READ(host->base, CAPA));
- clk_disable(host->fclk);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
return 0;
}
@@ -2077,14 +1914,12 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
host->mapbase = res->start;
host->base = ioremap(host->mapbase, SZ_4K);
host->power_mode = MMC_POWER_OFF;
+ host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
- if (mmc_slot(host).power_saving)
- mmc->ops = &omap_hsmmc_ps_ops;
- else
- mmc->ops = &omap_hsmmc_ops;
+ mmc->ops = &omap_hsmmc_ops;
/*
* If regulator_disable can only put vcc_aux to sleep then there is
@@ -2093,44 +1928,26 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (mmc_slot(host).vcc_aux_disable_is_sleep)
mmc_slot(host).no_off = 1;
- mmc->f_min = 400000;
- mmc->f_max = 52000000;
+ mmc->f_min = OMAP_MMC_MIN_CLOCK;
+ mmc->f_max = OMAP_MMC_MAX_CLOCK;
spin_lock_init(&host->irq_lock);
- host->iclk = clk_get(&pdev->dev, "ick");
- if (IS_ERR(host->iclk)) {
- ret = PTR_ERR(host->iclk);
- host->iclk = NULL;
- goto err1;
- }
host->fclk = clk_get(&pdev->dev, "fck");
if (IS_ERR(host->fclk)) {
ret = PTR_ERR(host->fclk);
host->fclk = NULL;
- clk_put(host->iclk);
goto err1;
}
omap_hsmmc_context_save(host);
mmc->caps |= MMC_CAP_DISABLE;
- mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT);
- /* we start off in DISABLED state */
- host->dpm_state = DISABLED;
- if (clk_enable(host->iclk) != 0) {
- clk_put(host->iclk);
- clk_put(host->fclk);
- goto err1;
- }
-
- if (mmc_host_enable(host->mmc) != 0) {
- clk_disable(host->iclk);
- clk_put(host->iclk);
- clk_put(host->fclk);
- goto err1;
- }
+ pm_runtime_enable(host->dev);
+ pm_runtime_get_sync(host->dev);
+ pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(host->dev);
if (cpu_is_omap2430()) {
host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
@@ -2240,8 +2057,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
omap_hsmmc_disable_irq(host);
- mmc_host_lazy_disable(host->mmc);
-
omap_hsmmc_protect_card(host);
mmc_add_host(mmc);
@@ -2259,6 +2074,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
}
omap_hsmmc_debugfs(mmc);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
return 0;
@@ -2274,10 +2091,9 @@ err_reg:
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
clk_put(host->fclk);
- clk_put(host->iclk);
if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
@@ -2299,7 +2115,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
struct resource *res;
if (host) {
- mmc_host_enable(host->mmc);
+ pm_runtime_get_sync(host->dev);
mmc_remove_host(host->mmc);
if (host->use_reg)
omap_hsmmc_reg_put(host);
@@ -2310,10 +2126,9 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
free_irq(mmc_slot(host).card_detect_irq, host);
flush_work_sync(&host->mmc_carddetect_work);
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
+ pm_runtime_put_sync(host->dev);
+ pm_runtime_disable(host->dev);
clk_put(host->fclk);
- clk_put(host->iclk);
if (host->got_dbclk) {
clk_disable(host->dbclk);
clk_put(host->dbclk);
@@ -2343,6 +2158,7 @@ static int omap_hsmmc_suspend(struct device *dev)
return 0;
if (host) {
+ pm_runtime_get_sync(host->dev);
host->suspended = 1;
if (host->pdata->suspend) {
ret = host->pdata->suspend(&pdev->dev,
@@ -2357,13 +2173,11 @@ static int omap_hsmmc_suspend(struct device *dev)
}
cancel_work_sync(&host->mmc_carddetect_work);
ret = mmc_suspend_host(host->mmc);
- mmc_host_enable(host->mmc);
+
if (ret == 0) {
omap_hsmmc_disable_irq(host);
OMAP_HSMMC_WRITE(host->base, HCTL,
OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- mmc_host_disable(host->mmc);
- clk_disable(host->iclk);
if (host->got_dbclk)
clk_disable(host->dbclk);
} else {
@@ -2375,9 +2189,8 @@ static int omap_hsmmc_suspend(struct device *dev)
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
- mmc_host_disable(host->mmc);
}
-
+ pm_runtime_put_sync(host->dev);
}
return ret;
}
@@ -2393,14 +2206,7 @@ static int omap_hsmmc_resume(struct device *dev)
return 0;
if (host) {
- ret = clk_enable(host->iclk);
- if (ret)
- goto clk_en_err;
-
- if (mmc_host_enable(host->mmc) != 0) {
- clk_disable(host->iclk);
- goto clk_en_err;
- }
+ pm_runtime_get_sync(host->dev);
if (host->got_dbclk)
clk_enable(host->dbclk);
@@ -2421,15 +2227,12 @@ static int omap_hsmmc_resume(struct device *dev)
if (ret == 0)
host->suspended = 0;
- mmc_host_lazy_disable(host->mmc);
+ pm_runtime_mark_last_busy(host->dev);
+ pm_runtime_put_autosuspend(host->dev);
}
return ret;
-clk_en_err:
- dev_dbg(mmc_dev(host->mmc),
- "Failed to enable MMC clocks during resume\n");
- return ret;
}
#else
@@ -2437,9 +2240,33 @@ clk_en_err:
#define omap_hsmmc_resume NULL
#endif
+static int omap_hsmmc_runtime_suspend(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_save(host);
+ dev_dbg(mmc_dev(host->mmc), "disabled\n");
+
+ return 0;
+}
+
+static int omap_hsmmc_runtime_resume(struct device *dev)
+{
+ struct omap_hsmmc_host *host;
+
+ host = platform_get_drvdata(to_platform_device(dev));
+ omap_hsmmc_context_restore(host);
+ dev_dbg(mmc_dev(host->mmc), "enabled\n");
+
+ return 0;
+}
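
These two callbacks are all that remains of the hand-rolled ENABLED/DISABLED/CARDSLEEP/REGSLEEP/OFF state machine deleted above: the runtime PM core now decides when to call them, and context save/restore rides along. The pattern the rest of the driver follows around register access is roughly this (a generic sketch assuming pm_runtime_enable() has already run for the device):

#include <linux/pm_runtime.h>

static void do_io_with_runtime_pm(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resume; context restored */

	/* ... touch the controller registers ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* suspend after MMC_AUTOSUSPEND_DELAY */
}
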
+
static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
.suspend = omap_hsmmc_suspend,
.resume = omap_hsmmc_resume,
+ .runtime_suspend = omap_hsmmc_runtime_suspend,
+ .runtime_resume = omap_hsmmc_runtime_resume,
};
static struct platform_driver omap_hsmmc_driver = {
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index 9ebd1d7759d..4b920b7621c 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -15,9 +15,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/sdhci-pltfm.h>
#include <mach/cns3xxx.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host)
@@ -86,7 +84,7 @@ static struct sdhci_ops sdhci_cns3xxx_ops = {
.set_clock = sdhci_cns3xxx_set_clock,
};
-struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
+static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
.ops = &sdhci_cns3xxx_ops,
.quirks = SDHCI_QUIRK_BROKEN_DMA |
SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
@@ -95,3 +93,43 @@ struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_NONSTANDARD_CLOCK,
};
+
+static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata);
+}
+
+static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static struct platform_driver sdhci_cns3xxx_driver = {
+ .driver = {
+ .name = "sdhci-cns3xxx",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_cns3xxx_probe,
+ .remove = __devexit_p(sdhci_cns3xxx_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+
+static int __init sdhci_cns3xxx_init(void)
+{
+ return platform_driver_register(&sdhci_cns3xxx_driver);
+}
+module_init(sdhci_cns3xxx_init);
+
+static void __exit sdhci_cns3xxx_exit(void)
+{
+ platform_driver_unregister(&sdhci_cns3xxx_driver);
+}
+module_exit(sdhci_cns3xxx_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
+MODULE_AUTHOR("Scott Shu, "
+ "Anton Vorontsov <avorontsov@mvista.com>");
+MODULE_LICENSE("GPL v2");
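
With probe and remove reduced to one-line wrappers around sdhci_pltfm_register()/sdhci_pltfm_unregister(), the only boilerplate left is the module init/exit pair. Kernels that provide the module_platform_driver() helper (it appeared upstream around this era, so treat its availability as an assumption) can collapse that too:

/* Replaces sdhci_cns3xxx_init()/sdhci_cns3xxx_exit() above: */
module_platform_driver(sdhci_cns3xxx_driver);
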
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index 2aeef4ffed8..f2d29dca442 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -22,7 +22,6 @@
#include <linux/io.h>
#include <linux/mmc/host.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
@@ -61,10 +60,50 @@ static struct sdhci_ops sdhci_dove_ops = {
.read_l = sdhci_dove_readl,
};
-struct sdhci_pltfm_data sdhci_dove_pdata = {
+static struct sdhci_pltfm_data sdhci_dove_pdata = {
.ops = &sdhci_dove_ops,
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_FORCE_DMA,
};
+
+static int __devinit sdhci_dove_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_dove_pdata);
+}
+
+static int __devexit sdhci_dove_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static struct platform_driver sdhci_dove_driver = {
+ .driver = {
+ .name = "sdhci-dove",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_dove_probe,
+ .remove = __devexit_p(sdhci_dove_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+
+static int __init sdhci_dove_init(void)
+{
+ return platform_driver_register(&sdhci_dove_driver);
+}
+module_init(sdhci_dove_init);
+
+static void __exit sdhci_dove_exit(void)
+{
+ platform_driver_unregister(&sdhci_dove_driver);
+}
+module_exit(sdhci_dove_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for Dove");
+MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
+ "Mike Rapoport <mike@compulab.co.il>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index a19967d0bfc..710b706f4fc 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -18,12 +18,10 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
-#include <linux/mmc/sdhci-pltfm.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <mach/hardware.h>
#include <mach/esdhc.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
@@ -31,7 +29,7 @@
#define SDHCI_VENDOR_SPEC 0xC0
#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002
-#define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0)
+#define ESDHC_FLAG_GPIO_FOR_CD (1 << 0)
/*
* The CMDTYPE of the CMD register (offset 0xE) should be set to
* "11" when the STOP CMD12 is issued on imx53 to abort one
@@ -67,14 +65,14 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
u32 val = readl(host->ioaddr + reg);
if (unlikely((reg == SDHCI_PRESENT_STATE)
- && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) {
+ && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD))) {
struct esdhc_platform_data *boarddata =
host->mmc->parent->platform_data;
if (boarddata && gpio_is_valid(boarddata->cd_gpio)
&& gpio_get_value(boarddata->cd_gpio))
/* no card, if a valid gpio says so... */
- val &= SDHCI_CARD_PRESENT;
+ val &= ~SDHCI_CARD_PRESENT;
else
/* ... in all other cases assume card is present */
val |= SDHCI_CARD_PRESENT;
@@ -89,7 +87,7 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg)
struct pltfm_imx_data *imx_data = pltfm_host->priv;
if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)
- && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP)))
+ && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD)))
/*
* these interrupts won't work with a custom card_detect gpio
* (only applied to mx25/35)
@@ -191,16 +189,6 @@ static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
return clk_get_rate(pltfm_host->clk) / 256 / 16;
}
-static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
-{
- struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
-
- if (boarddata && gpio_is_valid(boarddata->wp_gpio))
- return gpio_get_value(boarddata->wp_gpio);
- else
- return -ENOSYS;
-}
-
static struct sdhci_ops sdhci_esdhc_ops = {
.read_l = esdhc_readl_le,
.read_w = esdhc_readw_le,
@@ -212,6 +200,24 @@ static struct sdhci_ops sdhci_esdhc_ops = {
.get_min_clock = esdhc_pltfm_get_min_clock,
};
+static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
+ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
+ /* ADMA has issues. Might be fixable */
+ .ops = &sdhci_esdhc_ops,
+};
+
+static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host)
+{
+ struct esdhc_platform_data *boarddata =
+ host->mmc->parent->platform_data;
+
+ if (boarddata && gpio_is_valid(boarddata->wp_gpio))
+ return gpio_get_value(boarddata->wp_gpio);
+ else
+ return -ENOSYS;
+}
+
static irqreturn_t cd_irq(int irq, void *data)
{
struct sdhci_host *sdhost = (struct sdhci_host *)data;
@@ -220,30 +226,35 @@ static irqreturn_t cd_irq(int irq, void *data)
return IRQ_HANDLED;
};
-static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
+static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_host *host;
+ struct esdhc_platform_data *boarddata;
struct clk *clk;
int err;
struct pltfm_imx_data *imx_data;
+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
+ imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
+ if (!imx_data) {
+ err = -ENOMEM;
+ goto err_clk_get; /* frees the host via sdhci_pltfm_free() */
+ }
+ pltfm_host->priv = imx_data;
+
clk = clk_get(mmc_dev(host->mmc), NULL);
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
- return PTR_ERR(clk);
+ err = PTR_ERR(clk);
+ goto err_clk_get;
}
clk_enable(clk);
pltfm_host->clk = clk;
- imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL);
- if (!imx_data) {
- clk_disable(pltfm_host->clk);
- clk_put(pltfm_host->clk);
- return -ENOMEM;
- }
- pltfm_host->priv = imx_data;
-
if (!cpu_is_mx25())
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
@@ -257,6 +268,7 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51()))
imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT;
+ boarddata = host->mmc->parent->platform_data;
if (boarddata) {
err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP");
if (err) {
@@ -284,11 +296,15 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
goto no_card_detect_irq;
}
- imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP;
+ imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD;
/* Now we have a working card_detect again */
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
+ err = sdhci_add_host(host);
+ if (err)
+ goto err_add_host;
+
return 0;
no_card_detect_irq:
@@ -297,14 +313,23 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata)
boarddata->cd_gpio = err;
not_supported:
kfree(imx_data);
- return 0;
+ err_add_host:
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ err_clk_get:
+ sdhci_pltfm_free(pdev);
+ return err;
}
-static void esdhc_pltfm_exit(struct sdhci_host *host)
+static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data;
struct pltfm_imx_data *imx_data = pltfm_host->priv;
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
if (boarddata && gpio_is_valid(boarddata->wp_gpio))
gpio_free(boarddata->wp_gpio);
@@ -319,13 +344,37 @@ static void esdhc_pltfm_exit(struct sdhci_host *host)
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
kfree(imx_data);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
}
-struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
- .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA
- | SDHCI_QUIRK_BROKEN_CARD_DETECTION,
- /* ADMA has issues. Might be fixable */
- .ops = &sdhci_esdhc_ops,
- .init = esdhc_pltfm_init,
- .exit = esdhc_pltfm_exit,
+static struct platform_driver sdhci_esdhc_imx_driver = {
+ .driver = {
+ .name = "sdhci-esdhc-imx",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_esdhc_imx_probe,
+ .remove = __devexit_p(sdhci_esdhc_imx_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
+
+static int __init sdhci_esdhc_imx_init(void)
+{
+ return platform_driver_register(&sdhci_esdhc_imx_driver);
+}
+module_init(sdhci_esdhc_imx_init);
+
+static void __exit sdhci_esdhc_imx_exit(void)
+{
+ platform_driver_unregister(&sdhci_esdhc_imx_driver);
+}
+module_exit(sdhci_esdhc_imx_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
+MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
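
The WP/CD handling above relies on board code handing the GPIO lines in via platform data; a hypothetical board file would provide something like the sketch below (GPIO numbers are made up, and the MMIO/IRQ resource setup is omitted for brevity):

	#include <linux/platform_device.h>
	#include <mach/esdhc.h>

	/* hypothetical board support, not part of this patch */
	static struct esdhc_platform_data mx25pdk_esdhc_pdata = {
		.wp_gpio = 45,	/* made-up write-protect GPIO */
		.cd_gpio = 52,	/* made-up card-detect GPIO */
	};

	/* attached as pdev->dev.platform_data of the "sdhci-esdhc-imx"
	 * platform device registered by the board */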
diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c
deleted file mode 100644
index 60e4186a434..00000000000
--- a/drivers/mmc/host/sdhci-of-core.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * OpenFirmware bindings for Secure Digital Host Controller Interface.
- *
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- * Copyright (c) 2009 MontaVista Software, Inc.
- *
- * Authors: Xiaobo Xie <X.Xie@freescale.com>
- * Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/mmc/host.h>
-#ifdef CONFIG_PPC
-#include <asm/machdep.h>
-#endif
-#include "sdhci-of.h"
-#include "sdhci.h"
-
-#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
-
-/*
- * These accessors are designed for big endian hosts doing I/O to
- * little endian controllers incorporating a 32-bit hardware byte swapper.
- */
-
-u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
-{
- return in_be32(host->ioaddr + reg);
-}
-
-u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
-{
- return in_be16(host->ioaddr + (reg ^ 0x2));
-}
-
-u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
-{
- return in_8(host->ioaddr + (reg ^ 0x3));
-}
-
-void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg)
-{
- out_be32(host->ioaddr + reg, val);
-}
-
-void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg)
-{
- struct sdhci_of_host *of_host = sdhci_priv(host);
- int base = reg & ~0x3;
- int shift = (reg & 0x2) * 8;
-
- switch (reg) {
- case SDHCI_TRANSFER_MODE:
- /*
- * Postpone this write, we must do it together with a
- * command write that is down below.
- */
- of_host->xfer_mode_shadow = val;
- return;
- case SDHCI_COMMAND:
- sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow,
- SDHCI_TRANSFER_MODE);
- return;
- }
- clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
-}
-
-void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
-{
- int base = reg & ~0x3;
- int shift = (reg & 0x3) * 8;
-
- clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift);
-}
-#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
-
-#ifdef CONFIG_PM
-
-static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state)
-{
- struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
-
- return mmc_suspend_host(host->mmc);
-}
-
-static int sdhci_of_resume(struct platform_device *ofdev)
-{
- struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
-
- return mmc_resume_host(host->mmc);
-}
-
-#else
-
-#define sdhci_of_suspend NULL
-#define sdhci_of_resume NULL
-
-#endif
-
-static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
-{
- if (of_get_property(np, "sdhci,wp-inverted", NULL))
- return true;
-
- /* Old device trees don't have the wp-inverted property. */
-#ifdef CONFIG_PPC
- return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
-#else
- return false;
-#endif
-}
-
-static const struct of_device_id sdhci_of_match[];
-static int __devinit sdhci_of_probe(struct platform_device *ofdev)
-{
- const struct of_device_id *match;
- struct device_node *np = ofdev->dev.of_node;
- struct sdhci_of_data *sdhci_of_data;
- struct sdhci_host *host;
- struct sdhci_of_host *of_host;
- const __be32 *clk;
- int size;
- int ret;
-
- match = of_match_device(sdhci_of_match, &ofdev->dev);
- if (!match)
- return -EINVAL;
- sdhci_of_data = match->data;
-
- if (!of_device_is_available(np))
- return -ENODEV;
-
- host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host));
- if (IS_ERR(host))
- return -ENOMEM;
-
- of_host = sdhci_priv(host);
- dev_set_drvdata(&ofdev->dev, host);
-
- host->ioaddr = of_iomap(np, 0);
- if (!host->ioaddr) {
- ret = -ENOMEM;
- goto err_addr_map;
- }
-
- host->irq = irq_of_parse_and_map(np, 0);
- if (!host->irq) {
- ret = -EINVAL;
- goto err_no_irq;
- }
-
- host->hw_name = dev_name(&ofdev->dev);
- if (sdhci_of_data) {
- host->quirks = sdhci_of_data->quirks;
- host->ops = &sdhci_of_data->ops;
- }
-
- if (of_get_property(np, "sdhci,auto-cmd12", NULL))
- host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
-
-
- if (of_get_property(np, "sdhci,1-bit-only", NULL))
- host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
-
- if (sdhci_of_wp_inverted(np))
- host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
-
- clk = of_get_property(np, "clock-frequency", &size);
- if (clk && size == sizeof(*clk) && *clk)
- of_host->clock = be32_to_cpup(clk);
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_add_host;
-
- return 0;
-
-err_add_host:
- irq_dispose_mapping(host->irq);
-err_no_irq:
- iounmap(host->ioaddr);
-err_addr_map:
- sdhci_free_host(host);
- return ret;
-}
-
-static int __devexit sdhci_of_remove(struct platform_device *ofdev)
-{
- struct sdhci_host *host = dev_get_drvdata(&ofdev->dev);
-
- sdhci_remove_host(host, 0);
- sdhci_free_host(host);
- irq_dispose_mapping(host->irq);
- iounmap(host->ioaddr);
- return 0;
-}
-
-static const struct of_device_id sdhci_of_match[] = {
-#ifdef CONFIG_MMC_SDHCI_OF_ESDHC
- { .compatible = "fsl,mpc8379-esdhc", .data = &sdhci_esdhc, },
- { .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, },
- { .compatible = "fsl,esdhc", .data = &sdhci_esdhc, },
-#endif
-#ifdef CONFIG_MMC_SDHCI_OF_HLWD
- { .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, },
-#endif
- { .compatible = "generic-sdhci", },
- {},
-};
-MODULE_DEVICE_TABLE(of, sdhci_of_match);
-
-static struct platform_driver sdhci_of_driver = {
- .driver = {
- .name = "sdhci-of",
- .owner = THIS_MODULE,
- .of_match_table = sdhci_of_match,
- },
- .probe = sdhci_of_probe,
- .remove = __devexit_p(sdhci_of_remove),
- .suspend = sdhci_of_suspend,
- .resume = sdhci_of_resume,
-};
-
-static int __init sdhci_of_init(void)
-{
- return platform_driver_register(&sdhci_of_driver);
-}
-module_init(sdhci_of_init);
-
-static void __exit sdhci_of_exit(void)
-{
- platform_driver_unregister(&sdhci_of_driver);
-}
-module_exit(sdhci_of_exit);
-
-MODULE_DESCRIPTION("Secure Digital Host Controller Interface OF driver");
-MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
- "Anton Vorontsov <avorontsov@ru.mvista.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index ba40d6d035c..fe604df6501 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -16,8 +16,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mmc/host.h>
-#include "sdhci-of.h"
-#include "sdhci.h"
+#include "sdhci-pltfm.h"
#include "sdhci-esdhc.h"
static u16 esdhc_readw(struct sdhci_host *host, int reg)
@@ -60,32 +59,83 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)
static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)
{
- struct sdhci_of_host *of_host = sdhci_priv(host);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- return of_host->clock;
+ return pltfm_host->clock;
}
static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
{
- struct sdhci_of_host *of_host = sdhci_priv(host);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- return of_host->clock / 256 / 16;
+ return pltfm_host->clock / 256 / 16;
}
-struct sdhci_of_data sdhci_esdhc = {
+static struct sdhci_ops sdhci_esdhc_ops = {
+ .read_l = sdhci_be32bs_readl,
+ .read_w = esdhc_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_be32bs_writel,
+ .write_w = esdhc_writew,
+ .write_b = esdhc_writeb,
+ .set_clock = esdhc_set_clock,
+ .enable_dma = esdhc_of_enable_dma,
+ .get_max_clock = esdhc_of_get_max_clock,
+ .get_min_clock = esdhc_of_get_min_clock,
+};
+
+static struct sdhci_pltfm_data sdhci_esdhc_pdata = {
/* card detection could be handled via GPIO */
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
| SDHCI_QUIRK_NO_CARD_NO_RESET,
- .ops = {
- .read_l = sdhci_be32bs_readl,
- .read_w = esdhc_readw,
- .read_b = sdhci_be32bs_readb,
- .write_l = sdhci_be32bs_writel,
- .write_w = esdhc_writew,
- .write_b = esdhc_writeb,
- .set_clock = esdhc_set_clock,
- .enable_dma = esdhc_of_enable_dma,
- .get_max_clock = esdhc_of_get_max_clock,
- .get_min_clock = esdhc_of_get_min_clock,
+ .ops = &sdhci_esdhc_ops,
+};
+
+static int __devinit sdhci_esdhc_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata);
+}
+
+static int __devexit sdhci_esdhc_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_esdhc_of_match[] = {
+ { .compatible = "fsl,mpc8379-esdhc" },
+ { .compatible = "fsl,mpc8536-esdhc" },
+ { .compatible = "fsl,esdhc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match);
+
+static struct platform_driver sdhci_esdhc_driver = {
+ .driver = {
+ .name = "sdhci-esdhc",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_esdhc_of_match,
},
+ .probe = sdhci_esdhc_probe,
+ .remove = __devexit_p(sdhci_esdhc_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
+
+static int __init sdhci_esdhc_init(void)
+{
+ return platform_driver_register(&sdhci_esdhc_driver);
+}
+module_init(sdhci_esdhc_init);
+
+static void __exit sdhci_esdhc_exit(void)
+{
+ platform_driver_unregister(&sdhci_esdhc_driver);
+}
+module_exit(sdhci_esdhc_exit);
+
+MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
+MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
+ "Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_LICENSE("GPL v2");
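
As a worked example of the new clock plumbing: with clock-frequency = <100000000> in the eSDHC device-tree node, sdhci_get_of_property() stores 100000000 in pltfm_host->clock, so esdhc_of_get_max_clock() reports 100 MHz and esdhc_of_get_min_clock() reports 100000000 / 256 / 16, roughly 24.4 kHz.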
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 68ddb7546ae..735be131dca 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -21,8 +21,7 @@
#include <linux/delay.h>
#include <linux/mmc/host.h>
-#include "sdhci-of.h"
-#include "sdhci.h"
+#include "sdhci-pltfm.h"
/*
* Ops and quirks for the Nintendo Wii SDHCI controllers.
@@ -51,15 +50,63 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)
udelay(SDHCI_HLWD_WRITE_DELAY);
}
-struct sdhci_of_data sdhci_hlwd = {
+static struct sdhci_ops sdhci_hlwd_ops = {
+ .read_l = sdhci_be32bs_readl,
+ .read_w = sdhci_be32bs_readw,
+ .read_b = sdhci_be32bs_readb,
+ .write_l = sdhci_hlwd_writel,
+ .write_w = sdhci_hlwd_writew,
+ .write_b = sdhci_hlwd_writeb,
+};
+
+static struct sdhci_pltfm_data sdhci_hlwd_pdata = {
.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE,
- .ops = {
- .read_l = sdhci_be32bs_readl,
- .read_w = sdhci_be32bs_readw,
- .read_b = sdhci_be32bs_readb,
- .write_l = sdhci_hlwd_writel,
- .write_w = sdhci_hlwd_writew,
- .write_b = sdhci_hlwd_writeb,
+ .ops = &sdhci_hlwd_ops,
+};
+
+static int __devinit sdhci_hlwd_probe(struct platform_device *pdev)
+{
+ return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata);
+}
+
+static int __devexit sdhci_hlwd_remove(struct platform_device *pdev)
+{
+ return sdhci_pltfm_unregister(pdev);
+}
+
+static const struct of_device_id sdhci_hlwd_of_match[] = {
+ { .compatible = "nintendo,hollywood-sdhci" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match);
+
+static struct platform_driver sdhci_hlwd_driver = {
+ .driver = {
+ .name = "sdhci-hlwd",
+ .owner = THIS_MODULE,
+ .of_match_table = sdhci_hlwd_of_match,
},
+ .probe = sdhci_hlwd_probe,
+ .remove = __devexit_p(sdhci_hlwd_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
+
+static int __init sdhci_hlwd_init(void)
+{
+ return platform_driver_register(&sdhci_hlwd_driver);
+}
+module_init(sdhci_hlwd_init);
+
+static void __exit sdhci_hlwd_exit(void)
+{
+ platform_driver_unregister(&sdhci_hlwd_driver);
+}
+module_exit(sdhci_hlwd_exit);
+
+MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
+MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h
deleted file mode 100644
index ad09ad9915d..00000000000
--- a/drivers/mmc/host/sdhci-of.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * OpenFirmware bindings for Secure Digital Host Controller Interface.
- *
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
- * Copyright (c) 2009 MontaVista Software, Inc.
- *
- * Authors: Xiaobo Xie <X.Xie@freescale.com>
- * Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-#ifndef __SDHCI_OF_H
-#define __SDHCI_OF_H
-
-#include <linux/types.h>
-#include "sdhci.h"
-
-struct sdhci_of_data {
- unsigned int quirks;
- struct sdhci_ops ops;
-};
-
-struct sdhci_of_host {
- unsigned int clock;
- u16 xfer_mode_shadow;
-};
-
-extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg);
-extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg);
-extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg);
-extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg);
-extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg);
-extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg);
-
-extern struct sdhci_of_data sdhci_esdhc;
-extern struct sdhci_of_data sdhci_hlwd;
-
-#endif /* __SDHCI_OF_H */
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 936bbca19c0..26c528648f3 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -143,6 +143,12 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
+static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ return 0;
+}
+
/*
* ADMA operation is disabled for Moorestown platform due to
* hardware bugs.
@@ -157,8 +163,15 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip)
return 0;
}
+static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
+{
+ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ return 0;
+}
+
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
.quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
+ .probe_slot = mrst_hc_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
@@ -170,8 +183,13 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
};
-static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = {
+static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
+ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+};
+
+static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .probe_slot = mfd_emmc_probe_slot,
};
/* O2Micro extra registers */
@@ -682,7 +700,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_SDIO1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
},
{
@@ -690,7 +708,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_SDIO2,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio,
},
{
@@ -698,7 +716,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_EMMC0,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
},
{
@@ -706,7 +724,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
.device = PCI_DEVICE_ID_INTEL_MFD_EMMC1,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc,
},
{
@@ -789,8 +807,34 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
+static int sdhci_pci_8bit_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+
+ switch (width) {
+ case MMC_BUS_WIDTH_8:
+ ctrl |= SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ break;
+ case MMC_BUS_WIDTH_4:
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ ctrl &= ~SDHCI_CTRL_8BITBUS;
+ break;
+ default:
+ ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS);
+ break;
+ }
+
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
static struct sdhci_ops sdhci_pci_ops = {
.enable_dma = sdhci_pci_enable_dma,
+ .platform_8bit_width = sdhci_pci_8bit_width,
};
/*****************************************************************************\
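
For context, the SDHCI core hands bus-width changes to this new hook before falling back to its own handling; the dispatch in sdhci.c of this era looks roughly like the sketch below (paraphrased from the core, not part of this patch):

	if (host->ops->platform_8bit_width)
		host->ops->platform_8bit_width(host, ios->bus_width);
	else {
		u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

		if (ios->bus_width == MMC_BUS_WIDTH_8)
			ctrl |= SDHCI_CTRL_8BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_8BITBUS;
		if (ios->bus_width == MMC_BUS_WIDTH_4)
			ctrl |= SDHCI_CTRL_4BITBUS;
		else
			ctrl &= ~SDHCI_CTRL_4BITBUS;
		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	}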
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index dbab0407f4b..71c0ce1f6db 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -2,6 +2,12 @@
* sdhci-pltfm.c Support for SDHCI platform devices
* Copyright (c) 2009 Intel Corporation
*
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ *
+ * Authors: Xiaobo Xie <X.Xie@freescale.com>
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
@@ -22,48 +28,66 @@
* Inspired by sdhci-pci.c, by Pierre Ossman
*/
-#include <linux/delay.h>
-#include <linux/highmem.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#ifdef CONFIG_PPC
+#include <asm/machdep.h>
+#endif
+#include "sdhci-pltfm.h"
-#include <linux/mmc/host.h>
+static struct sdhci_ops sdhci_pltfm_ops = {
+};
-#include <linux/io.h>
-#include <linux/mmc/sdhci-pltfm.h>
+#ifdef CONFIG_OF
+static bool sdhci_of_wp_inverted(struct device_node *np)
+{
+ if (of_get_property(np, "sdhci,wp-inverted", NULL))
+ return true;
-#include "sdhci.h"
-#include "sdhci-pltfm.h"
+ /* Old device trees don't have the wp-inverted property. */
+#ifdef CONFIG_PPC
+ return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds);
+#else
+ return false;
+#endif /* CONFIG_PPC */
+}
+
+void sdhci_get_of_property(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ const __be32 *clk;
+ int size;
-/*****************************************************************************\
- * *
- * SDHCI core callbacks *
- * *
-\*****************************************************************************/
+ if (of_device_is_available(np)) {
+ if (of_get_property(np, "sdhci,auto-cmd12", NULL))
+ host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
-static struct sdhci_ops sdhci_pltfm_ops = {
-};
+ if (of_get_property(np, "sdhci,1-bit-only", NULL))
+ host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
-/*****************************************************************************\
- * *
- * Device probing/removal *
- * *
-\*****************************************************************************/
+ if (sdhci_of_wp_inverted(np))
+ host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
-static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
+ clk = of_get_property(np, "clock-frequency", &size);
+ if (clk && size == sizeof(*clk) && *clk)
+ pltfm_host->clock = be32_to_cpup(clk);
+ }
+}
+#else
+void sdhci_get_of_property(struct platform_device *pdev) {}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(sdhci_get_of_property);
+
+struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata)
{
- const struct platform_device_id *platid = platform_get_device_id(pdev);
- struct sdhci_pltfm_data *pdata;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct resource *iomem;
int ret;
- if (platid && platid->driver_data)
- pdata = (void *)platid->driver_data;
- else
- pdata = pdev->dev.platform_data;
-
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iomem) {
ret = -ENOMEM;
@@ -71,8 +95,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
}
if (resource_size(iomem) < 0x100)
- dev_err(&pdev->dev, "Invalid iomem size. You may "
- "experience problems.\n");
+ dev_err(&pdev->dev, "Invalid iomem size!\n");
/* Some PCI-based MFD need the parent here */
if (pdev->dev.parent != &platform_bus)
@@ -87,7 +110,7 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
pltfm_host = sdhci_priv(host);
- host->hw_name = "platform";
+ host->hw_name = dev_name(&pdev->dev);
if (pdata && pdata->ops)
host->ops = pdata->ops;
else
@@ -110,126 +133,95 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)
goto err_remap;
}
- if (pdata && pdata->init) {
- ret = pdata->init(host, pdata);
- if (ret)
- goto err_plat_init;
- }
-
- ret = sdhci_add_host(host);
- if (ret)
- goto err_add_host;
-
platform_set_drvdata(pdev, host);
- return 0;
+ return host;
-err_add_host:
- if (pdata && pdata->exit)
- pdata->exit(host);
-err_plat_init:
- iounmap(host->ioaddr);
err_remap:
release_mem_region(iomem->start, resource_size(iomem));
err_request:
sdhci_free_host(host);
err:
- printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret);
- return ret;
+ dev_err(&pdev->dev, "%s failed %d\n", __func__, ret);
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_init);
-static int __devexit sdhci_pltfm_remove(struct platform_device *pdev)
+void sdhci_pltfm_free(struct platform_device *pdev)
{
- struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;
struct sdhci_host *host = platform_get_drvdata(pdev);
struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int dead;
- u32 scratch;
-
- dead = 0;
- scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (scratch == (u32)-1)
- dead = 1;
- sdhci_remove_host(host, dead);
- if (pdata && pdata->exit)
- pdata->exit(host);
iounmap(host->ioaddr);
release_mem_region(iomem->start, resource_size(iomem));
sdhci_free_host(host);
platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_free);
- return 0;
+int sdhci_pltfm_register(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata)
+{
+ struct sdhci_host *host;
+ int ret = 0;
+
+ host = sdhci_pltfm_init(pdev, pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ sdhci_get_of_property(pdev);
+
+ ret = sdhci_add_host(host);
+ if (ret)
+ sdhci_pltfm_free(pdev);
+
+ return ret;
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_register);
-static const struct platform_device_id sdhci_pltfm_ids[] = {
- { "sdhci", },
-#ifdef CONFIG_MMC_SDHCI_CNS3XXX
- { "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata },
-#endif
-#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX
- { "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata },
-#endif
-#ifdef CONFIG_MMC_SDHCI_DOVE
- { "sdhci-dove", (kernel_ulong_t)&sdhci_dove_pdata },
-#endif
-#ifdef CONFIG_MMC_SDHCI_TEGRA
- { "sdhci-tegra", (kernel_ulong_t)&sdhci_tegra_pdata },
-#endif
- { },
-};
-MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids);
+int sdhci_pltfm_unregister(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
+ sdhci_pltfm_free(pdev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
#ifdef CONFIG_PM
-static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
{
struct sdhci_host *host = platform_get_drvdata(dev);
return sdhci_suspend_host(host, state);
}
+EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
-static int sdhci_pltfm_resume(struct platform_device *dev)
+int sdhci_pltfm_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
return sdhci_resume_host(host);
}
-#else
-#define sdhci_pltfm_suspend NULL
-#define sdhci_pltfm_resume NULL
+EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
#endif /* CONFIG_PM */
-static struct platform_driver sdhci_pltfm_driver = {
- .driver = {
- .name = "sdhci",
- .owner = THIS_MODULE,
- },
- .probe = sdhci_pltfm_probe,
- .remove = __devexit_p(sdhci_pltfm_remove),
- .id_table = sdhci_pltfm_ids,
- .suspend = sdhci_pltfm_suspend,
- .resume = sdhci_pltfm_resume,
-};
-
-/*****************************************************************************\
- * *
- * Driver init/exit *
- * *
-\*****************************************************************************/
-
-static int __init sdhci_drv_init(void)
+static int __init sdhci_pltfm_drv_init(void)
{
- return platform_driver_register(&sdhci_pltfm_driver);
+ pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n");
+
+ return 0;
}
+module_init(sdhci_pltfm_drv_init);
-static void __exit sdhci_drv_exit(void)
+static void __exit sdhci_pltfm_drv_exit(void)
{
- platform_driver_unregister(&sdhci_pltfm_driver);
}
+module_exit(sdhci_pltfm_drv_exit);
-module_init(sdhci_drv_init);
-module_exit(sdhci_drv_exit);
-
-MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver");
-MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
+MODULE_DESCRIPTION("SDHCI platform and OF driver helper");
+MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
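
The exported helpers support two usage patterns, sketched below (illustrative only; the *_pdata names are placeholders): simple drivers wrap everything in sdhci_pltfm_register(), while drivers that need extra setup between host allocation and registration call the init/free pair directly.

	/* 1. nothing to do beyond quirks/ops */
	static int __devinit simple_probe(struct platform_device *pdev)
	{
		return sdhci_pltfm_register(pdev, &simple_pdata);
	}

	/* 2. clocks, GPIOs etc. between init and add_host */
	static int __devinit fancy_probe(struct platform_device *pdev)
	{
		struct sdhci_host *host;
		int ret;

		host = sdhci_pltfm_init(pdev, &fancy_pdata);
		if (IS_ERR(host))
			return PTR_ERR(host);

		/* ... acquire clocks, parse properties, set caps ... */

		ret = sdhci_add_host(host);
		if (ret)
			sdhci_pltfm_free(pdev);
		return ret;
	}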
diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h
index 2b37016ad0a..3a9fc3f4084 100644
--- a/drivers/mmc/host/sdhci-pltfm.h
+++ b/drivers/mmc/host/sdhci-pltfm.h
@@ -12,17 +12,95 @@
#define _DRIVERS_MMC_SDHCI_PLTFM_H
#include <linux/clk.h>
-#include <linux/types.h>
-#include <linux/mmc/sdhci-pltfm.h>
+#include <linux/platform_device.h>
+#include "sdhci.h"
+
+struct sdhci_pltfm_data {
+ struct sdhci_ops *ops;
+ unsigned int quirks;
+};
struct sdhci_pltfm_host {
struct clk *clk;
void *priv; /* to handle quirks across io-accessor calls */
+
+ /* migrate from sdhci_of_host */
+ unsigned int clock;
+ u16 xfer_mode_shadow;
};
-extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata;
-extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata;
-extern struct sdhci_pltfm_data sdhci_dove_pdata;
-extern struct sdhci_pltfm_data sdhci_tegra_pdata;
+#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
+/*
+ * These accessors are designed for big endian hosts doing I/O to
+ * little endian controllers incorporating a 32-bit hardware byte swapper.
+ */
+static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg)
+{
+ return in_be32(host->ioaddr + reg);
+}
+
+static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg)
+{
+ return in_be16(host->ioaddr + (reg ^ 0x2));
+}
+
+static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg)
+{
+ return in_8(host->ioaddr + (reg ^ 0x3));
+}
+
+static inline void sdhci_be32bs_writel(struct sdhci_host *host,
+ u32 val, int reg)
+{
+ out_be32(host->ioaddr + reg, val);
+}
+
+static inline void sdhci_be32bs_writew(struct sdhci_host *host,
+ u16 val, int reg)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ int base = reg & ~0x3;
+ int shift = (reg & 0x2) * 8;
+
+ switch (reg) {
+ case SDHCI_TRANSFER_MODE:
+ /*
+ * Postpone this write, we must do it together with a
+ * command write that is down below.
+ */
+ pltfm_host->xfer_mode_shadow = val;
+ return;
+ case SDHCI_COMMAND:
+ sdhci_be32bs_writel(host,
+ val << 16 | pltfm_host->xfer_mode_shadow,
+ SDHCI_TRANSFER_MODE);
+ return;
+ }
+ clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift);
+}
+
+static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg)
+{
+ int base = reg & ~0x3;
+ int shift = (reg & 0x3) * 8;
+
+ clrsetbits_be32(host->ioaddr + base, 0xff << shift, val << shift);
+}
+#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */
+
+extern void sdhci_get_of_property(struct platform_device *pdev);
+
+extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata);
+extern void sdhci_pltfm_free(struct platform_device *pdev);
+
+extern int sdhci_pltfm_register(struct platform_device *pdev,
+ struct sdhci_pltfm_data *pdata);
+extern int sdhci_pltfm_unregister(struct platform_device *pdev);
+
+#ifdef CONFIG_PM
+extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
+extern int sdhci_pltfm_resume(struct platform_device *dev);
+#endif
#endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
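
As a worked example of the byte-swapper accessors now living in this header: a 16-bit read of the host version register at offset 0xFE goes through sdhci_be32bs_readw() as in_be16(host->ioaddr + (0xFE ^ 0x2)), i.e. offset 0xFC, because the two halfwords of each 32-bit word trade places to undo the controller's hardware byte swapper (single bytes are addressed with reg ^ 0x3 for the same reason).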
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
deleted file mode 100644
index 089c9a68b7b..00000000000
--- a/drivers/mmc/host/sdhci-pxa.c
+++ /dev/null
@@ -1,303 +0,0 @@
-/* linux/drivers/mmc/host/sdhci-pxa.c
- *
- * Copyright (C) 2010 Marvell International Ltd.
- * Zhangfei Gao <zhangfei.gao@marvell.com>
- * Kevin Wang <dwang4@marvell.com>
- * Mingwei Wang <mwwang@marvell.com>
- * Philip Rakity <prakity@marvell.com>
- * Mark Brown <markb@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Supports:
- * SDHCI support for MMP2/PXA910/PXA168
- *
- * Refer to sdhci-s3c.c.
- */
-
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/mmc/host.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <plat/sdhci.h>
-#include "sdhci.h"
-
-#define DRIVER_NAME "sdhci-pxa"
-
-#define SD_FIFO_PARAM 0x104
-#define DIS_PAD_SD_CLK_GATE 0x400
-
-struct sdhci_pxa {
- struct sdhci_host *host;
- struct sdhci_pxa_platdata *pdata;
- struct clk *clk;
- struct resource *res;
-
- u8 clk_enable;
-};
-
-/*****************************************************************************\
- * *
- * SDHCI core callbacks *
- * *
-\*****************************************************************************/
-static void set_clock(struct sdhci_host *host, unsigned int clock)
-{
- struct sdhci_pxa *pxa = sdhci_priv(host);
- u32 tmp = 0;
-
- if (clock == 0) {
- if (pxa->clk_enable) {
- clk_disable(pxa->clk);
- pxa->clk_enable = 0;
- }
- } else {
- if (0 == pxa->clk_enable) {
- if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) {
- tmp = readl(host->ioaddr + SD_FIFO_PARAM);
- tmp |= DIS_PAD_SD_CLK_GATE;
- writel(tmp, host->ioaddr + SD_FIFO_PARAM);
- }
- clk_enable(pxa->clk);
- pxa->clk_enable = 1;
- }
- }
-}
-
-static int set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
-{
- u16 ctrl_2;
-
- /*
- * Set V18_EN -- UHS modes do not work without this.
- * does not change signaling voltage
- */
- ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
-
- /* Select Bus Speed Mode for host */
- ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- switch (uhs) {
- case MMC_TIMING_UHS_SDR12:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
- break;
- case MMC_TIMING_UHS_SDR25:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
- break;
- case MMC_TIMING_UHS_SDR50:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
- break;
- case MMC_TIMING_UHS_SDR104:
- ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
- break;
- case MMC_TIMING_UHS_DDR50:
- ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
- break;
- }
-
- sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
- pr_debug("%s:%s uhs = %d, ctrl_2 = %04X\n",
- __func__, mmc_hostname(host->mmc), uhs, ctrl_2);
-
- return 0;
-}
-
-static struct sdhci_ops sdhci_pxa_ops = {
- .set_uhs_signaling = set_uhs_signaling,
- .set_clock = set_clock,
-};
-
-/*****************************************************************************\
- * *
- * Device probing/removal *
- * *
-\*****************************************************************************/
-
-static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
-{
- struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
- struct device *dev = &pdev->dev;
- struct sdhci_host *host = NULL;
- struct resource *iomem = NULL;
- struct sdhci_pxa *pxa = NULL;
- int ret, irq;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq specified\n");
- return irq;
- }
-
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- dev_err(dev, "no memory specified\n");
- return -ENOENT;
- }
-
- host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa));
- if (IS_ERR(host)) {
- dev_err(dev, "failed to alloc host\n");
- return PTR_ERR(host);
- }
-
- pxa = sdhci_priv(host);
- pxa->host = host;
- pxa->pdata = pdata;
- pxa->clk_enable = 0;
-
- pxa->clk = clk_get(dev, "PXA-SDHCLK");
- if (IS_ERR(pxa->clk)) {
- dev_err(dev, "failed to get io clock\n");
- ret = PTR_ERR(pxa->clk);
- goto out;
- }
-
- pxa->res = request_mem_region(iomem->start, resource_size(iomem),
- mmc_hostname(host->mmc));
- if (!pxa->res) {
- dev_err(&pdev->dev, "cannot request region\n");
- ret = -EBUSY;
- goto out;
- }
-
- host->ioaddr = ioremap(iomem->start, resource_size(iomem));
- if (!host->ioaddr) {
- dev_err(&pdev->dev, "failed to remap registers\n");
- ret = -ENOMEM;
- goto out;
- }
-
- host->hw_name = "MMC";
- host->ops = &sdhci_pxa_ops;
- host->irq = irq;
- host->quirks = SDHCI_QUIRK_BROKEN_ADMA
- | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
- | SDHCI_QUIRK_32BIT_DMA_ADDR
- | SDHCI_QUIRK_32BIT_DMA_SIZE
- | SDHCI_QUIRK_32BIT_ADMA_SIZE
- | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
-
- if (pdata->quirks)
- host->quirks |= pdata->quirks;
-
- /* enable 1/8V DDR capable */
- host->mmc->caps |= MMC_CAP_1_8V_DDR;
-
- /* If slot design supports 8 bit data, indicate this to MMC. */
- if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
- host->mmc->caps |= MMC_CAP_8_BIT_DATA;
-
- ret = sdhci_add_host(host);
- if (ret) {
- dev_err(&pdev->dev, "failed to add host\n");
- goto out;
- }
-
- if (pxa->pdata->max_speed)
- host->mmc->f_max = pxa->pdata->max_speed;
-
- platform_set_drvdata(pdev, host);
-
- return 0;
-out:
- if (host) {
- clk_put(pxa->clk);
- if (host->ioaddr)
- iounmap(host->ioaddr);
- if (pxa->res)
- release_mem_region(pxa->res->start,
- resource_size(pxa->res));
- sdhci_free_host(host);
- }
-
- return ret;
-}
-
-static int __devexit sdhci_pxa_remove(struct platform_device *pdev)
-{
- struct sdhci_host *host = platform_get_drvdata(pdev);
- struct sdhci_pxa *pxa = sdhci_priv(host);
- int dead = 0;
- u32 scratch;
-
- if (host) {
- scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
- if (scratch == (u32)-1)
- dead = 1;
-
- sdhci_remove_host(host, dead);
-
- if (host->ioaddr)
- iounmap(host->ioaddr);
- if (pxa->res)
- release_mem_region(pxa->res->start,
- resource_size(pxa->res));
- if (pxa->clk_enable) {
- clk_disable(pxa->clk);
- pxa->clk_enable = 0;
- }
- clk_put(pxa->clk);
-
- sdhci_free_host(host);
- platform_set_drvdata(pdev, NULL);
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state)
-{
- struct sdhci_host *host = platform_get_drvdata(dev);
-
- return sdhci_suspend_host(host, state);
-}
-
-static int sdhci_pxa_resume(struct platform_device *dev)
-{
- struct sdhci_host *host = platform_get_drvdata(dev);
-
- return sdhci_resume_host(host);
-}
-#else
-#define sdhci_pxa_suspend NULL
-#define sdhci_pxa_resume NULL
-#endif
-
-static struct platform_driver sdhci_pxa_driver = {
- .probe = sdhci_pxa_probe,
- .remove = __devexit_p(sdhci_pxa_remove),
- .suspend = sdhci_pxa_suspend,
- .resume = sdhci_pxa_resume,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-/*****************************************************************************\
- * *
- * Driver init/exit *
- * *
-\*****************************************************************************/
-
-static int __init sdhci_pxa_init(void)
-{
- return platform_driver_register(&sdhci_pxa_driver);
-}
-
-static void __exit sdhci_pxa_exit(void)
-{
- platform_driver_unregister(&sdhci_pxa_driver);
-}
-
-module_init(sdhci_pxa_init);
-module_exit(sdhci_pxa_exit);
-
-MODULE_DESCRIPTION("SDH controller driver for PXA168/PXA910/MMP2");
-MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
new file mode 100644
index 00000000000..38f58994f79
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Jun Nie <njun@marvell.com>
+ * Qiming Wu <wuqm@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_data/pxa_sdhci.h>
+#include <linux/slab.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SD_FIFO_PARAM 0xe0
+#define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */
+#define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */
+#define CLK_GATE_CTL 0x0100 /* Clock Gate Control */
+#define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \
+ CLK_GATE_ON | CLK_GATE_CTL)
+
+#define SD_CLOCK_BURST_SIZE_SETUP 0xe6
+#define SDCLK_SEL_SHIFT 8
+#define SDCLK_SEL_MASK 0x3
+#define SDCLK_DELAY_SHIFT 10
+#define SDCLK_DELAY_MASK 0x3c
+
+#define SD_CE_ATA_2 0xea
+#define MMC_CARD 0x1000
+#define MMC_WIDTH 0x0100
+
+static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+ if (mask == SDHCI_RESET_ALL) {
+ u16 tmp = 0;
+
+ /*
+ * Tune the timing of read data/command when a CRC error happens;
+ * there is no performance impact.
+ */
+ if (pdata->clk_delay_sel == 1) {
+ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+
+ tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT);
+ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+ << SDCLK_DELAY_SHIFT;
+ tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT);
+ tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT;
+
+ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ }
+
+ if (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING) {
+ tmp = readw(host->ioaddr + SD_FIFO_PARAM);
+ tmp &= ~CLK_GATE_SETTING_BITS;
+ writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+ } else {
+ tmp = readw(host->ioaddr + SD_FIFO_PARAM);
+ tmp &= ~CLK_GATE_SETTING_BITS;
+ tmp |= CLK_GATE_SETTING_BITS;
+ writew(tmp, host->ioaddr + SD_FIFO_PARAM);
+ }
+ }
+}
+
+static int pxav2_mmc_set_width(struct sdhci_host *host, int width)
+{
+ u8 ctrl;
+ u16 tmp;
+
+ ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL);
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ if (width == MMC_BUS_WIDTH_8) {
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ tmp |= MMC_CARD | MMC_WIDTH;
+ } else {
+ tmp &= ~(MMC_CARD | MMC_WIDTH);
+ if (width == MMC_BUS_WIDTH_4)
+ ctrl |= SDHCI_CTRL_4BITBUS;
+ else
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ }
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL);
+
+ return 0;
+}
+
+static u32 pxav2_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+
+ return clk_get_rate(pltfm_host->clk);
+}
+
+static struct sdhci_ops pxav2_sdhci_ops = {
+ .get_max_clock = pxav2_get_max_clock,
+ .platform_reset_exit = pxav2_set_private_registers,
+ .platform_8bit_width = pxav2_mmc_set_width,
+};
+
+static int __devinit sdhci_pxav2_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ int ret;
+ struct clk *clk;
+
+ pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
+ if (!pxa)
+ return -ENOMEM;
+
+ host = sdhci_pltfm_init(pdev, NULL);
+ if (IS_ERR(host)) {
+ kfree(pxa);
+ return PTR_ERR(host);
+ }
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = pxa;
+
+ clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
+ }
+ pltfm_host->clk = clk;
+ clk_enable(clk);
+
+ host->quirks = SDHCI_QUIRK_BROKEN_ADMA
+ | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+ | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+
+ if (pdata) {
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
+ /* on-chip device */
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+ }
+
+ host->ops = &pxav2_sdhci_ops;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+err_add_host:
+ clk_disable(clk);
+ clk_put(clk);
+err_clk_get:
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+ return ret;
+}
+
+static int __devexit sdhci_pxav2_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+
+ sdhci_remove_host(host, 1);
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_pxav2_driver = {
+ .driver = {
+ .name = "sdhci-pxav2",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_pxav2_probe,
+ .remove = __devexit_p(sdhci_pxav2_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+static int __init sdhci_pxav2_init(void)
+{
+ return platform_driver_register(&sdhci_pxav2_driver);
+}
+
+static void __exit sdhci_pxav2_exit(void)
+{
+ platform_driver_unregister(&sdhci_pxav2_driver);
+}
+
+module_init(sdhci_pxav2_init);
+module_exit(sdhci_pxav2_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for pxav2");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
+
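
Boards select the pxav2 behavior through sdhci_pxa_platdata; a hypothetical on-board eMMC slot might be described like this (sketch only, not from this patch):

	#include <linux/platform_data/pxa_sdhci.h>

	static struct sdhci_pxa_platdata mmp2_emmc_pdata = {
		/* soldered-down device with an 8-bit data bus */
		.flags = PXA_FLAG_CARD_PERMANENT | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
	};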
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
new file mode 100644
index 00000000000..4198dbbc5c2
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2010 Marvell International Ltd.
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ * Kevin Wang <dwang4@marvell.com>
+ * Mingwei Wang <mwwang@marvell.com>
+ * Philip Rakity <prakity@marvell.com>
+ * Mark Brown <markb@marvell.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/platform_data/pxa_sdhci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+
+#define SD_CLOCK_BURST_SIZE_SETUP 0x10A
+#define SDCLK_SEL 0x100
+#define SDCLK_DELAY_SHIFT 9
+#define SDCLK_DELAY_MASK 0x1f
+
+#define SD_CFG_FIFO_PARAM 0x100
+#define SDCFG_GEN_PAD_CLK_ON (1<<6)
+#define SDCFG_GEN_PAD_CLK_CNT_MASK 0xFF
+#define SDCFG_GEN_PAD_CLK_CNT_SHIFT 24
+
+#define SD_SPI_MODE 0x108
+#define SD_CE_ATA_1 0x10C
+
+#define SD_CE_ATA_2 0x10E
+#define SDCE_MISC_INT (1<<2)
+#define SDCE_MISC_INT_EN (1<<1)
+
+static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask)
+{
+ struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+
+ if (mask == SDHCI_RESET_ALL) {
+ /*
+ * Tune the timing of read data/command when a CRC error happens;
+ * there is no performance impact.
+ */
+ if (pdata && 0 != pdata->clk_delay_cycles) {
+ u16 tmp;
+
+ tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK)
+ << SDCLK_DELAY_SHIFT;
+ tmp |= SDCLK_SEL;
+ writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP);
+ }
+ }
+}
+
+#define MAX_WAIT_COUNT 5
+static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+ u16 tmp;
+ int count;
+
+ if (pxa->power_mode == MMC_POWER_UP
+ && power_mode == MMC_POWER_ON) {
+
+ dev_dbg(mmc_dev(host->mmc),
+ "%s: slot->power_mode = %d, ios->power_mode = %d\n",
+ __func__, pxa->power_mode, power_mode);
+
+ /* request notification when the 74 initialization clocks have been sent */
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ tmp |= SDCE_MISC_INT_EN;
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+
+ /* start sending the 74 clocks */
+ tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM);
+ tmp |= SDCFG_GEN_PAD_CLK_ON;
+ writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM);
+
+ /* slowest speed is about 100 kHz, i.e. 10 usec per clock: 74 clocks take ~740 usec */
+ udelay(740);
+ count = 0;
+
+ while (count++ < MAX_WAIT_COUNT) {
+ if ((readw(host->ioaddr + SD_CE_ATA_2)
+ & SDCE_MISC_INT) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (count == MAX_WAIT_COUNT)
+ dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n");
+
+ /* clear the interrupt bit if posted */
+ tmp = readw(host->ioaddr + SD_CE_ATA_2);
+ tmp |= SDCE_MISC_INT;
+ writew(tmp, host->ioaddr + SD_CE_ATA_2);
+ }
+ pxa->power_mode = power_mode;
+}
+
+static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs)
+{
+ u16 ctrl_2;
+
+ /*
+ * Set V18_EN -- UHS modes do not work without it.
+ * This does not change the signaling voltage itself.
+ */
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ switch (uhs) {
+ case MMC_TIMING_UHS_SDR12:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ break;
+ case MMC_TIMING_UHS_SDR25:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ break;
+ case MMC_TIMING_UHS_SDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180;
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180;
+ break;
+ }
+
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+ dev_dbg(mmc_dev(host->mmc),
+ "%s uhs = %d, ctrl_2 = %04X\n",
+ __func__, uhs, ctrl_2);
+
+ return 0;
+}
+
+static struct sdhci_ops pxav3_sdhci_ops = {
+ .platform_reset_exit = pxav3_set_private_registers,
+ .set_uhs_signaling = pxav3_set_uhs_signaling,
+ .platform_send_init_74_clocks = pxav3_gen_init_74_clocks,
+};
+
+static int __devinit sdhci_pxav3_probe(struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host;
+ struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct sdhci_host *host = NULL;
+ struct sdhci_pxa *pxa = NULL;
+ int ret;
+ struct clk *clk;
+
+ pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL);
+ if (!pxa)
+ return -ENOMEM;
+
+ host = sdhci_pltfm_init(pdev, NULL);
+ if (IS_ERR(host)) {
+ kfree(pxa);
+ return PTR_ERR(host);
+ }
+ pltfm_host = sdhci_priv(host);
+ pltfm_host->priv = pxa;
+
+ clk = clk_get(dev, "PXA-SDHCLK");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "failed to get io clock\n");
+ ret = PTR_ERR(clk);
+ goto err_clk_get;
+ }
+ pltfm_host->clk = clk;
+ clk_enable(clk);
+
+ host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL
+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+
+ /* enable 1/8V DDR capable */
+ host->mmc->caps |= MMC_CAP_1_8V_DDR;
+
+ if (pdata) {
+ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) {
+ /* on-chip device */
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+ host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
+ /* If slot design supports 8 bit data, indicate this to MMC. */
+ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
+ if (pdata->quirks)
+ host->quirks |= pdata->quirks;
+ if (pdata->host_caps)
+ host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+ }
+
+ host->ops = &pxav3_sdhci_ops;
+
+ ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add host\n");
+ goto err_add_host;
+ }
+
+ platform_set_drvdata(pdev, host);
+
+ return 0;
+
+err_add_host:
+ clk_disable(clk);
+ clk_put(clk);
+err_clk_get:
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+ return ret;
+}
+
+static int __devexit sdhci_pxav3_remove(struct platform_device *pdev)
+{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_pxa *pxa = pltfm_host->priv;
+
+ sdhci_remove_host(host, 1);
+
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+ sdhci_pltfm_free(pdev);
+ kfree(pxa);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sdhci_pxav3_driver = {
+ .driver = {
+ .name = "sdhci-pxav3",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_pxav3_probe,
+ .remove = __devexit_p(sdhci_pxav3_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
+};
+static int __init sdhci_pxav3_init(void)
+{
+ return platform_driver_register(&sdhci_pxav3_driver);
+}
+
+static void __exit sdhci_pxav3_exit(void)
+{
+ platform_driver_unregister(&sdhci_pxav3_driver);
+}
+
+module_init(sdhci_pxav3_init);
+module_exit(sdhci_pxav3_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for pxav3");
+MODULE_AUTHOR("Marvell International Ltd.");
+MODULE_LICENSE("GPL v2");
+
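
A quick sanity check of the 74-clock wait above: at the 100 kHz minimum initialization clock each cycle lasts 10 usec, so 74 clocks take 740 usec, which matches the udelay(740) before the driver starts polling SDCE_MISC_INT (up to MAX_WAIT_COUNT further 10 usec polls).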
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 69e3ee321eb..460ffaf0f6d 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -612,16 +612,14 @@ static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
{
struct sdhci_host *host = platform_get_drvdata(dev);
- sdhci_suspend_host(host, pm);
- return 0;
+ return sdhci_suspend_host(host, pm);
}
static int sdhci_s3c_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
- sdhci_resume_host(host);
- return 0;
+ return sdhci_resume_host(host);
}
#else
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 343c97edba3..18b0bd31de7 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -24,7 +24,6 @@
#include <mach/gpio.h>
#include <mach/sdhci.h>
-#include "sdhci.h"
#include "sdhci-pltfm.h"
static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
@@ -116,20 +115,42 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
return 0;
}
+static struct sdhci_ops tegra_sdhci_ops = {
+ .get_ro = tegra_sdhci_get_ro,
+ .read_l = tegra_sdhci_readl,
+ .read_w = tegra_sdhci_readw,
+ .write_l = tegra_sdhci_writel,
+ .platform_8bit_width = tegra_sdhci_8bit,
+};
+
+static struct sdhci_pltfm_data sdhci_tegra_pdata = {
+ .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
+ SDHCI_QUIRK_SINGLE_POWER_WRITE |
+ SDHCI_QUIRK_NO_HISPD_BIT |
+ SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
+ .ops = &tegra_sdhci_ops,
+};
-static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
- struct sdhci_pltfm_data *pdata)
+static int __devinit sdhci_tegra_probe(struct platform_device *pdev)
{
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
+ struct sdhci_pltfm_host *pltfm_host;
struct tegra_sdhci_platform_data *plat;
+ struct sdhci_host *host;
struct clk *clk;
int rc;
+ host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata);
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+ pltfm_host = sdhci_priv(host);
+
plat = pdev->dev.platform_data;
+
if (plat == NULL) {
dev_err(mmc_dev(host->mmc), "missing platform data\n");
- return -ENXIO;
+ rc = -ENXIO;
+ goto err_no_plat;
}
if (gpio_is_valid(plat->power_gpio)) {
@@ -137,7 +158,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate power gpio\n");
- goto out;
+ goto err_power_req;
}
tegra_gpio_enable(plat->power_gpio);
gpio_direction_output(plat->power_gpio, 1);
@@ -148,7 +169,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate cd gpio\n");
- goto out_power;
+ goto err_cd_req;
}
tegra_gpio_enable(plat->cd_gpio);
gpio_direction_input(plat->cd_gpio);
@@ -159,7 +180,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc), "request irq error\n");
- goto out_cd;
+ goto err_cd_irq_req;
}
}
@@ -169,7 +190,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (rc) {
dev_err(mmc_dev(host->mmc),
"failed to allocate wp gpio\n");
- goto out_irq;
+ goto err_wp_req;
}
tegra_gpio_enable(plat->wp_gpio);
gpio_direction_input(plat->wp_gpio);
@@ -179,7 +200,7 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (IS_ERR(clk)) {
dev_err(mmc_dev(host->mmc), "clk err\n");
rc = PTR_ERR(clk);
- goto out_wp;
+ goto err_clk_get;
}
clk_enable(clk);
pltfm_host->clk = clk;
@@ -189,38 +210,47 @@ static int tegra_sdhci_pltfm_init(struct sdhci_host *host,
if (plat->is_8bit)
host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+ rc = sdhci_add_host(host);
+ if (rc)
+ goto err_add_host;
+
return 0;
-out_wp:
+err_add_host:
+ clk_disable(pltfm_host->clk);
+ clk_put(pltfm_host->clk);
+err_clk_get:
if (gpio_is_valid(plat->wp_gpio)) {
tegra_gpio_disable(plat->wp_gpio);
gpio_free(plat->wp_gpio);
}
-
-out_irq:
+err_wp_req:
if (gpio_is_valid(plat->cd_gpio))
free_irq(gpio_to_irq(plat->cd_gpio), host);
-out_cd:
+err_cd_irq_req:
if (gpio_is_valid(plat->cd_gpio)) {
tegra_gpio_disable(plat->cd_gpio);
gpio_free(plat->cd_gpio);
}
-
-out_power:
+err_cd_req:
if (gpio_is_valid(plat->power_gpio)) {
tegra_gpio_disable(plat->power_gpio);
gpio_free(plat->power_gpio);
}
-
-out:
+err_power_req:
+err_no_plat:
+ sdhci_pltfm_free(pdev);
return rc;
}
-static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
+static int __devexit sdhci_tegra_remove(struct platform_device *pdev)
{
+ struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
struct tegra_sdhci_platform_data *plat;
+ int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+
+ sdhci_remove_host(host, dead);
plat = pdev->dev.platform_data;
@@ -242,22 +272,37 @@ static void tegra_sdhci_pltfm_exit(struct sdhci_host *host)
clk_disable(pltfm_host->clk);
clk_put(pltfm_host->clk);
+
+ sdhci_pltfm_free(pdev);
+
+ return 0;
}
-static struct sdhci_ops tegra_sdhci_ops = {
- .get_ro = tegra_sdhci_get_ro,
- .read_l = tegra_sdhci_readl,
- .read_w = tegra_sdhci_readw,
- .write_l = tegra_sdhci_writel,
- .platform_8bit_width = tegra_sdhci_8bit,
+static struct platform_driver sdhci_tegra_driver = {
+ .driver = {
+ .name = "sdhci-tegra",
+ .owner = THIS_MODULE,
+ },
+ .probe = sdhci_tegra_probe,
+ .remove = __devexit_p(sdhci_tegra_remove),
+#ifdef CONFIG_PM
+ .suspend = sdhci_pltfm_suspend,
+ .resume = sdhci_pltfm_resume,
+#endif
};
-struct sdhci_pltfm_data sdhci_tegra_pdata = {
- .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
- SDHCI_QUIRK_SINGLE_POWER_WRITE |
- SDHCI_QUIRK_NO_HISPD_BIT |
- SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC,
- .ops = &tegra_sdhci_ops,
- .init = tegra_sdhci_pltfm_init,
- .exit = tegra_sdhci_pltfm_exit,
-};
+static int __init sdhci_tegra_init(void)
+{
+ return platform_driver_register(&sdhci_tegra_driver);
+}
+module_init(sdhci_tegra_init);
+
+static void __exit sdhci_tegra_exit(void)
+{
+ platform_driver_unregister(&sdhci_tegra_driver);
+}
+module_exit(sdhci_tegra_exit);
+
+MODULE_DESCRIPTION("SDHCI driver for Tegra");
+MODULE_AUTHOR(" Google, Inc.");
+MODULE_LICENSE("GPL v2");
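The init/exit stubs above are the standard registration boilerplate of this period. On kernels that provide the module_platform_driver() helper (an assumption, not something this patch relies on), the same glue collapses to a single line:

	module_platform_driver(sdhci_tegra_driver);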
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 58d5436ff64..c31a3343340 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -127,11 +127,15 @@ static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs)
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
- u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT;
+ u32 present, irqs;
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
return;
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+ irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT;
+
if (enable)
sdhci_unmask_irqs(host, irqs);
else
@@ -2154,13 +2158,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
mmc_hostname(host->mmc), intmask);
if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
+ u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+
+ /*
+ * There is an observation on i.MX esdhc: the INSERT bit is
+ * immediately set again when it gets cleared, if a card is
+ * inserted, and REMOVE behaves the same way. We have to mask
+ * the irq to prevent an interrupt storm that would freeze the
+ * system.
+ *
+ * More testing is needed here to ensure this works on other
+ * platforms as well.
+ */
+ sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
+ SDHCI_INT_CARD_REMOVE);
+ sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
+ SDHCI_INT_CARD_INSERT);
+
sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
- SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+ SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
+ intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
tasklet_schedule(&host->card_tasklet);
}
- intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
-
if (intmask & SDHCI_INT_CMD_MASK) {
sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK,
SDHCI_INT_STATUS);
@@ -2488,6 +2509,11 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000);
+ else
+ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
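Taken together, these sdhci.c hunks filter the card-detect interrupts by current state: only the transition that can actually happen next stays enabled, which is what breaks the i.MX interrupt storm described in the comment. A hedged sketch of the rearm step using the same accessors (the helper name is made up):

static void sdhci_cd_irq_rearm(struct sdhci_host *host)
{
	u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
			SDHCI_CARD_PRESENT;

	/* card present -> wait only for REMOVE; card absent -> only INSERT */
	sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT :
					SDHCI_INT_CARD_REMOVE);
	sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE :
					  SDHCI_INT_CARD_INSERT);
}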
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 14f8edbaa19..557886bee9c 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -175,6 +175,7 @@ struct sh_mmcif_host {
enum mmcif_state state;
spinlock_t lock;
bool power;
+ bool card_present;
/* DMA support */
struct dma_chan *chan_rx;
@@ -877,23 +878,23 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
if (ios->power_mode == MMC_POWER_UP) {
- if (p->set_pwr)
- p->set_pwr(host->pd, ios->power_mode);
- if (!host->power) {
+ if (!host->card_present) {
/* See if we also get DMA */
sh_mmcif_request_dma(host, host->pd->dev.platform_data);
- pm_runtime_get_sync(&host->pd->dev);
- host->power = true;
+ host->card_present = true;
}
} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* clock stop */
sh_mmcif_clock_control(host, 0);
if (ios->power_mode == MMC_POWER_OFF) {
- if (host->power) {
- pm_runtime_put(&host->pd->dev);
+ if (host->card_present) {
sh_mmcif_release_dma(host);
- host->power = false;
+ host->card_present = false;
}
+ }
+ if (host->power) {
+ pm_runtime_put(&host->pd->dev);
+ host->power = false;
if (p->down_pwr)
p->down_pwr(host->pd);
}
@@ -901,8 +902,16 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
return;
}
- if (ios->clock)
+ if (ios->clock) {
+ if (!host->power) {
+ if (p->set_pwr)
+ p->set_pwr(host->pd, ios->power_mode);
+ pm_runtime_get_sync(&host->pd->dev);
+ host->power = true;
+ sh_mmcif_sync_reset(host);
+ }
sh_mmcif_clock_control(host, ios->clock);
+ }
host->bus_width = ios->bus_width;
host->state = STATE_IDLE;
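The sh_mmcif rework separates card bookkeeping (card_present) from power bookkeeping (power) so that pm_runtime_get_sync() and pm_runtime_put() pair one-to-one no matter how often set_ios() is called. The invariant in miniature, with illustrative names (a sketch, not the driver's code):

#include <linux/pm_runtime.h>

static bool powered;	/* guards exactly one runtime-PM reference */

static void example_power_up(struct device *dev)
{
	if (!powered) {
		pm_runtime_get_sync(dev);	/* taken on first clocked access */
		powered = true;
	}
}

static void example_power_down(struct device *dev)
{
	if (powered) {
		pm_runtime_put(dev);		/* drops that same reference */
		powered = false;
	}
}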
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index ce500f03df8..774f6439d7c 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -26,6 +26,7 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
#include <linux/sh_dma.h>
+#include <linux/delay.h>
#include "tmio_mmc.h"
@@ -55,6 +56,39 @@ static int sh_mobile_sdhi_get_cd(struct platform_device *pdev)
return -ENOSYS;
}
+static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host)
+{
+ int timeout = 1000;
+
+ while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13)))
+ udelay(1);
+
+ if (!timeout) {
+ dev_warn(host->pdata->dev, "timeout waiting for SD bus idle\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
+{
+ switch (addr) {
+ case CTL_SD_CMD:
+ case CTL_STOP_INTERNAL_ACTION:
+ case CTL_XFER_BLK_COUNT:
+ case CTL_SD_CARD_CLK_CTL:
+ case CTL_SD_XFER_LEN:
+ case CTL_SD_MEM_CARD_OPT:
+ case CTL_TRANSACTION_CTL:
+ case CTL_DMA_ENABLE:
+ return sh_mobile_sdhi_wait_idle(host);
+ }
+
+ return 0;
+}
+
static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
{
struct sh_mobile_sdhi *priv;
@@ -86,6 +120,8 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev)
mmc_data->hclk = clk_get_rate(priv->clk);
mmc_data->set_pwr = sh_mobile_sdhi_set_pwr;
mmc_data->get_cd = sh_mobile_sdhi_get_cd;
+ if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT)
+ mmc_data->write16_hook = sh_mobile_sdhi_write16_hook;
mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED;
if (p) {
mmc_data->flags = p->tmio_flags;
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 8260bc2c34e..087d88023ba 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/mmc/tmio.h>
+#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
@@ -52,6 +53,8 @@ struct tmio_mmc_host {
void (*set_clk_div)(struct platform_device *host, int state);
int pm_error;
+ /* recognise system-wide suspend in runtime PM methods */
+ bool pm_global;
/* pio related stuff */
struct scatterlist *sg_ptr;
@@ -73,8 +76,11 @@ struct tmio_mmc_host {
/* Track lost interrupts */
struct delayed_work delayed_reset_work;
- spinlock_t lock;
+ struct work_struct done;
+
+ spinlock_t lock; /* protect host private data */
unsigned long last_req_ts;
+ struct mutex ios_lock; /* protect set_ios() context */
};
int tmio_mmc_host_probe(struct tmio_mmc_host **host,
@@ -103,6 +109,7 @@ static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE)
void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
+void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
void tmio_mmc_release_dma(struct tmio_mmc_host *host);
#else
@@ -111,6 +118,10 @@ static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
{
}
+static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+}
+
static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
struct tmio_mmc_data *pdata)
{
@@ -134,4 +145,44 @@ int tmio_mmc_host_resume(struct device *dev);
int tmio_mmc_host_runtime_suspend(struct device *dev);
int tmio_mmc_host_runtime_resume(struct device *dev);
+static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ readsw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
+{
+ return readw(host->ctl + (addr << host->bus_shift)) |
+ readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
+}
+
+static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
+{
+ /* If there is a hook and it returns non-zero, there
+ * was an error and the write should be skipped.
+ */
+ if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr))
+ return;
+ writew(val, host->ctl + (addr << host->bus_shift));
+}
+
+static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
+ u16 *buf, int count)
+{
+ writesw(host->ctl + (addr << host->bus_shift), buf, count);
+}
+
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ writew(val, host->ctl + (addr << host->bus_shift));
+ writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
+}
+
#endif
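The sd_ctrl_* accessors scale each logical 16-bit register offset by host->bus_shift, so one register map serves controllers whose registers sit 2 or 4 bytes apart, and 32-bit values are composed from two adjacent 16-bit halves. An illustration of the address math (the CTL_STATUS value is assumed from the common tmio register map):

/*
 * Assuming CTL_STATUS == 0x1c and host->bus_shift == 1 (4-byte stride):
 *
 *   sd_ctrl_read16(host, CTL_STATUS)
 *     -> readw(host->ctl + (0x1c << 1)) == readw(host->ctl + 0x38)
 *
 *   sd_ctrl_read32(host, CTL_STATUS)
 *     -> readw(host->ctl + 0x38) | readw(host->ctl + 0x3c) << 16
 */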
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index bf12513d629..86f259cdfcb 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -23,11 +23,14 @@
#define TMIO_MMC_MIN_DMA_LEN 8
-static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
+ if (!host->chan_tx || !host->chan_rx)
+ return;
+
#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
/* Switch DMA mode on or off - SuperH specific? */
- writew(enable ? 2 : 0, host->ctl + (0xd8 << host->bus_shift));
+ sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0);
#endif
}
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 0b09e8239aa..1f16357e730 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -46,40 +46,6 @@
#include "tmio_mmc.h"
-static u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- readsw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)
-{
- return readw(host->ctl + (addr << host->bus_shift)) |
- readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;
-}
-
-static void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
-}
-
-static void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr,
- u16 *buf, int count)
-{
- writesw(host->ctl + (addr << host->bus_shift), buf, count);
-}
-
-static void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
-{
- writew(val, host->ctl + (addr << host->bus_shift));
- writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
-}
-
void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
u32 mask = sd_ctrl_read32(host, CTL_IRQ_MASK) & ~(i & TMIO_MASK_IRQ);
@@ -284,10 +250,16 @@ static void tmio_mmc_reset_work(struct work_struct *work)
/* called with host->lock held, interrupts disabled */
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
- struct mmc_request *mrq = host->mrq;
+ struct mmc_request *mrq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
- if (!mrq)
+ mrq = host->mrq;
+ if (IS_ERR_OR_NULL(mrq)) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
host->cmd = NULL;
host->data = NULL;
@@ -296,11 +268,18 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
cancel_delayed_work(&host->delayed_reset_work);
host->mrq = NULL;
+ spin_unlock_irqrestore(&host->lock, flags);
- /* FIXME: mmc_request_done() can schedule! */
mmc_request_done(host->mmc, mrq);
}
+static void tmio_mmc_done_work(struct work_struct *work)
+{
+ struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
+ done);
+ tmio_mmc_finish_request(host);
+}
+
/* These are the bitmasks the tmio chip requires to implement the MMC response
* types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD 0x0040
@@ -467,7 +446,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
BUG();
}
- tmio_mmc_finish_request(host);
+ schedule_work(&host->done);
}
static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
@@ -557,7 +536,7 @@ static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
tasklet_schedule(&host->dma_issue);
}
} else {
- tmio_mmc_finish_request(host);
+ schedule_work(&host->done);
}
out:
@@ -567,6 +546,7 @@ out:
irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
struct tmio_mmc_host *host = devid;
+ struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
unsigned int ireg, irq_mask, status;
unsigned int sdio_ireg, sdio_irq_mask, sdio_status;
@@ -588,13 +568,13 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (sdio_ireg && !host->sdio_irq_enabled) {
pr_warning("tmio_mmc: Spurious SDIO IRQ, disabling! 0x%04x 0x%04x 0x%04x\n",
sdio_status, sdio_irq_mask, sdio_ireg);
- tmio_mmc_enable_sdio_irq(host->mmc, 0);
+ tmio_mmc_enable_sdio_irq(mmc, 0);
goto out;
}
- if (host->mmc->caps & MMC_CAP_SDIO_IRQ &&
+ if (mmc->caps & MMC_CAP_SDIO_IRQ &&
sdio_ireg & TMIO_SDIO_STAT_IOIRQ)
- mmc_signal_sdio_irq(host->mmc);
+ mmc_signal_sdio_irq(mmc);
if (sdio_ireg)
goto out;
@@ -603,58 +583,49 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
pr_debug_status(status);
pr_debug_status(ireg);
- if (!ireg) {
- tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
-
- pr_warning("tmio_mmc: Spurious irq, disabling! "
- "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
- pr_debug_status(status);
-
+ /* Card insert / remove attempts */
+ if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
+ TMIO_STAT_CARD_REMOVE);
+ if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
+ ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
+ !work_pending(&mmc->detect.work))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(100));
goto out;
}
- while (ireg) {
- /* Card insert / remove attempts */
- if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
- tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
- TMIO_STAT_CARD_REMOVE);
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
- }
-
- /* CRC and other errors */
-/* if (ireg & TMIO_STAT_ERR_IRQ)
- * handled |= tmio_error_irq(host, irq, stat);
+ /* CRC and other errors */
+/* if (ireg & TMIO_STAT_ERR_IRQ)
+ * handled |= tmio_error_irq(host, irq, stat);
*/
- /* Command completion */
- if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
- tmio_mmc_ack_mmc_irqs(host,
- TMIO_STAT_CMDRESPEND |
- TMIO_STAT_CMDTIMEOUT);
- tmio_mmc_cmd_irq(host, status);
- }
-
- /* Data transfer */
- if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
- tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
- tmio_mmc_pio_irq(host);
- }
-
- /* Data transfer completion */
- if (ireg & TMIO_STAT_DATAEND) {
- tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
- tmio_mmc_data_irq(host);
- }
+ /* Command completion */
+ if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
+ tmio_mmc_ack_mmc_irqs(host,
+ TMIO_STAT_CMDRESPEND |
+ TMIO_STAT_CMDTIMEOUT);
+ tmio_mmc_cmd_irq(host, status);
+ goto out;
+ }
- /* Check status - keep going until we've handled it all */
- status = sd_ctrl_read32(host, CTL_STATUS);
- irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK);
- ireg = status & TMIO_MASK_IRQ & ~irq_mask;
+ /* Data transfer */
+ if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
+ tmio_mmc_pio_irq(host);
+ goto out;
+ }
- pr_debug("Status at end of loop: %08x\n", status);
- pr_debug_status(status);
+ /* Data transfer completion */
+ if (ireg & TMIO_STAT_DATAEND) {
+ tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
+ tmio_mmc_data_irq(host);
+ goto out;
}
- pr_debug("MMC IRQ end\n");
+
+ pr_warning("tmio_mmc: Spurious irq, disabling! "
+ "0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
+ pr_debug_status(status);
+ tmio_mmc_disable_mmc_irqs(host, status & ~irq_mask);
out:
return IRQ_HANDLED;
@@ -749,6 +720,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
struct tmio_mmc_data *pdata = host->pdata;
unsigned long flags;
+ mutex_lock(&host->ios_lock);
+
spin_lock_irqsave(&host->lock, flags);
if (host->mrq) {
if (IS_ERR(host->mrq)) {
@@ -764,6 +737,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->mrq->cmd->opcode, host->last_req_ts, jiffies);
}
spin_unlock_irqrestore(&host->lock, flags);
+
+ mutex_unlock(&host->ios_lock);
return;
}
@@ -771,33 +746,30 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
spin_unlock_irqrestore(&host->lock, flags);
- if (ios->clock)
- tmio_mmc_set_clock(host, ios->clock);
-
- /* Power sequence - OFF -> UP -> ON */
- if (ios->power_mode == MMC_POWER_UP) {
- if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) && !pdata->power) {
+ /*
+ * pdata->power is false only if COLD_CD is available; otherwise it is
+ * false only for short intervals during probing or resuming.
+ */
+ if (ios->power_mode == MMC_POWER_ON && ios->clock) {
+ if (!pdata->power) {
pm_runtime_get_sync(&host->pdev->dev);
pdata->power = true;
}
+ tmio_mmc_set_clock(host, ios->clock);
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
- } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
- /* power down SD bus */
- if (ios->power_mode == MMC_POWER_OFF) {
- if (host->set_pwr)
- host->set_pwr(host->pdev, 0);
- if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
- pdata->power) {
- pdata->power = false;
- pm_runtime_put(&host->pdev->dev);
- }
- }
- tmio_mmc_clk_stop(host);
- } else {
/* start bus clock */
tmio_mmc_clk_start(host);
+ } else if (ios->power_mode != MMC_POWER_UP) {
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
+ if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
+ pdata->power) {
+ pdata->power = false;
+ pm_runtime_put(&host->pdev->dev);
+ }
+ tmio_mmc_clk_stop(host);
}
switch (ios->bus_width) {
@@ -817,6 +789,8 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
current->comm, task_pid_nr(current),
ios->clock, ios->power_mode);
host->mrq = NULL;
+
+ mutex_unlock(&host->ios_lock);
}
static int tmio_mmc_get_ro(struct mmc_host *mmc)
@@ -913,16 +887,20 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
tmio_mmc_enable_sdio_irq(mmc, 0);
spin_lock_init(&_host->lock);
+ mutex_init(&_host->ios_lock);
/* Init delayed work for request timeouts */
INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work);
+ INIT_WORK(&_host->done, tmio_mmc_done_work);
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
/* We have to keep the device powered for its card detection to work */
- if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD))
+ if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
+ pdata->power = true;
pm_runtime_get_noresume(&pdev->dev);
+ }
mmc_add_host(mmc);
@@ -963,6 +941,7 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
pm_runtime_get_sync(&pdev->dev);
mmc_remove_host(host->mmc);
+ cancel_work_sync(&host->done);
cancel_delayed_work_sync(&host->delayed_reset_work);
tmio_mmc_release_dma(host);
@@ -998,11 +977,16 @@ int tmio_mmc_host_resume(struct device *dev)
/* The MMC core will perform the complete set up */
host->pdata->power = false;
+ host->pm_global = true;
if (!host->pm_error)
pm_runtime_get_sync(dev);
- tmio_mmc_reset(mmc_priv(mmc));
- tmio_mmc_request_dma(host, host->pdata);
+ if (host->pm_global) {
+ /* Runtime PM resume callback didn't run */
+ tmio_mmc_reset(host);
+ tmio_mmc_enable_dma(host, true);
+ host->pm_global = false;
+ }
return mmc_resume_host(mmc);
}
@@ -1023,12 +1007,15 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
struct tmio_mmc_data *pdata = host->pdata;
tmio_mmc_reset(host);
+ tmio_mmc_enable_dma(host, true);
if (pdata->power) {
/* Only entered after a card-insert interrupt */
- tmio_mmc_set_ios(mmc, &mmc->ios);
+ if (!mmc->card)
+ tmio_mmc_set_ios(mmc, &mmc->ios);
mmc_detect_change(mmc, msecs_to_jiffies(100));
}
+ host->pm_global = false;
return 0;
}
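The central change in tmio_mmc_pio.c is that interrupt context no longer calls tmio_mmc_finish_request() directly: mmc_request_done() may sleep, so completion is deferred to a work item and finished in process context, with finish_request now taking the lock itself. The deferral pattern in a hedged sketch (all foo_* names are illustrative):

struct foo_host {
	struct work_struct done;
	/* ... driver state ... */
};

static void foo_finish_request(struct foo_host *host);	/* hypothetical, may sleep */

static void foo_done_work(struct work_struct *work)
{
	struct foo_host *host = container_of(work, struct foo_host, done);

	foo_finish_request(host);	/* process context: sleeping is legal */
}

static void foo_probe_setup(struct foo_host *host)
{
	INIT_WORK(&host->done, foo_done_work);
}

static void foo_irq_complete(struct foo_host *host)
{
	/* interrupt context: never finish here, just queue the work */
	schedule_work(&host->done);
}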
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 65626c1c446..6c3fb5ab20f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -953,10 +953,14 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (!ubi->peb_buf2)
goto out_free;
+ err = ubi_debugging_init_dev(ubi);
+ if (err)
+ goto out_free;
+
err = attach_by_scanning(ubi);
if (err) {
dbg_err("failed to attach by scanning, error %d", err);
- goto out_free;
+ goto out_debugging;
}
if (ubi->autoresize_vol_id != -1) {
@@ -969,12 +973,16 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
if (err)
goto out_detach;
+ err = ubi_debugfs_init_dev(ubi);
+ if (err)
+ goto out_uif;
+
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
err = PTR_ERR(ubi->bgt_thread);
ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
err);
- goto out_uif;
+ goto out_debugfs;
}
ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
@@ -1008,12 +1016,18 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
return ubi_num;
+out_debugfs:
+ ubi_debugfs_exit_dev(ubi);
out_uif:
+ get_device(&ubi->dev);
+ ubi_assert(ref);
uif_close(ubi);
out_detach:
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
+out_debugging:
+ ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
@@ -1080,11 +1094,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
*/
get_device(&ubi->dev);
+ ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
ubi_wl_close(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
+ ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
@@ -1199,6 +1215,11 @@ static int __init ubi_init(void)
if (!ubi_wl_entry_slab)
goto out_dev_unreg;
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
struct mtd_dev_param *p = &mtd_dev_param[i];
@@ -1247,6 +1268,8 @@ out_detach:
ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
+ ubi_debugfs_exit();
+out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
misc_deregister(&ubi_ctrl_cdev);
@@ -1270,6 +1293,7 @@ static void __exit ubi_exit(void)
ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
mutex_unlock(&ubi_devices_mutex);
}
+ ubi_debugfs_exit();
kmem_cache_destroy(ubi_wl_entry_slab);
misc_deregister(&ubi_ctrl_cdev);
class_remove_file(ubi_class, &ubi_version);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 2224cbe41dd..ab80c0debac 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -27,17 +27,9 @@
#ifdef CONFIG_MTD_UBI_DEBUG
#include "ubi.h"
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-unsigned int ubi_chk_flags;
-unsigned int ubi_tst_flags;
-
-module_param_named(debug_chks, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
-module_param_named(debug_tsts, ubi_chk_flags, uint, S_IRUGO | S_IWUSR);
-
-MODULE_PARM_DESC(debug_chks, "Debug check flags");
-MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
/**
* ubi_dbg_dump_ec_hdr - dump an erase counter header.
@@ -239,4 +231,261 @@ out:
return;
}
+/**
+ * ubi_debugging_init_dev - initialize debugging for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function initializes debugging-related data for UBI device @ubi.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubi_debugging_init_dev(struct ubi_device *ubi)
+{
+ ubi->dbg = kzalloc(sizeof(struct ubi_debug_info), GFP_KERNEL);
+ if (!ubi->dbg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ubi_debugging_exit_dev - free debugging data for an UBI device.
+ * @ubi: UBI device description object
+ */
+void ubi_debugging_exit_dev(struct ubi_device *ubi)
+{
+ kfree(ubi->dbg);
+}
+
+/*
+ * Root directory for UBI entries in debugfs. Contains sub-directories that
+ * hold the entries specific to particular UBI devices.
+ */
+static struct dentry *dfs_rootdir;
+
+/**
+ * ubi_debugfs_init - create UBI debugfs directory.
+ *
+ * Create UBI debugfs directory. Returns zero in case of success and a negative
+ * error code in case of failure.
+ */
+int ubi_debugfs_init(void)
+{
+ dfs_rootdir = debugfs_create_dir("ubi", NULL);
+ if (IS_ERR_OR_NULL(dfs_rootdir)) {
+ int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+
+ ubi_err("cannot create \"ubi\" debugfs directory, error %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ubi_debugfs_exit - remove UBI debugfs directory.
+ */
+void ubi_debugfs_exit(void)
+{
+ debugfs_remove(dfs_rootdir);
+}
+
+/* Read an UBI debugfs file */
+static ssize_t dfs_file_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ char buf[3];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = ubi->dbg;
+
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_io)
+ val = d->chk_io;
+ else if (dent == d->dfs_disable_bgt)
+ val = d->disable_bgt;
+ else if (dent == d->dfs_emulate_bitflips)
+ val = d->emulate_bitflips;
+ else if (dent == d->dfs_emulate_io_failures)
+ val = d->emulate_io_failures;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ count = simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+/* Write an UBI debugfs file */
+static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long ubi_num = (unsigned long)file->private_data;
+ struct dentry *dent = file->f_path.dentry;
+ struct ubi_device *ubi;
+ struct ubi_debug_info *d;
+ size_t buf_size;
+ char buf[8];
+ int val;
+
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return -ENODEV;
+ d = ubi->dbg;
+
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size)) {
+ count = -EFAULT;
+ goto out;
+ }
+
+ if (buf[0] == '1')
+ val = 1;
+ else if (buf[0] == '0')
+ val = 0;
+ else {
+ count = -EINVAL;
+ goto out;
+ }
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_io)
+ d->chk_io = val;
+ else if (dent == d->dfs_disable_bgt)
+ d->disable_bgt = val;
+ else if (dent == d->dfs_emulate_bitflips)
+ d->emulate_bitflips = val;
+ else if (dent == d->dfs_emulate_io_failures)
+ d->emulate_io_failures = val;
+ else
+ count = -EINVAL;
+
+out:
+ ubi_put_device(ubi);
+ return count;
+}
+
+static int default_open(struct inode *inode, struct file *file)
+{
+ if (inode->i_private)
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+/* File operations for all UBI debugfs files */
+static const struct file_operations dfs_fops = {
+ .read = dfs_file_read,
+ .write = dfs_file_write,
+ .open = default_open,
+ .llseek = no_llseek,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * ubi_debugfs_init_dev - initialize debugfs for an UBI device.
+ * @ubi: UBI device description object
+ *
+ * This function creates all debugfs files for UBI device @ubi. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+int ubi_debugfs_init_dev(struct ubi_device *ubi)
+{
+ int err, n;
+ unsigned long ubi_num = ubi->ubi_num;
+ const char *fname;
+ struct dentry *dent;
+ struct ubi_debug_info *d = ubi->dbg;
+
+ n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
+ ubi->ubi_num);
+ if (n == UBI_DFS_DIR_LEN) {
+ /* The array size is too small */
+ fname = UBI_DFS_DIR_NAME;
+ dent = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ fname = d->dfs_dir_name;
+ dent = debugfs_create_dir(fname, dfs_rootdir);
+ if (IS_ERR_OR_NULL(dent))
+ goto out;
+ d->dfs_dir = dent;
+
+ fname = "chk_gen";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_gen = dent;
+
+ fname = "chk_io";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_io = dent;
+
+ fname = "tst_disable_bgt";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_disable_bgt = dent;
+
+ fname = "tst_emulate_bitflips";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_bitflips = dent;
+
+ fname = "tst_emulate_io_failures";
+ dent = debugfs_create_file(fname, S_IWUSR, d->dfs_dir, (void *)ubi_num,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_emulate_io_failures = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(d->dfs_dir);
+out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+ fname, err);
+ return err;
+}
+
+/**
+ * ubi_debugfs_exit_dev - free all debugfs files corresponding to device @ubi
+ * @ubi: UBI device description object
+ */
+void ubi_debugfs_exit_dev(struct ubi_device *ubi)
+{
+ debugfs_remove_recursive(ubi->dbg->dfs_dir);
+}
+
#endif /* CONFIG_MTD_UBI_DEBUG */
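Once a device is attached, the knobs live under the new debugfs tree. Assuming debugfs is mounted at /sys/kernel/debug and the device came up as ubi0, a root shell can drive them like this (the files are created write-only for the owner, but root bypasses DAC checks, so the read handler above is still reachable):

	echo 1 > /sys/kernel/debug/ubi/ubi0/tst_disable_bgt	# park the background thread
	echo 1 > /sys/kernel/debug/ubi/ubi0/tst_emulate_bitflips
	cat /sys/kernel/debug/ubi/ubi0/chk_gen			# prints "1" or "0"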
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 3f1a09c5c43..65b5b76cc37 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -21,14 +21,6 @@
#ifndef __UBI_DEBUG_H__
#define __UBI_DEBUG_H__
-struct ubi_ec_hdr;
-struct ubi_vid_hdr;
-struct ubi_volume;
-struct ubi_vtbl_record;
-struct ubi_scan_volume;
-struct ubi_scan_leb;
-struct ubi_mkvol_req;
-
#ifdef CONFIG_MTD_UBI_DEBUG
#include <linux/random.h>
@@ -71,86 +63,103 @@ void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
-
-extern unsigned int ubi_chk_flags;
-
-/*
- * Debugging check flags.
- *
- * UBI_CHK_GEN: general checks
- * UBI_CHK_IO: check writes and erases
- */
-enum {
- UBI_CHK_GEN = 0x1,
- UBI_CHK_IO = 0x2,
-};
-
int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len);
-
-extern unsigned int ubi_tst_flags;
+int ubi_debugging_init_dev(struct ubi_device *ubi);
+void ubi_debugging_exit_dev(struct ubi_device *ubi);
+int ubi_debugfs_init(void);
+void ubi_debugfs_exit(void);
+int ubi_debugfs_init_dev(struct ubi_device *ubi);
+void ubi_debugfs_exit_dev(struct ubi_device *ubi);
/*
- * Special testing flags.
+ * The UBI debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 2 for the number + 1 for the trailing zero byte).
+ */
+#define UBI_DFS_DIR_NAME "ubi%d"
+#define UBI_DFS_DIR_LEN (3 + 2 + 1)
+
+/**
+ * struct ubi_debug_info - debugging information for an UBI device.
*
- * UBIFS_TST_DISABLE_BGT: disable the background thread
- * UBI_TST_EMULATE_BITFLIPS: emulate bit-flips
- * UBI_TST_EMULATE_WRITE_FAILURES: emulate write failures
- * UBI_TST_EMULATE_ERASE_FAILURES: emulate erase failures
+ * @chk_gen: if UBI general extra checks are enabled
+ * @chk_io: if UBI I/O extra checks are enabled
+ * @disable_bgt: disable the background task for testing purposes
+ * @emulate_bitflips: emulate bit-flips for testing purposes
+ * @emulate_io_failures: emulate write/erase failures for testing purposes
+ * @dfs_dir_name: name of debugfs directory containing files of this UBI device
+ * @dfs_dir: direntry object of the UBI device debugfs directory
+ * @dfs_chk_gen: debugfs knob to enable UBI general extra checks
+ * @dfs_chk_io: debugfs knob to enable UBI I/O extra checks
+ * @dfs_disable_bgt: debugfs knob to disable the background task
+ * @dfs_emulate_bitflips: debugfs knob to emulate bit-flips
+ * @dfs_emulate_io_failures: debugfs knob to emulate write/erase failures
*/
-enum {
- UBI_TST_DISABLE_BGT = 0x1,
- UBI_TST_EMULATE_BITFLIPS = 0x2,
- UBI_TST_EMULATE_WRITE_FAILURES = 0x4,
- UBI_TST_EMULATE_ERASE_FAILURES = 0x8,
+struct ubi_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_io:1;
+ unsigned int disable_bgt:1;
+ unsigned int emulate_bitflips:1;
+ unsigned int emulate_io_failures:1;
+ char dfs_dir_name[UBI_DFS_DIR_LEN + 1];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_io;
+ struct dentry *dfs_disable_bgt;
+ struct dentry *dfs_emulate_bitflips;
+ struct dentry *dfs_emulate_io_failures;
};
/**
* ubi_dbg_is_bgt_disabled - if the background thread is disabled.
+ * @ubi: UBI device description object
*
* Returns non-zero if the UBI background thread is disabled for testing
* purposes.
*/
-static inline int ubi_dbg_is_bgt_disabled(void)
+static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
{
- return ubi_tst_flags & UBI_TST_DISABLE_BGT;
+ return ubi->dbg->disable_bgt;
}
/**
* ubi_dbg_is_bitflip - if it is time to emulate a bit-flip.
+ * @ubi: UBI device description object
*
* Returns non-zero if a bit-flip should be emulated, otherwise returns zero.
*/
-static inline int ubi_dbg_is_bitflip(void)
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_BITFLIPS)
+ if (ubi->dbg->emulate_bitflips)
return !(random32() % 200);
return 0;
}
/**
* ubi_dbg_is_write_failure - if it is time to emulate a write failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if a write failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_write_failure(void)
+static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_WRITE_FAILURES)
+ if (ubi->dbg->emulate_io_failures)
return !(random32() % 500);
return 0;
}
/**
* ubi_dbg_is_erase_failure - if it is time to emulate an erase failure.
+ * @ubi: UBI device description object
*
* Returns non-zero if an erase failure should be emulated, otherwise returns
* zero.
*/
-static inline int ubi_dbg_is_erase_failure(void)
+static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
- if (ubi_tst_flags & UBI_TST_EMULATE_ERASE_FAILURES)
+ if (ubi->dbg->emulate_io_failures)
return !(random32() % 400);
return 0;
}
@@ -201,11 +210,6 @@ static inline void ubi_dbg_dump_flash(struct ubi_device *ubi,
static inline void
ubi_dbg_print_hex_dump(const char *l, const char *ps, int pt, int r,
int g, const void *b, size_t len, bool a) { return; }
-
-static inline int ubi_dbg_is_bgt_disabled(void) { return 0; }
-static inline int ubi_dbg_is_bitflip(void) { return 0; }
-static inline int ubi_dbg_is_write_failure(void) { return 0; }
-static inline int ubi_dbg_is_erase_failure(void) { return 0; }
static inline int ubi_dbg_check_all_ff(struct ubi_device *ubi,
int pnum, int offset,
int len) { return 0; }
@@ -213,5 +217,20 @@ static inline int ubi_dbg_check_write(struct ubi_device *ubi,
const void *buf, int pnum,
int offset, int len) { return 0; }
+static inline int ubi_debugging_init_dev(struct ubi_device *ubi) { return 0; }
+static inline void ubi_debugging_exit_dev(struct ubi_device *ubi) { return; }
+static inline int ubi_debugfs_init(void) { return 0; }
+static inline void ubi_debugfs_exit(void) { return; }
+static inline int ubi_debugfs_init_dev(struct ubi_device *ubi) { return 0; }
+static inline void ubi_debugfs_exit_dev(struct ubi_device *ubi) { return; }
+
+static inline int
+ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi) { return 0; }
+static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi) { return 0; }
+static inline int
+ubi_dbg_is_write_failure(const struct ubi_device *ubi) { return 0; }
+static inline int
+ubi_dbg_is_erase_failure(const struct ubi_device *ubi) { return 0; }
+
#endif /* !CONFIG_MTD_UBI_DEBUG */
#endif /* !__UBI_DEBUG_H__ */
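The #else branch is what keeps every call site compilable when CONFIG_MTD_UBI_DEBUG is off: each helper collapses to a static inline returning 0, so the compiler folds the checks away and no #ifdefs leak into the callers. The same pattern in miniature, with hypothetical example_* names:

struct example_dev;

#ifdef CONFIG_EXAMPLE_DEBUG
int example_check(struct example_dev *dev);	/* real checker lives in debug.c */
#else
static inline int example_check(struct example_dev *dev) { return 0; }
#endif

static int example_op(struct example_dev *dev)
{
	/* no #ifdef here; with debugging off this reduces to 'return 0' */
	return example_check(dev);
}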
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8c1b1c7bc4a..6ba55c23587 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -212,7 +212,7 @@ retry:
} else {
ubi_assert(len == read);
- if (ubi_dbg_is_bitflip()) {
+ if (ubi_dbg_is_bitflip(ubi)) {
dbg_gen("bit-flip (emulated)");
err = UBI_IO_BITFLIPS;
}
@@ -281,7 +281,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
return err;
}
- if (ubi_dbg_is_write_failure()) {
+ if (ubi_dbg_is_write_failure(ubi)) {
dbg_err("cannot write %d bytes to PEB %d:%d "
"(emulated)", len, pnum, offset);
ubi_dbg_dump_stack();
@@ -396,7 +396,7 @@ retry:
if (err)
return err;
- if (ubi_dbg_is_erase_failure()) {
+ if (ubi_dbg_is_erase_failure(ubi)) {
dbg_err("cannot erase PEB %d (emulated)", pnum);
return -EIO;
}
@@ -1146,7 +1146,7 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
{
int err;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
err = ubi_io_is_bad(ubi, pnum);
@@ -1173,7 +1173,7 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
magic = be32_to_cpu(ec_hdr->magic);
@@ -1211,7 +1211,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1255,7 +1255,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
int err;
uint32_t magic;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
magic = be32_to_cpu(vid_hdr->magic);
@@ -1296,7 +1296,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1348,7 +1348,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
void *buf1;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
@@ -1412,7 +1412,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
- if (!(ubi_chk_flags & UBI_CHK_IO))
+ if (!ubi->dbg->chk_io)
return 0;
buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 2135a53732f..a3a198f9b98 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -1347,7 +1347,7 @@ static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
struct ubi_scan_leb *seb, *last_seb;
uint8_t *buf;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
/*
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index c6c22295898..dc64c767fd2 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -44,7 +44,6 @@
#include "ubi-media.h"
#include "scan.h"
-#include "debug.h"
/* Maximum number of supported UBI devices */
#define UBI_MAX_DEVICES 32
@@ -390,6 +389,8 @@ struct ubi_wl_entry;
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: protects @peb_buf1 and @peb_buf2
* @ckvol_mutex: serializes static volume checking when opening
+ *
+ * @dbg: debugging information for this UBI device
*/
struct ubi_device {
struct cdev cdev;
@@ -472,8 +473,12 @@ struct ubi_device {
void *peb_buf2;
struct mutex buf_mutex;
struct mutex ckvol_mutex;
+
+ struct ubi_debug_info *dbg;
};
+#include "debug.h"
+
extern struct kmem_cache *ubi_wl_entry_slab;
extern const struct file_operations ubi_ctrl_cdev_operations;
extern const struct file_operations ubi_cdev_operations;
@@ -662,6 +667,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
if (!ubi->ro_mode) {
ubi->ro_mode = 1;
ubi_warn("switch to read-only mode");
+ ubi_dbg_dump_stack();
}
}
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 366eb70219a..97e093d1967 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -871,7 +871,7 @@ static int paranoid_check_volumes(struct ubi_device *ubi)
{
int i, err = 0;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
for (i = 0; i < ubi->vtbl_slots; i++) {
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index fd3bf770f51..4b50a3029b8 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -307,8 +307,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
{
int err, tries = 0;
static struct ubi_vid_hdr *vid_hdr;
- struct ubi_scan_volume *sv;
- struct ubi_scan_leb *new_seb, *old_seb = NULL;
+ struct ubi_scan_leb *new_seb;
ubi_msg("create volume table (copy #%d)", copy + 1);
@@ -316,15 +315,6 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
if (!vid_hdr)
return -ENOMEM;
- /*
- * Check if there is a logical eraseblock which would have to contain
- * this volume table copy was found during scanning. It has to be wiped
- * out.
- */
- sv = ubi_scan_find_sv(si, UBI_LAYOUT_VOLUME_ID);
- if (sv)
- old_seb = ubi_scan_find_seb(sv, copy);
-
retry:
new_seb = ubi_scan_get_free_peb(ubi, si);
if (IS_ERR(new_seb)) {
@@ -351,8 +341,8 @@ retry:
goto write_error;
/*
- * And add it to the scanning information. Don't delete the old
- * @old_seb as it will be deleted and freed in 'ubi_scan_add_used()'.
+ * And add it to the scanning information. Don't delete the old version
+ * of this LEB as it will be deleted and freed in 'ubi_scan_add_used()'.
*/
err = ubi_scan_add_used(ubi, si, new_seb->pnum, new_seb->ec,
vid_hdr, 0);
@@ -876,7 +866,7 @@ out_free:
*/
static void paranoid_vtbl_check(const struct ubi_device *ubi)
{
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return;
if (vtbl_check(ubi, ubi->vtbl)) {
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index ff2c4956eef..42c684cf368 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1,4 +1,5 @@
/*
* Copyright (c) International Business Machines Corp., 2006
*
* This program is free software; you can redistribute it and/or modify
@@ -163,12 +164,14 @@ struct ubi_work {
#ifdef CONFIG_MTD_UBI_DEBUG
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
+static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
struct rb_root *root);
-static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
+static int paranoid_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e);
#else
#define paranoid_check_ec(ubi, pnum, ec) 0
-#define paranoid_check_in_wl_tree(e, root)
+#define paranoid_check_in_wl_tree(ubi, e, root)
#define paranoid_check_in_pq(ubi, e) 0
#endif
@@ -449,7 +452,7 @@ retry:
BUG();
}
- paranoid_check_in_wl_tree(e, &ubi->free);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->free);
/*
* Move the physical eraseblock to the protection queue where it will
@@ -613,7 +616,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
list_add_tail(&wrk->list, &ubi->works);
ubi_assert(ubi->works_count >= 0);
ubi->works_count += 1;
- if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled())
+ if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
wake_up_process(ubi->bgt_thread);
spin_unlock(&ubi->wl_lock);
}
@@ -712,7 +715,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
e1->ec, e2->ec);
goto out_cancel;
}
- paranoid_check_in_wl_tree(e1, &ubi->used);
+ paranoid_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
@@ -721,12 +724,12 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
- paranoid_check_in_wl_tree(e1, &ubi->scrub);
+ paranoid_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
- paranoid_check_in_wl_tree(e2, &ubi->free);
+ paranoid_check_in_wl_tree(ubi, e2, &ubi->free);
rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
@@ -1169,13 +1172,13 @@ retry:
return 0;
} else {
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
- paranoid_check_in_wl_tree(e, &ubi->scrub);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->scrub);
rb_erase(&e->u.rb, &ubi->scrub);
} else if (in_wl_tree(e, &ubi->erroneous)) {
- paranoid_check_in_wl_tree(e, &ubi->erroneous);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->erroneous);
rb_erase(&e->u.rb, &ubi->erroneous);
ubi->erroneous_peb_count -= 1;
ubi_assert(ubi->erroneous_peb_count >= 0);
@@ -1242,7 +1245,7 @@ retry:
}
if (in_wl_tree(e, &ubi->used)) {
- paranoid_check_in_wl_tree(e, &ubi->used);
+ paranoid_check_in_wl_tree(ubi, e, &ubi->used);
rb_erase(&e->u.rb, &ubi->used);
} else {
int err;
@@ -1364,7 +1367,7 @@ int ubi_thread(void *u)
spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works) || ubi->ro_mode ||
- !ubi->thread_enabled || ubi_dbg_is_bgt_disabled()) {
+ !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&ubi->wl_lock);
schedule();
@@ -1579,7 +1582,7 @@ static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1609,16 +1612,18 @@ out_free:
/**
* paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
+ * @ubi: UBI device description object
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
* This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
* is not.
*/
-static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
+static int paranoid_check_in_wl_tree(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e,
struct rb_root *root)
{
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
if (in_wl_tree(e, root))
@@ -1638,12 +1643,13 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
*
* This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
-static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
+static int paranoid_check_in_pq(const struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
{
struct ubi_wl_entry *p;
int i;
- if (!(ubi_chk_flags & UBI_CHK_GEN))
+ if (!ubi->dbg->chk_gen)
return 0;
for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
diff --git a/drivers/net/bna/bfa_cee.c b/drivers/net/bna/bfa_cee.c
index dcfbf08bcf4..39e5ab9fde5 100644
--- a/drivers/net/bna/bfa_cee.c
+++ b/drivers/net/bna/bfa_cee.c
@@ -223,44 +223,56 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
}
/**
- * bfa_cee_hbfail()
+ * bfa_cee_notify()
*
- * @brief CEE module heart-beat failure handler.
+ * @brief CEE module IOC event handler.
*
* @param[in] Pointer to the CEE module data structure.
+ * @param[in] IOC event type
*
* @return void
*/
static void
-bfa_cee_hbfail(void *arg)
+bfa_cee_notify(void *arg, enum bfa_ioc_event event)
{
struct bfa_cee *cee;
- cee = arg;
+ cee = (struct bfa_cee *) arg;
- if (cee->get_attr_pending == true) {
- cee->get_attr_status = BFA_STATUS_FAILED;
- cee->get_attr_pending = false;
- if (cee->cbfn.get_attr_cbfn) {
- cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
- BFA_STATUS_FAILED);
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (cee->get_attr_pending == true) {
+ cee->get_attr_status = BFA_STATUS_FAILED;
+ cee->get_attr_pending = false;
+ if (cee->cbfn.get_attr_cbfn) {
+ cee->cbfn.get_attr_cbfn(
+ cee->cbfn.get_attr_cbarg,
+ BFA_STATUS_FAILED);
+ }
}
- }
- if (cee->get_stats_pending == true) {
- cee->get_stats_status = BFA_STATUS_FAILED;
- cee->get_stats_pending = false;
- if (cee->cbfn.get_stats_cbfn) {
- cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
- BFA_STATUS_FAILED);
+ if (cee->get_stats_pending == true) {
+ cee->get_stats_status = BFA_STATUS_FAILED;
+ cee->get_stats_pending = false;
+ if (cee->cbfn.get_stats_cbfn) {
+ cee->cbfn.get_stats_cbfn(
+ cee->cbfn.get_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
}
- }
- if (cee->reset_stats_pending == true) {
- cee->reset_stats_status = BFA_STATUS_FAILED;
- cee->reset_stats_pending = false;
- if (cee->cbfn.reset_stats_cbfn) {
- cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
- BFA_STATUS_FAILED);
+ if (cee->reset_stats_pending == true) {
+ cee->reset_stats_status = BFA_STATUS_FAILED;
+ cee->reset_stats_pending = false;
+ if (cee->cbfn.reset_stats_cbfn) {
+ cee->cbfn.reset_stats_cbfn(
+ cee->cbfn.reset_stats_cbarg,
+ BFA_STATUS_FAILED);
+ }
}
+ break;
+
+ default:
+ break;
}
}
@@ -286,6 +298,7 @@ bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc,
cee->ioc = ioc;
bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
- bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
- bfa_nw_ioc_hbfail_register(cee->ioc, &cee->hbfail);
+ bfa_q_qe_init(&cee->ioc_notify);
+ bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee);
+ bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify);
}
diff --git a/drivers/net/bna/bfa_cee.h b/drivers/net/bna/bfa_cee.h
index 20543d15b64..58d54e98d59 100644
--- a/drivers/net/bna/bfa_cee.h
+++ b/drivers/net/bna/bfa_cee.h
@@ -25,7 +25,6 @@
typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status);
typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status);
-typedef void (*bfa_cee_hbfail_cbfn_t) (void *dev, enum bfa_status status);
struct bfa_cee_cbfn {
bfa_cee_get_attr_cbfn_t get_attr_cbfn;
@@ -45,7 +44,7 @@ struct bfa_cee {
enum bfa_status get_stats_status;
enum bfa_status reset_stats_status;
struct bfa_cee_cbfn cbfn;
- struct bfa_ioc_hbfail_notify hbfail;
+ struct bfa_ioc_notify ioc_notify;
struct bfa_cee_attr *attr;
struct bfa_cee_stats *stats;
struct bfa_dma attr_dma;
diff --git a/drivers/net/bna/bfa_sm.h b/drivers/net/bna/bfa_cs.h
index 46462c49b6f..3da1a946ccd 100644
--- a/drivers/net/bna/bfa_sm.h
+++ b/drivers/net/bna/bfa_cs.h
@@ -11,20 +11,24 @@
* General Public License for more details.
*/
/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
/**
- * @file bfasm.h State machine defines
+ * @file bfa_cs.h BFA common services
*/
-#ifndef __BFA_SM_H__
-#define __BFA_SM_H__
+#ifndef __BFA_CS_H__
+#define __BFA_CS_H__
#include "cna.h"
+/**
+ * @ BFA state machine interfaces
+ */
+
typedef void (*bfa_sm_t)(void *sm, int event);
/**
@@ -33,7 +37,7 @@ typedef void (*bfa_sm_t)(void *sm, int event);
* otype - object type, eg. struct bfa_ioc
* etype - event type, eg. enum ioc_event
*/
-#define bfa_sm_state_decl(oc, st, otype, etype) \
+#define bfa_sm_state_decl(oc, st, otype, etype) \
static void oc ## _sm_ ## st(otype * fsm, etype event)
#define bfa_sm_set_state(_sm, _state) ((_sm)->sm = (bfa_sm_t)(_state))
@@ -49,7 +53,7 @@ struct bfa_sm_table {
int state; /*!< state machine encoding */
char *name; /*!< state name for display */
};
-#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
+#define BFA_SM(_sm) ((bfa_sm_t)(_sm))
/**
* State machine with entry actions.
@@ -62,18 +66,18 @@ typedef void (*bfa_fsm_t)(void *fsm, int event);
* otype - object type, eg. struct bfa_ioc
* etype - event type, eg. enum ioc_event
*/
-#define bfa_fsm_state_decl(oc, st, otype, etype) \
- static void oc ## _sm_ ## st(otype * fsm, etype event); \
+#define bfa_fsm_state_decl(oc, st, otype, etype) \
+ static void oc ## _sm_ ## st(otype * fsm, etype event); \
static void oc ## _sm_ ## st ## _entry(otype * fsm)
-#define bfa_fsm_set_state(_fsm, _state) do { \
- (_fsm)->fsm = (bfa_fsm_t)(_state); \
- _state ## _entry(_fsm); \
+#define bfa_fsm_set_state(_fsm, _state) do { \
+ (_fsm)->fsm = (bfa_fsm_t)(_state); \
+ _state ## _entry(_fsm); \
} while (0)
#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event)))
#define bfa_fsm_get_state(_fsm) ((_fsm)->fsm)
-#define bfa_fsm_cmp_state(_fsm, _state) \
+#define bfa_fsm_cmp_state(_fsm, _state) \
((_fsm)->fsm == (bfa_fsm_t)(_state))
static inline int
@@ -85,4 +89,52 @@ bfa_sm_to_state(const struct bfa_sm_table *smt, bfa_sm_t sm)
i++;
return smt[i].state;
}
-#endif
+
+/**
+ * @ Generic wait counter.
+ */
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc {
+ bfa_wc_resume_t wc_resume;
+ void *wc_cbarg;
+ int wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc *wc)
+{
+ wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc *wc)
+{
+ wc->wc_count--;
+ if (wc->wc_count == 0)
+ wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+ wc->wc_resume = wc_resume;
+ wc->wc_cbarg = wc_cbarg;
+ wc->wc_count = 0;
+ bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc *wc)
+{
+ bfa_wc_down(wc);
+}
+
+#endif /* __BFA_CS_H__ */
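The bfa_wc block added above (moved verbatim from the deleted bfa_wc.h, see further down) is a join counter: bfa_wc_init() takes one reference, each pending operation takes another via bfa_wc_up(), and the resume callback fires once bfa_wc_wait() plus the completions bring the count to zero. A hedged usage sketch; everything except the bfa_wc_* calls is hypothetical:

static void
foo_stop_resume(void *cbarg)
{
	struct foo *foo = cbarg;	/* hypothetical object */
	/* both children are now stopped; continue teardown here */
}

static void
foo_stop(struct foo *foo)
{
	bfa_wc_init(&foo->stop_wc, foo_stop_resume, foo);	/* count = 1 */
	bfa_wc_up(&foo->stop_wc);	/* child A pending */
	bfa_wc_up(&foo->stop_wc);	/* child B pending */
	child_a_stop(foo);		/* its completion calls bfa_wc_down() */
	child_b_stop(foo);		/* its completion calls bfa_wc_down() */
	bfa_wc_wait(&foo->stop_wc);	/* drop the init reference */
}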
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
index 2ea0dfe1ced..b080b3698f4 100644
--- a/drivers/net/bna/bfa_defs.h
+++ b/drivers/net/bna/bfa_defs.h
@@ -80,7 +80,7 @@ struct bfa_adapter_attr {
enum {
BFA_IOC_DRIVER_LEN = 16,
- BFA_IOC_CHIP_REV_LEN = 8,
+ BFA_IOC_CHIP_REV_LEN = 8,
};
/**
@@ -153,6 +153,7 @@ struct bfa_ioc_drv_stats {
u32 enable_reqs;
u32 disable_replies;
u32 enable_replies;
+ u32 rsvd;
};
/**
@@ -174,7 +175,7 @@ enum bfa_ioc_type {
*/
struct bfa_ioc_attr {
enum bfa_ioc_type ioc_type;
- enum bfa_ioc_state state; /*!< IOC state */
+ enum bfa_ioc_state state; /*!< IOC state */
struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */
struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */
struct bfa_ioc_pci_attr pci_attr;
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
index fdd67761836..885ef3afdd4 100644
--- a/drivers/net/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -192,14 +192,14 @@ do { \
* VPD vendor tag
*/
enum {
- BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
- BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
- BFA_MFG_VPD_HP = 2, /*!< vendor HP */
- BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
- BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
- BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
- BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
- BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
+ BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */
+ BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */
+ BFA_MFG_VPD_HP = 2, /*!< vendor HP */
+ BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */
+ BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */
+ BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */
+ BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */
+ BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */
};
/**
@@ -212,8 +212,8 @@ struct bfa_mfg_vpd {
u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */
u8 chksum; /*!< u8 checksum */
u8 vendor; /*!< vendor */
- u8 len; /*!< vpd data length excluding header */
- u8 rsv;
+ u8 len; /*!< vpd data length excluding header */
+ u8 rsv;
u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */
};
diff --git a/drivers/net/bna/bfa_defs_status.h b/drivers/net/bna/bfa_defs_status.h
index af951126375..7c5fe6c2e80 100644
--- a/drivers/net/bna/bfa_defs_status.h
+++ b/drivers/net/bna/bfa_defs_status.h
@@ -25,95 +25,95 @@
* comments are supported
*/
enum bfa_status {
- BFA_STATUS_OK = 0,
- BFA_STATUS_FAILED = 1,
- BFA_STATUS_EINVAL = 2,
- BFA_STATUS_ENOMEM = 3,
- BFA_STATUS_ENOSYS = 4,
- BFA_STATUS_ETIMER = 5,
- BFA_STATUS_EPROTOCOL = 6,
- BFA_STATUS_ENOFCPORTS = 7,
- BFA_STATUS_NOFLASH = 8,
- BFA_STATUS_BADFLASH = 9,
- BFA_STATUS_SFP_UNSUPP = 10,
+ BFA_STATUS_OK = 0,
+ BFA_STATUS_FAILED = 1,
+ BFA_STATUS_EINVAL = 2,
+ BFA_STATUS_ENOMEM = 3,
+ BFA_STATUS_ENOSYS = 4,
+ BFA_STATUS_ETIMER = 5,
+ BFA_STATUS_EPROTOCOL = 6,
+ BFA_STATUS_ENOFCPORTS = 7,
+ BFA_STATUS_NOFLASH = 8,
+ BFA_STATUS_BADFLASH = 9,
+ BFA_STATUS_SFP_UNSUPP = 10,
BFA_STATUS_UNKNOWN_VFID = 11,
BFA_STATUS_DATACORRUPTED = 12,
- BFA_STATUS_DEVBUSY = 13,
- BFA_STATUS_ABORTED = 14,
- BFA_STATUS_NODEV = 15,
- BFA_STATUS_HDMA_FAILED = 16,
+ BFA_STATUS_DEVBUSY = 13,
+ BFA_STATUS_ABORTED = 14,
+ BFA_STATUS_NODEV = 15,
+ BFA_STATUS_HDMA_FAILED = 16,
BFA_STATUS_FLASH_BAD_LEN = 17,
BFA_STATUS_UNKNOWN_LWWN = 18,
BFA_STATUS_UNKNOWN_RWWN = 19,
- BFA_STATUS_FCPT_LS_RJT = 20,
+ BFA_STATUS_FCPT_LS_RJT = 20,
BFA_STATUS_VPORT_EXISTS = 21,
- BFA_STATUS_VPORT_MAX = 22,
+ BFA_STATUS_VPORT_MAX = 22,
BFA_STATUS_UNSUPP_SPEED = 23,
- BFA_STATUS_INVLD_DFSZ = 24,
- BFA_STATUS_CNFG_FAILED = 25,
- BFA_STATUS_CMD_NOTSUPP = 26,
- BFA_STATUS_NO_ADAPTER = 27,
- BFA_STATUS_LINKDOWN = 28,
- BFA_STATUS_FABRIC_RJT = 29,
+ BFA_STATUS_INVLD_DFSZ = 24,
+ BFA_STATUS_CNFG_FAILED = 25,
+ BFA_STATUS_CMD_NOTSUPP = 26,
+ BFA_STATUS_NO_ADAPTER = 27,
+ BFA_STATUS_LINKDOWN = 28,
+ BFA_STATUS_FABRIC_RJT = 29,
BFA_STATUS_UNKNOWN_VWWN = 30,
BFA_STATUS_NSLOGIN_FAILED = 31,
- BFA_STATUS_NO_RPORTS = 32,
+ BFA_STATUS_NO_RPORTS = 32,
BFA_STATUS_NSQUERY_FAILED = 33,
BFA_STATUS_PORT_OFFLINE = 34,
BFA_STATUS_RPORT_OFFLINE = 35,
BFA_STATUS_TGTOPEN_FAILED = 36,
- BFA_STATUS_BAD_LUNS = 37,
- BFA_STATUS_IO_FAILURE = 38,
- BFA_STATUS_NO_FABRIC = 39,
- BFA_STATUS_EBADF = 40,
- BFA_STATUS_EINTR = 41,
- BFA_STATUS_EIO = 42,
- BFA_STATUS_ENOTTY = 43,
- BFA_STATUS_ENXIO = 44,
- BFA_STATUS_EFOPEN = 45,
+ BFA_STATUS_BAD_LUNS = 37,
+ BFA_STATUS_IO_FAILURE = 38,
+ BFA_STATUS_NO_FABRIC = 39,
+ BFA_STATUS_EBADF = 40,
+ BFA_STATUS_EINTR = 41,
+ BFA_STATUS_EIO = 42,
+ BFA_STATUS_ENOTTY = 43,
+ BFA_STATUS_ENXIO = 44,
+ BFA_STATUS_EFOPEN = 45,
BFA_STATUS_VPORT_WWN_BP = 46,
BFA_STATUS_PORT_NOT_DISABLED = 47,
- BFA_STATUS_BADFRMHDR = 48,
- BFA_STATUS_BADFRMSZ = 49,
- BFA_STATUS_MISSINGFRM = 50,
- BFA_STATUS_LINKTIMEOUT = 51,
+ BFA_STATUS_BADFRMHDR = 48,
+ BFA_STATUS_BADFRMSZ = 49,
+ BFA_STATUS_MISSINGFRM = 50,
+ BFA_STATUS_LINKTIMEOUT = 51,
BFA_STATUS_NO_FCPIM_NEXUS = 52,
BFA_STATUS_CHECKSUM_FAIL = 53,
- BFA_STATUS_GZME_FAILED = 54,
+ BFA_STATUS_GZME_FAILED = 54,
BFA_STATUS_SCSISTART_REQD = 55,
- BFA_STATUS_IOC_FAILURE = 56,
- BFA_STATUS_INVALID_WWN = 57,
- BFA_STATUS_MISMATCH = 58,
- BFA_STATUS_IOC_ENABLED = 59,
+ BFA_STATUS_IOC_FAILURE = 56,
+ BFA_STATUS_INVALID_WWN = 57,
+ BFA_STATUS_MISMATCH = 58,
+ BFA_STATUS_IOC_ENABLED = 59,
BFA_STATUS_ADAPTER_ENABLED = 60,
- BFA_STATUS_IOC_NON_OP = 61,
+ BFA_STATUS_IOC_NON_OP = 61,
BFA_STATUS_ADDR_MAP_FAILURE = 62,
- BFA_STATUS_SAME_NAME = 63,
- BFA_STATUS_PENDING = 64,
- BFA_STATUS_8G_SPD = 65,
- BFA_STATUS_4G_SPD = 66,
+ BFA_STATUS_SAME_NAME = 63,
+ BFA_STATUS_PENDING = 64,
+ BFA_STATUS_8G_SPD = 65,
+ BFA_STATUS_4G_SPD = 66,
BFA_STATUS_AD_IS_ENABLE = 67,
- BFA_STATUS_EINVAL_TOV = 68,
+ BFA_STATUS_EINVAL_TOV = 68,
BFA_STATUS_EINVAL_QDEPTH = 69,
BFA_STATUS_VERSION_FAIL = 70,
- BFA_STATUS_DIAG_BUSY = 71,
- BFA_STATUS_BEACON_ON = 72,
- BFA_STATUS_BEACON_OFF = 73,
- BFA_STATUS_LBEACON_ON = 74,
- BFA_STATUS_LBEACON_OFF = 75,
+ BFA_STATUS_DIAG_BUSY = 71,
+ BFA_STATUS_BEACON_ON = 72,
+ BFA_STATUS_BEACON_OFF = 73,
+ BFA_STATUS_LBEACON_ON = 74,
+ BFA_STATUS_LBEACON_OFF = 75,
BFA_STATUS_PORT_NOT_INITED = 76,
BFA_STATUS_RPSC_ENABLED = 77,
- BFA_STATUS_ENOFSAVE = 78,
- BFA_STATUS_BAD_FILE = 79,
- BFA_STATUS_RLIM_EN = 80,
- BFA_STATUS_RLIM_DIS = 81,
- BFA_STATUS_IOC_DISABLED = 82,
- BFA_STATUS_ADAPTER_DISABLED = 83,
- BFA_STATUS_BIOS_DISABLED = 84,
- BFA_STATUS_AUTH_ENABLED = 85,
- BFA_STATUS_AUTH_DISABLED = 86,
- BFA_STATUS_ERROR_TRL_ENABLED = 87,
- BFA_STATUS_ERROR_QOS_ENABLED = 88,
+ BFA_STATUS_ENOFSAVE = 78,
+ BFA_STATUS_BAD_FILE = 79,
+ BFA_STATUS_RLIM_EN = 80,
+ BFA_STATUS_RLIM_DIS = 81,
+ BFA_STATUS_IOC_DISABLED = 82,
+ BFA_STATUS_ADAPTER_DISABLED = 83,
+ BFA_STATUS_BIOS_DISABLED = 84,
+ BFA_STATUS_AUTH_ENABLED = 85,
+ BFA_STATUS_AUTH_DISABLED = 86,
+ BFA_STATUS_ERROR_TRL_ENABLED = 87,
+ BFA_STATUS_ERROR_QOS_ENABLED = 88,
BFA_STATUS_NO_SFP_DEV = 89,
BFA_STATUS_MEMTEST_FAILED = 90,
BFA_STATUS_INVALID_DEVID = 91,
@@ -190,7 +190,7 @@ enum bfa_status {
BFA_STATUS_FLASH_CKFAIL = 162,
BFA_STATUS_TRUNK_UNSUPP = 163,
BFA_STATUS_TRUNK_ENABLED = 164,
- BFA_STATUS_TRUNK_DISABLED = 165,
+ BFA_STATUS_TRUNK_DISABLED = 165,
BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166,
BFA_STATUS_BOOT_CODE_UPDATED = 167,
BFA_STATUS_BOOT_VERSION = 168,
@@ -198,8 +198,8 @@ enum bfa_status {
BFA_STATUS_INVALID_CARDTYPE = 170,
BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171,
BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172,
- BFA_STATUS_ETHBOOT_ENABLED = 173,
- BFA_STATUS_ETHBOOT_DISABLED = 174,
+ BFA_STATUS_ETHBOOT_ENABLED = 173,
+ BFA_STATUS_ETHBOOT_DISABLED = 174,
BFA_STATUS_IOPROFILE_OFF = 175,
BFA_STATUS_NO_PORT_INSTANCE = 176,
BFA_STATUS_BOOT_CODE_TIMEDOUT = 177,
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index fcb9bb3169e..126b0aac9f9 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -58,6 +58,7 @@ static bool bfa_nw_auto_recover = true;
/*
* forward declarations
*/
+static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
@@ -68,9 +69,10 @@ static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
-static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
+static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
+static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
@@ -107,7 +109,7 @@ enum ioc_event {
IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
IOC_E_DISABLED = 7, /*!< f/w disabled */
IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
- IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
+ IOC_E_PFFAILED = 9, /*!< failure notice by iocpf sm */
IOC_E_HBFAIL = 10, /*!< heartbeat failure */
IOC_E_HWERROR = 11, /*!< hardware error interrupt */
IOC_E_TIMEOUT = 12, /*!< timeout */
@@ -156,7 +158,7 @@ enum iocpf_event {
IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
IOCPF_E_STOP = 3, /*!< stop on driver detach */
- IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
+ IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
@@ -239,7 +241,7 @@ bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -272,7 +274,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -294,12 +296,12 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
/* !!! fall through !!! */
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_initfail(ioc);
break;
@@ -316,7 +318,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -344,14 +346,14 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
case IOC_E_HWERROR:
del_timer(&ioc->ioc_timer);
/* fall through */
case IOC_E_TIMEOUT:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_getattrfail(ioc);
break;
@@ -364,7 +366,7 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -387,7 +389,7 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_hb_stop(ioc);
/* !!! fall through !!! */
@@ -398,12 +400,12 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
else
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_fail(ioc);
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -434,7 +436,7 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -465,7 +467,7 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -485,13 +487,13 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
break;
- case IOC_E_PFAILED:
+ case IOC_E_PFFAILED:
case IOC_E_HWERROR:
/**
* Initialization retry failed.
*/
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
- if (event != IOC_E_PFAILED)
+ if (event != IOC_E_PFFAILED)
bfa_iocpf_initfail(ioc);
break;
@@ -512,7 +514,7 @@ bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -546,7 +548,7 @@ bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -579,7 +581,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(iocpf->ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -589,6 +591,7 @@ bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
+ bfa_ioc_hw_sem_init(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -631,7 +634,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -675,7 +678,7 @@ bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -714,7 +717,7 @@ bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -762,7 +765,7 @@ bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -813,7 +816,7 @@ bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -856,7 +859,7 @@ bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -898,7 +901,7 @@ bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -927,7 +930,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -937,6 +940,7 @@ bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
+ bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_pf_disabled(iocpf->ioc);
}
@@ -957,7 +961,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1009,7 +1013,7 @@ bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1038,7 +1042,7 @@ bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1053,7 +1057,7 @@ bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
/**
* Flush any queued up mailbox requests.
*/
- bfa_ioc_mbox_hbfail(iocpf->ioc);
+ bfa_ioc_mbox_flush(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1093,7 +1097,7 @@ bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1115,7 +1119,7 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
break;
default:
- bfa_sm_fault(iocpf->ioc, event);
+ bfa_sm_fault(event);
}
}
@@ -1123,23 +1127,28 @@ bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
* BFA IOC private functions
*/
+/**
+ * Notify common modules registered for notification.
+ */
static void
-bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
+ struct bfa_ioc_notify *notify;
struct list_head *qe;
- struct bfa_ioc_hbfail_notify *notify;
-
- ioc->cbfn->disable_cbfn(ioc->bfa);
- /**
- * Notify common modules registered for notification.
- */
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify *) qe;
- notify->cbfn(notify->cbarg);
+ list_for_each(qe, &ioc->notify_q) {
+ notify = (struct bfa_ioc_notify *)qe;
+ notify->cbfn(notify->cbarg, event);
}
}
+static void
+bfa_ioc_disable_comp(struct bfa_ioc *ioc)
+{
+ ioc->cbfn->disable_cbfn(ioc->bfa);
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
+}
+
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
@@ -1169,6 +1178,29 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
}
static void
+bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
+{
+ struct bfi_ioc_image_hdr fwhdr;
+ u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+
+ if (fwstate == BFI_IOC_UNINIT)
+ return;
+
+ bfa_nw_ioc_fwver_get(ioc, &fwhdr);
+
+ if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL)
+ return;
+
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+
+ /*
+ * Try to lock and then unlock the semaphore.
+ */
+ readl(ioc->ioc_regs.ioc_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_sem_reg);
+}
+
+static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
u32 r32;
@@ -1638,7 +1670,7 @@ bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
* Cleanup any pending requests.
*/
static void
-bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
+bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
struct bfa_mbox_cmd *cmd;
@@ -1650,17 +1682,11 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
- struct list_head *qe;
- struct bfa_ioc_hbfail_notify *notify;
-
/**
* Notify driver and common modules registered for notification.
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
- list_for_each(qe, &ioc->hb_notify_q) {
- notify = (struct bfa_ioc_hbfail_notify *) qe;
- notify->cbfn(notify->cbarg);
- }
+ bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
}
static void
@@ -1684,7 +1710,7 @@ bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
- bfa_fsm_send_event(ioc, IOC_E_PFAILED);
+ bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}
static void
@@ -1839,7 +1865,7 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
ioc->iocpf.ioc = ioc;
bfa_ioc_mbox_attach(ioc);
- INIT_LIST_HEAD(&ioc->hb_notify_q);
+ INIT_LIST_HEAD(&ioc->notify_q);
bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
bfa_fsm_send_event(ioc, IOC_E_RESET);
@@ -1969,6 +1995,8 @@ bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
* mailbox is free -- queue command to firmware
*/
bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
+
+ return;
}
/**
@@ -2001,18 +2029,30 @@ bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/**
+ * return true if IOC is disabled
+ */
+bool
+bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
+ bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
+}
+
+/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
void
-bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
- struct bfa_ioc_hbfail_notify *notify)
+bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_notify *notify)
{
- list_add_tail(&notify->qe, &ioc->hb_notify_q);
+ list_add_tail(&notify->qe, &ioc->notify_q);
}
#define BFA_MFG_NAME "Brocade"
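A hypothetical caller of the predicate exported in the hunk above; upper layers can use it to guard mailbox traffic during teardown (the status code is from the enum earlier in this patch):

/* hypothetical caller: refuse new work while the IOC winds down */
if (bfa_nw_ioc_is_disabled(ioc))
	return BFA_STATUS_IOC_DISABLED;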
@@ -2217,6 +2257,7 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
{
pr_crit("Heart Beat of IOC has failed\n");
bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index bd48abee781..bda866ba6e9 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -19,7 +19,7 @@
#ifndef __BFA_IOC_H__
#define __BFA_IOC_H__
-#include "bfa_sm.h"
+#include "bfa_cs.h"
#include "bfi.h"
#include "cna.h"
@@ -97,9 +97,12 @@ struct bfa_ioc_regs {
/**
* IOC Mailbox structures
*/
+typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
struct list_head qe;
- u32 msg[BFI_IOC_MSGSZ];
+ bfa_mbox_cmd_cbfn_t cbfn;
+ void *cbarg;
+ u32 msg[BFI_IOC_MSGSZ];
};
/**
@@ -130,6 +133,23 @@ struct bfa_ioc_cbfn {
};
/**
+ * IOC event notification mechanism.
+ */
+enum bfa_ioc_event {
+ BFA_IOC_E_ENABLED = 1,
+ BFA_IOC_E_DISABLED = 2,
+ BFA_IOC_E_FAILED = 3,
+};
+
+typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);
+
+struct bfa_ioc_notify {
+ struct list_head qe;
+ bfa_ioc_notify_cbfn_t cbfn;
+ void *cbarg;
+};
+
+/**
* Heartbeat failure notification queue element.
*/
struct bfa_ioc_hbfail_notify {
@@ -141,7 +161,7 @@ struct bfa_ioc_hbfail_notify {
/**
* Initialize a heartbeat failure notification structure
*/
-#define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do { \
+#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \
(__notify)->cbfn = (__cbfn); \
(__notify)->cbarg = (__cbarg); \
} while (0)
@@ -155,25 +175,25 @@ struct bfa_iocpf {
struct bfa_ioc {
bfa_fsm_t fsm;
- struct bfa *bfa;
- struct bfa_pcidev pcidev;
- struct timer_list ioc_timer;
- struct timer_list iocpf_timer;
- struct timer_list sem_timer;
+ struct bfa *bfa;
+ struct bfa_pcidev pcidev;
+ struct timer_list ioc_timer;
+ struct timer_list iocpf_timer;
+ struct timer_list sem_timer;
struct timer_list hb_timer;
u32 hb_count;
- struct list_head hb_notify_q;
+ struct list_head notify_q;
void *dbg_fwsave;
int dbg_fwsave_len;
bool dbg_fwsave_once;
enum bfi_mclass ioc_mc;
- struct bfa_ioc_regs ioc_regs;
+ struct bfa_ioc_regs ioc_regs;
struct bfa_ioc_drv_stats stats;
bool fcmode;
bool ctdev;
bool cna;
bool pllinit;
- bool stats_busy; /*!< outstanding stats */
+ bool stats_busy; /*!< outstanding stats */
u8 port_id;
struct bfa_dma attr_dma;
@@ -217,9 +237,11 @@ struct bfa_ioc_hwif {
BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++)
+#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \
+ ((_ioc)->stats.hb_count = (_hb_count))
#define BFA_IOC_FWIMG_MINSZ (16 * 1024)
#define BFA_IOC_FWIMG_TYPE(__ioc) \
- (((__ioc)->ctdev) ? \
+ (((__ioc)->ctdev) ? \
(((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) : \
BFI_IMAGE_CB_FC)
#define BFA_IOC_FW_SMEM_SIZE(__ioc) \
@@ -263,9 +285,10 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
-void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
- struct bfa_ioc_hbfail_notify *notify);
+void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
+ struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_wc.h b/drivers/net/bna/bfa_wc.h
deleted file mode 100644
index d0e4caee67b..00000000000
--- a/drivers/net/bna/bfa_wc.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Linux network driver for Brocade Converged Network Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-/*
- * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- */
-
-/**
- * @file bfa_wc.h Generic wait counter.
- */
-
-#ifndef __BFA_WC_H__
-#define __BFA_WC_H__
-
-typedef void (*bfa_wc_resume_t) (void *cbarg);
-
-struct bfa_wc {
- bfa_wc_resume_t wc_resume;
- void *wc_cbarg;
- int wc_count;
-};
-
-static inline void
-bfa_wc_up(struct bfa_wc *wc)
-{
- wc->wc_count++;
-}
-
-static inline void
-bfa_wc_down(struct bfa_wc *wc)
-{
- wc->wc_count--;
- if (wc->wc_count == 0)
- wc->wc_resume(wc->wc_cbarg);
-}
-
-/**
- * Initialize a waiting counter.
- */
-static inline void
-bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
-{
- wc->wc_resume = wc_resume;
- wc->wc_cbarg = wc_cbarg;
- wc->wc_count = 0;
- bfa_wc_up(wc);
-}
-
-/**
- * Wait for counter to reach zero
- */
-static inline void
-bfa_wc_wait(struct bfa_wc *wc)
-{
- bfa_wc_down(wc);
-}
-
-#endif
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index 6050379526f..088211c2724 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -51,13 +51,13 @@ struct bfi_mhdr {
};
#define bfi_h2i_set(_mh, _mc, _op, _lpuid) do { \
- (_mh).msg_class = (_mc); \
+ (_mh).msg_class = (_mc); \
(_mh).msg_id = (_op); \
(_mh).mtag.h2i.lpu_id = (_lpuid); \
} while (0)
#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \
- (_mh).msg_class = (_mc); \
+ (_mh).msg_class = (_mc); \
(_mh).msg_id = (_op); \
(_mh).mtag.i2htok = (_i2htok); \
} while (0)
@@ -66,7 +66,7 @@ struct bfi_mhdr {
* Message opcodes: 0-127 to firmware, 128-255 to host
*/
#define BFI_I2H_OPCODE_BASE 128
-#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
+#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE)
/**
****************************************************************************
@@ -186,7 +186,7 @@ enum bfi_mclass {
#define BFI_BOOT_TYPE_OFF 8
#define BFI_BOOT_LOADER_OFF 12
-#define BFI_BOOT_TYPE_NORMAL 0
+#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
@@ -211,9 +211,9 @@ enum bfi_ioc_h2i_msgs {
enum bfi_ioc_i2h_msgs {
BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1),
- BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
- BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
- BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
+ BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2),
+ BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3),
+ BFI_IOC_I2H_READY_EVENT = BFA_I2HM(4),
BFI_IOC_I2H_HBEAT = BFA_I2HM(5),
};
@@ -289,6 +289,12 @@ struct bfi_ioc_image_hdr {
u32 md5sum[BFI_IOC_MD5SUM_SZ];
};
+enum bfi_fwboot_type {
+ BFI_FWBOOT_TYPE_NORMAL = 0,
+ BFI_FWBOOT_TYPE_FLASH = 1,
+ BFI_FWBOOT_TYPE_MEMTEST = 2,
+};
+
/**
* BFI_IOC_I2H_READY_EVENT message
*/
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index a287f89b028..21e9155d6e5 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -13,7 +13,7 @@
#ifndef __BNA_H__
#define __BNA_H__
-#include "bfa_wc.h"
+#include "bfa_cs.h"
#include "bfa_ioc.h"
#include "cna.h"
#include "bfi_ll.h"
@@ -88,7 +88,7 @@ do { \
} while (0)
#define containing_rec(addr, type, field) \
- ((type *)((unsigned char *)(addr) - \
+ ((type *)((unsigned char *)(addr) - \
(unsigned char *)(&((type *)0)->field)))
#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2)
@@ -101,8 +101,8 @@ do { \
{ \
unsigned int page_index; /* index within a page */ \
void *page_addr; \
- page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
- (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
+ page_index = (_qe_idx) & (BNA_TXQ_PAGE_INDEX_MAX - 1); \
+ (_qe_ptr_range) = (BNA_TXQ_PAGE_INDEX_MAX - page_index); \
page_addr = (_qpt_ptr)[((_qe_idx) >> BNA_TXQ_PAGE_INDEX_MAX_SHIFT)];\
(_qe_ptr) = &((struct bna_txq_entry *)(page_addr))[page_index]; \
}
@@ -166,25 +166,25 @@ do { \
(((_q_ptr)->q.producer_index + (_num)) & \
((_q_ptr)->q.q_depth - 1))
-#define BNA_Q_CI_ADD(_q_ptr, _num) \
+#define BNA_Q_CI_ADD(_q_ptr, _num) \
(_q_ptr)->q.consumer_index = \
- (((_q_ptr)->q.consumer_index + (_num)) \
+ (((_q_ptr)->q.consumer_index + (_num)) \
& ((_q_ptr)->q.q_depth - 1))
#define BNA_Q_FREE_COUNT(_q_ptr) \
(BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
-#define BNA_Q_IN_USE_COUNT(_q_ptr) \
+#define BNA_Q_IN_USE_COUNT(_q_ptr) \
(BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
/* These macros build the data portion of the TxQ/RxQ doorbell */
-#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
+#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi))
#define BNA_DOORBELL_Q_STOP (0x40000000)
/* These macros build the data portion of the IB doorbell */
#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \
(0x80000000 | ((_timeout) << 16) | (_events))
-#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
+#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
/* Set the coalescing timer for the given ib */
#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index 53b14169e36..cb2594c564d 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -16,8 +16,7 @@
* www.brocade.com
*/
#include "bna.h"
-#include "bfa_sm.h"
-#include "bfa_wc.h"
+#include "bfa_cs.h"
static void bna_device_cb_port_stopped(void *arg, enum bna_cb_status status);
@@ -380,7 +379,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -409,7 +408,7 @@ bna_llport_sm_down(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -455,7 +454,7 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -497,7 +496,7 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -526,7 +525,7 @@ bna_llport_sm_up(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -563,7 +562,7 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
break;
default:
- bfa_sm_fault(llport->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -916,7 +915,7 @@ bna_port_sm_stopped(struct bna_port *port, enum bna_port_event event)
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -956,7 +955,7 @@ bna_port_sm_mtu_init_wait(struct bna_port *port, enum bna_port_event event)
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1001,7 +1000,7 @@ bna_port_sm_pause_init_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1022,7 +1021,7 @@ bna_port_sm_last_resp_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1061,7 +1060,7 @@ bna_port_sm_started(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1086,7 +1085,7 @@ bna_port_sm_pause_cfg_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1111,7 +1110,7 @@ bna_port_sm_rx_stop_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1136,7 +1135,7 @@ bna_port_sm_mtu_cfg_wait(struct bna_port *port, enum bna_port_event event)
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1161,7 +1160,7 @@ bna_port_sm_chld_stop_wait(struct bna_port *port,
break;
default:
- bfa_sm_fault(port->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1472,7 +1471,7 @@ bna_device_sm_stopped(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1512,7 +1511,7 @@ bna_device_sm_ioc_ready_wait(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1542,7 +1541,7 @@ bna_device_sm_ready(struct bna_device *device, enum bna_device_event event)
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1568,7 +1567,7 @@ bna_device_sm_port_stop_wait(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1589,7 +1588,7 @@ bna_device_sm_ioc_disable_wait(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1622,7 +1621,7 @@ bna_device_sm_failed(struct bna_device *device,
break;
default:
- bfa_sm_fault(device->bna, event);
+ bfa_sm_fault(event);
}
}
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
index 6cb89692f5c..cad233da843 100644
--- a/drivers/net/bna/bna_hw.h
+++ b/drivers/net/bna/bna_hw.h
@@ -67,7 +67,7 @@ static struct bna_ibidx_pool name[BFI_IBIDX_TOTAL_POOLS] = \
/**
* There are 2 free RIT segment pools:
- * Pool1: 192 segments of 1 RIT entry each
+ * Pool1: 192 segments of 1 RIT entry each
* Pool2: 1 segment of 64 RIT entry
*/
#define BFI_RIT_SEG_POOL1_SIZE 192
@@ -357,14 +357,14 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
* To clear set the value to 0.
* Range : 0x20 to 0x5c
*/
-#define PSS_SEM_LOCK_REG(_num) \
+#define PSS_SEM_LOCK_REG(_num) \
(PSS_BLK_REG_ADDR + 0x020 + ((_num) << 2))
/**
* PSS Semaphore Status Registers,
* corresponding to the lock registers above
*/
-#define PSS_SEM_STATUS_REG(_num) \
+#define PSS_SEM_STATUS_REG(_num) \
(PSS_BLK_REG_ADDR + 0x060 + ((_num) << 2))
/**
@@ -1044,7 +1044,7 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
__LPU12HOST_MBOX1_STATUS_BITS))
#define BNA_IS_MBOX_INTR(_intr_status) \
- ((_intr_status) & \
+ ((_intr_status) & \
(__LPU02HOST_MBOX0_STATUS_BITS | \
__LPU02HOST_MBOX1_STATUS_BITS | \
__LPU12HOST_MBOX0_STATUS_BITS | \
@@ -1070,11 +1070,11 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
__HALT_MASK_BITS)
#define BNA_IS_ERR_INTR(_intr_status) \
- ((_intr_status) & \
- (__EMC_ERROR_STATUS_BITS | \
- __LPU0_ERROR_STATUS_BITS | \
- __LPU1_ERROR_STATUS_BITS | \
- __PSS_ERROR_STATUS_BITS | \
+ ((_intr_status) & \
+ (__EMC_ERROR_STATUS_BITS | \
+ __LPU0_ERROR_STATUS_BITS | \
+ __LPU1_ERROR_STATUS_BITS | \
+ __PSS_ERROR_STATUS_BITS | \
__HALT_STATUS_BITS))
#define BNA_IS_MBOX_ERR_INTR(_intr_status) \
@@ -1087,9 +1087,9 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
#define BNA_INTR_STATUS_MBOX_CLR(_intr_status) \
do { \
(_intr_status) &= ~(__LPU02HOST_MBOX0_STATUS_BITS | \
- __LPU02HOST_MBOX1_STATUS_BITS | \
- __LPU12HOST_MBOX0_STATUS_BITS | \
- __LPU12HOST_MBOX1_STATUS_BITS); \
+ __LPU02HOST_MBOX1_STATUS_BITS | \
+ __LPU12HOST_MBOX0_STATUS_BITS | \
+ __LPU12HOST_MBOX1_STATUS_BITS); \
} while (0)
#define BNA_INTR_STATUS_ERR_CLR(_intr_status) \
@@ -1107,7 +1107,7 @@ do { \
writel(0xffffffff, (_bna)->regs.fn_int_mask);\
}
-#define bna_intx_enable(bna, new_mask) \
+#define bna_intx_enable(bna, new_mask) \
writel((new_mask), (bna)->regs.fn_int_mask)
#define bna_mbox_intr_disable(bna) \
@@ -1179,18 +1179,18 @@ do {\
#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000)
/* TxQ Entry Opcodes */
-#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
-#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
+#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */
+#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */
#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */
/* TxQ Entry Control Flags */
-#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
-#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
-#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
-#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
-#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
-#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
-#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
+#define BNA_TXQ_WI_CF_FCOE_CRC (1 << 8)
+#define BNA_TXQ_WI_CF_IPID_MODE (1 << 5)
+#define BNA_TXQ_WI_CF_INS_PRIO (1 << 4)
+#define BNA_TXQ_WI_CF_INS_VLAN (1 << 3)
+#define BNA_TXQ_WI_CF_UDP_CKSUM (1 << 2)
+#define BNA_TXQ_WI_CF_TCP_CKSUM (1 << 1)
+#define BNA_TXQ_WI_CF_IP_CKSUM (1 << 0)
#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \
(((_hdr_size) << 10) | ((_offset) & 0x3FF))
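The packing macro directly above puts the L4 header size in bits 31:10 and the offset in bits 9:0 of one 32-bit word; a worked instance with hypothetical values:

/* hdr_size = 54, offset = 34:  (54 << 10) | 34 = 0x0000d822 */
u32 wi_hdr_field = BNA_TXQ_WI_L4_HDR_N_OFFSET(54, 34);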
@@ -1199,30 +1199,30 @@ do {\
* Completion Q defines
*/
/* CQ Entry Flags */
-#define BNA_CQ_EF_MAC_ERROR (1 << 0)
-#define BNA_CQ_EF_FCS_ERROR (1 << 1)
-#define BNA_CQ_EF_TOO_LONG (1 << 2)
-#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
+#define BNA_CQ_EF_MAC_ERROR (1 << 0)
+#define BNA_CQ_EF_FCS_ERROR (1 << 1)
+#define BNA_CQ_EF_TOO_LONG (1 << 2)
+#define BNA_CQ_EF_FC_CRC_OK (1 << 3)
-#define BNA_CQ_EF_RSVD1 (1 << 4)
+#define BNA_CQ_EF_RSVD1 (1 << 4)
#define BNA_CQ_EF_L4_CKSUM_OK (1 << 5)
#define BNA_CQ_EF_L3_CKSUM_OK (1 << 6)
#define BNA_CQ_EF_HDS_HEADER (1 << 7)
-#define BNA_CQ_EF_UDP (1 << 8)
-#define BNA_CQ_EF_TCP (1 << 9)
+#define BNA_CQ_EF_UDP (1 << 8)
+#define BNA_CQ_EF_TCP (1 << 9)
#define BNA_CQ_EF_IP_OPTIONS (1 << 10)
-#define BNA_CQ_EF_IPV6 (1 << 11)
+#define BNA_CQ_EF_IPV6 (1 << 11)
-#define BNA_CQ_EF_IPV4 (1 << 12)
-#define BNA_CQ_EF_VLAN (1 << 13)
-#define BNA_CQ_EF_RSS (1 << 14)
-#define BNA_CQ_EF_RSVD2 (1 << 15)
+#define BNA_CQ_EF_IPV4 (1 << 12)
+#define BNA_CQ_EF_VLAN (1 << 13)
+#define BNA_CQ_EF_RSS (1 << 14)
+#define BNA_CQ_EF_RSVD2 (1 << 15)
#define BNA_CQ_EF_MCAST_MATCH (1 << 16)
-#define BNA_CQ_EF_MCAST (1 << 17)
-#define BNA_CQ_EF_BCAST (1 << 18)
-#define BNA_CQ_EF_REMOTE (1 << 19)
+#define BNA_CQ_EF_MCAST (1 << 17)
+#define BNA_CQ_EF_BCAST (1 << 18)
+#define BNA_CQ_EF_REMOTE (1 << 19)
#define BNA_CQ_EF_LOCAL (1 << 20)
@@ -1257,10 +1257,10 @@ enum ib_flags {
};
enum rss_hash_type {
- BFI_RSS_T_V4_TCP = (1 << 11),
- BFI_RSS_T_V4_IP = (1 << 10),
- BFI_RSS_T_V6_TCP = (1 << 9),
- BFI_RSS_T_V6_IP = (1 << 8)
+ BFI_RSS_T_V4_TCP = (1 << 11),
+ BFI_RSS_T_V4_IP = (1 << 10),
+ BFI_RSS_T_V6_TCP = (1 << 9),
+ BFI_RSS_T_V6_IP = (1 << 8)
};
enum hds_header_type {
BNA_HDS_T_V4_TCP = (1 << 11),
@@ -1298,7 +1298,7 @@ struct bna_txq_mem {
u32 reserved2;
u32 pg_cnt_n_prd_ptr; /* 31:16->total page count */
/* 15:0 ->producer pointer (index?) */
- u32 entry_n_pg_size; /* 31:16->entry size */
+ u32 entry_n_pg_size; /* 31:16->entry size */
/* 15:0 ->page size */
u32 int_blk_n_cns_ptr; /* 31:24->Int Blk Id; */
/* 23:16->Int Blk Offset */
@@ -1326,7 +1326,7 @@ struct bna_rxq_mem {
u32 sg_n_cq_n_cns_ptr; /* 31:28->reserved; 27:24->sg count */
/* 23:16->CQ; */
/* 15:0->consumer pointer(index?) */
- u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
+ u32 buf_sz_n_q_state; /* 31:16->buffer size; 15:0-> Q state */
u32 next_qid; /* 17:10->next QId */
u32 reserved3;
u32 reserved4[4];
@@ -1426,8 +1426,8 @@ struct bna_dma_addr {
};
struct bna_txq_wi_vector {
- u16 reserved;
- u16 length; /* Only 14 LSB are valid */
+ u16 reserved;
+ u16 length; /* Only 14 LSB are valid */
struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */
};
@@ -1465,7 +1465,7 @@ struct bna_txq_entry {
} hdr;
struct bna_txq_wi_vector vector[4];
};
-#define wi_hdr hdr.wi
+#define wi_hdr hdr.wi
#define wi_ext_hdr hdr.wi_ext
/* RxQ Entry Structure */
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index 380085cc308..f0983c83244 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -16,7 +16,7 @@
* www.brocade.com
*/
#include "bna.h"
-#include "bfa_sm.h"
+#include "bfa_cs.h"
#include "bfi.h"
/**
@@ -569,7 +569,7 @@ bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -627,7 +627,7 @@ bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -678,7 +678,7 @@ bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -724,7 +724,7 @@ bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -734,7 +734,7 @@ bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
/**
* Note: Do not add rxf_clear_packet_filter here.
* It will overstep mbox when this transition happens:
- * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
+ * cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
*/
}
@@ -761,7 +761,7 @@ bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -771,7 +771,7 @@ bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
/**
* NOTE: Do not add rxf_disable here.
* It will overstep mbox when this transition happens:
- * start_wait -> stop_wait on RXF_E_STOP event
+ * start_wait -> stop_wait on RXF_E_STOP event
*/
}
@@ -815,7 +815,7 @@ bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -851,7 +851,7 @@ bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
* any other event during these states
*/
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -887,7 +887,7 @@ bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
* any other event during these states
*/
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -907,7 +907,7 @@ bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
break;
default:
- bfa_sm_fault(rxf->rx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -1898,7 +1898,7 @@ static void bna_rx_sm_stopped(struct bna_rx *rx,
/* no-op */
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
@@ -1946,7 +1946,7 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
bfa_fsm_set_state(rx, bna_rx_sm_started);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
}
@@ -1981,7 +1981,7 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
}
@@ -2011,7 +2011,7 @@ bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
bna_rxf_fail(&rx->rxf);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
@@ -2064,7 +2064,7 @@ bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
default:
- bfa_sm_fault(rx->bna, event);
+ bfa_sm_fault(event);
break;
}
}
@@ -3216,7 +3216,7 @@ bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3261,7 +3261,7 @@ bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3294,7 +3294,7 @@ bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3335,7 +3335,7 @@ bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
@@ -3355,7 +3355,7 @@ bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
break;
default:
- bfa_sm_fault(tx->bna, event);
+ bfa_sm_fault(event);
}
}
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
index b9c134f7ad3..2f89cb23524 100644
--- a/drivers/net/bna/bna_types.h
+++ b/drivers/net/bna/bna_types.h
@@ -50,12 +50,12 @@ enum bna_status {
};
enum bna_cleanup_type {
- BNA_HARD_CLEANUP = 0,
- BNA_SOFT_CLEANUP = 1
+ BNA_HARD_CLEANUP = 0,
+ BNA_SOFT_CLEANUP = 1
};
enum bna_cb_status {
- BNA_CB_SUCCESS = 0,
+ BNA_CB_SUCCESS = 0,
BNA_CB_FAIL = 1,
BNA_CB_INTERRUPT = 2,
BNA_CB_BUSY = 3,
@@ -72,8 +72,8 @@ enum bna_res_type {
};
enum bna_mem_type {
- BNA_MEM_T_KVA = 1,
- BNA_MEM_T_DMA = 2
+ BNA_MEM_T_KVA = 1,
+ BNA_MEM_T_DMA = 2
};
enum bna_intr_type {
@@ -82,10 +82,10 @@ enum bna_intr_type {
};
enum bna_res_req_type {
- BNA_RES_MEM_T_COM = 0,
- BNA_RES_MEM_T_ATTR = 1,
- BNA_RES_MEM_T_FWTRC = 2,
- BNA_RES_MEM_T_STATS = 3,
+ BNA_RES_MEM_T_COM = 0,
+ BNA_RES_MEM_T_ATTR = 1,
+ BNA_RES_MEM_T_FWTRC = 2,
+ BNA_RES_MEM_T_STATS = 3,
BNA_RES_MEM_T_SWSTATS = 4,
BNA_RES_MEM_T_IBIDX = 5,
BNA_RES_MEM_T_IB_ARRAY = 6,
@@ -107,9 +107,9 @@ enum bna_res_req_type {
enum bna_tx_res_req_type {
BNA_TX_RES_MEM_T_TCB = 0,
BNA_TX_RES_MEM_T_UNMAPQ = 1,
- BNA_TX_RES_MEM_T_QPT = 2,
+ BNA_TX_RES_MEM_T_QPT = 2,
BNA_TX_RES_MEM_T_SWQPT = 3,
- BNA_TX_RES_MEM_T_PAGE = 4,
+ BNA_TX_RES_MEM_T_PAGE = 4,
BNA_TX_RES_INTR_T_TXCMPL = 5,
BNA_TX_RES_T_MAX,
};
@@ -158,14 +158,14 @@ enum bna_rx_type {
};
enum bna_rxp_type {
- BNA_RXP_SINGLE = 1,
- BNA_RXP_SLR = 2,
- BNA_RXP_HDS = 3
+ BNA_RXP_SINGLE = 1,
+ BNA_RXP_SLR = 2,
+ BNA_RXP_HDS = 3
};
enum bna_rxmode {
- BNA_RXMODE_PROMISC = 1,
- BNA_RXMODE_ALLMULTI = 2
+ BNA_RXMODE_PROMISC = 1,
+ BNA_RXMODE_ALLMULTI = 2
};
enum bna_rx_event {
@@ -202,7 +202,7 @@ enum bna_rxf_oper_state {
};
enum bna_rxf_flags {
- BNA_RXF_FL_STOP_PENDING = 0x01,
+ BNA_RXF_FL_STOP_PENDING = 0x01,
BNA_RXF_FL_FAILED = 0x02,
BNA_RXF_FL_RSS_CONFIG_PENDING = 0x04,
BNA_RXF_FL_OPERSTATE_CHANGED = 0x08,
@@ -244,11 +244,11 @@ enum bna_port_type {
enum bna_link_status {
BNA_LINK_DOWN = 0,
BNA_LINK_UP = 1,
- BNA_CEE_UP = 2
+ BNA_CEE_UP = 2
};
enum bna_llport_flags {
- BNA_LLPORT_F_ADMIN_UP = 1,
+ BNA_LLPORT_F_ADMIN_UP = 1,
BNA_LLPORT_F_PORT_ENABLED = 2,
BNA_LLPORT_F_RX_STARTED = 4
};
@@ -304,7 +304,7 @@ struct bna_mem_descr {
struct bna_mem_info {
enum bna_mem_type mem_type;
u32 len;
- u32 num;
+ u32 num;
u32 align_sz; /* 0/1 = no alignment */
struct bna_mem_descr *mdl;
void *cookie; /* For bnad to unmap dma later */
@@ -371,10 +371,10 @@ struct bna_mbox_qe {
struct list_head qe;
struct bfa_mbox_cmd cmd;
- u32 cmd_len;
+ u32 cmd_len;
/* Callback for port, tx, rx, rxf */
void (*cbfn)(void *arg, int status);
- void *cbarg;
+ void *cbarg;
};
struct bna_mbox_mod {
@@ -480,7 +480,7 @@ struct bna_ib_dbell {
/* Interrupt timer configuration */
struct bna_ib_config {
- u8 coalescing_timeo; /* Unit is 5usec. */
+ u8 coalescing_timeo; /* Unit is 5usec. */
int interpkt_count;
int interpkt_timeo;
@@ -576,8 +576,8 @@ struct bna_txq {
struct bna_tx *tx;
- u64 tx_packets;
- u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
};
/* TxF structure (hardware Tx Function) */
@@ -739,10 +739,10 @@ struct bna_rxq {
struct bna_rxp *rxp;
struct bna_rx *rx;
- u64 rx_packets;
+ u64 rx_packets;
u64 rx_bytes;
- u64 rx_packets_with_error;
- u64 rxbuf_alloc_failed;
+ u64 rx_packets_with_error;
+ u64 rxbuf_alloc_failed;
};
/* RxQ pair */
@@ -902,7 +902,7 @@ struct bna_rxf {
* callback for:
* bna_rxf_ucast_set()
* bna_rxf_{ucast/mcast}_add(),
- * bna_rxf_{ucast/mcast}_del(),
+ * bna_rxf_{ucast/mcast}_del(),
* bna_rxf_mode_set()
*/
void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx,
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index c89c9b28cb7..00273e5b70b 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -60,7 +60,7 @@ static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
#define BNAD_GET_MBOX_IRQ(_bnad) \
(((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
- ((_bnad)->msix_table[(_bnad)->msix_num - 1].vector) : \
+ ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
((_bnad)->pcidev->irq))
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _depth) \
@@ -112,10 +112,10 @@ static void
bnad_free_all_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 unmap_cons;
+ u32 unmap_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb = NULL;
int i;
unmap_array = unmap_q->unmap_array;
@@ -165,11 +165,11 @@ static u32
bnad_free_txbufs(struct bnad *bnad,
struct bna_tcb *tcb)
{
- u32 sent_packets = 0, sent_bytes = 0;
- u16 wis, unmap_cons, updated_hw_cons;
+ u32 sent_packets = 0, sent_bytes = 0;
+ u16 wis, unmap_cons, updated_hw_cons;
struct bnad_unmap_q *unmap_q = tcb->unmap_q;
struct bnad_skb_unmap *unmap_array;
- struct sk_buff *skb;
+ struct sk_buff *skb;
int i;
/*
@@ -247,7 +247,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
{
struct bnad *bnad = (struct bnad *)bnad_ptr;
struct bna_tcb *tcb;
- u32 acked = 0;
+ u32 acked = 0;
int i, j;
for (i = 0; i < bnad->num_tx; i++) {
@@ -1102,10 +1102,10 @@ static int
bnad_mbox_irq_alloc(struct bnad *bnad,
struct bna_intr_info *intr_info)
{
- int err = 0;
- unsigned long irq_flags, flags;
+ int err = 0;
+ unsigned long irq_flags, flags;
u32 irq;
- irq_handler_t irq_handler;
+ irq_handler_t irq_handler;
/* Mbox should use only 1 vector */
@@ -1116,17 +1116,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX) {
irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
- irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
irq_flags = 0;
intr_info->intr_type = BNA_INTR_T_MSIX;
- intr_info->idl[0].vector = bnad->msix_num - 1;
+ intr_info->idl[0].vector = BNAD_MAILBOX_MSIX_INDEX;
} else {
irq_handler = (irq_handler_t)bnad_isr;
irq = bnad->pcidev->irq;
irq_flags = IRQF_SHARED;
intr_info->intr_type = BNA_INTR_T_INTX;
- /* intr_info->idl.vector = 0 ? */
}
+
spin_unlock_irqrestore(&bnad->bna_lock, flags);
sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
@@ -1179,11 +1179,12 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
switch (src) {
case BNAD_INTR_TX:
- vector_start = txrx_id;
+ vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
break;
case BNAD_INTR_RX:
- vector_start = bnad->num_tx * bnad->num_txq_per_tx +
+ vector_start = BNAD_MAILBOX_MSIX_VECTORS +
+ (bnad->num_tx * bnad->num_txq_per_tx) +
txrx_id;
break;
@@ -1204,11 +1205,11 @@ bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
switch (src) {
case BNAD_INTR_TX:
- intr_info->idl[0].vector = 0x1; /* Bit mask : Tx IB */
+ intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
break;
case BNAD_INTR_RX:
- intr_info->idl[0].vector = 0x2; /* Bit mask : Rx IB */
+ intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
break;
}
}
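The interrupt hunks above pin the mailbox to MSI-X entry 0 (BNAD_MAILBOX_MSIX_INDEX) instead of the last entry, and shift the Tx/Rx vectors up past it by BNAD_MAILBOX_MSIX_VECTORS. Schematically (a sketch restating the hunks, not new driver code):

/*
 * MSI-X table layout after this patch:
 *   entry 0                  mailbox (BNAD_MAILBOX_MSIX_INDEX)
 *   entries 1..N_tx          Tx IBs, N_tx = num_tx * num_txq_per_tx
 *   entries 1+N_tx onward    Rx IBs
 *
 * e.g. with num_tx = 1 and num_txq_per_tx = 8, Tx uses entries 1-8
 * and Rx starts at entry 9.
 */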
@@ -1447,7 +1448,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
/*
* All timer routines use bnad->bna_lock to protect against
* the following race, which may occur in case of no locking:
- * Time CPU m CPU n
+ * Time CPU m CPU n
* 0 1 = test_bit
* 1 clear_bit
* 2 del_timer_sync
@@ -1912,7 +1913,7 @@ void
bnad_rx_coalescing_timeo_set(struct bnad *bnad)
{
struct bnad_rx_info *rx_info;
- int i;
+ int i;
for (i = 0; i < bnad->num_rx; i++) {
rx_info = &bnad->rx_info[i];
@@ -2075,7 +2076,7 @@ bnad_mbox_irq_sync(struct bnad *bnad)
spin_lock_irqsave(&bnad->bna_lock, flags);
if (bnad->cfg_flags & BNAD_CF_MSIX)
- irq = bnad->msix_table[bnad->msix_num - 1].vector;
+ irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
else
irq = bnad->pcidev->irq;
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -2426,18 +2427,18 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bnad *bnad = netdev_priv(netdev);
- u16 txq_prod, vlan_tag = 0;
- u32 unmap_prod, wis, wis_used, wi_range;
- u32 vectors, vect_id, i, acked;
+ u16 txq_prod, vlan_tag = 0;
+ u32 unmap_prod, wis, wis_used, wi_range;
+ u32 vectors, vect_id, i, acked;
u32 tx_id;
- int err;
+ int err;
struct bnad_tx_info *tx_info;
struct bna_tcb *tcb;
struct bnad_unmap_q *unmap_q;
- dma_addr_t dma_addr;
+ dma_addr_t dma_addr;
struct bna_txq_entry *txqent;
- bna_txq_wi_ctrl_flag_t flags;
+ bna_txq_wi_ctrl_flag_t flags;
if (unlikely
(skb->len <= ETH_HLEN || skb->len > BFI_TX_MAX_DATA_PER_PKT)) {
@@ -3033,8 +3034,8 @@ static int __devinit
bnad_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pcidev_id)
{
- bool using_dac = false;
- int err;
+ bool using_dac = false;
+ int err;
struct bnad *bnad;
struct bna *bna;
struct net_device *netdev;
@@ -3066,7 +3067,7 @@ bnad_pci_probe(struct pci_dev *pdev,
/*
* PCI initialization
- * Output : using_dac = 1 for 64 bit DMA
+ * Output : using_dac = 1 for 64 bit DMA
* = 0 for 32 bit DMA
*/
err = bnad_pci_init(bnad, pdev, &using_dac);
@@ -3209,7 +3210,7 @@ bnad_pci_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static const struct pci_device_id bnad_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnad_pci_id_table) = {
{
PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
PCI_DEVICE_ID_BROCADE_CT),
@@ -3232,7 +3233,8 @@ bnad_module_init(void)
{
int err;
- pr_info("Brocade 10G Ethernet driver\n");
+ pr_info("Brocade 10G Ethernet driver - version: %s\n",
+ BNAD_VERSION);
bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index 7aa550b6182..458eb30371b 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -68,10 +68,13 @@ struct bnad_rx_ctrl {
#define BNAD_VERSION "2.3.2.3"
+#define BNAD_MAILBOX_MSIX_INDEX 0
#define BNAD_MAILBOX_MSIX_VECTORS 1
+#define BNAD_INTX_TX_IB_BITMASK 0x1
+#define BNAD_INTX_RX_IB_BITMASK 0x2
-#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
-#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */
+#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */
#define BNAD_MAX_Q_DEPTH 0x10000
#define BNAD_MIN_Q_DEPTH 0x200
@@ -102,12 +105,12 @@ enum bnad_intr_source {
enum bnad_link_state {
BNAD_LS_DOWN = 0,
- BNAD_LS_UP = 1
+ BNAD_LS_UP = 1
};
struct bnad_completion {
- struct completion ioc_comp;
- struct completion ucast_comp;
+ struct completion ioc_comp;
+ struct completion ucast_comp;
struct completion mcast_comp;
struct completion tx_comp;
struct completion rx_comp;
@@ -125,7 +128,7 @@ struct bnad_completion {
/* Tx Rx Control Stats */
struct bnad_drv_stats {
- u64 netif_queue_stop;
+ u64 netif_queue_stop;
u64 netif_queue_wakeup;
u64 netif_queue_stopped;
u64 tso4;
@@ -188,7 +191,7 @@ struct bnad_skb_unmap {
struct bnad_unmap_q {
u32 producer_index;
u32 consumer_index;
- u32 q_depth;
+ u32 q_depth;
/* This should be the last one */
struct bnad_skb_unmap unmap_array[1];
};
@@ -211,7 +214,7 @@ struct bnad_unmap_q {
#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
struct bnad {
- struct net_device *netdev;
+ struct net_device *netdev;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TXS];
@@ -245,7 +248,7 @@ struct bnad {
u32 cfg_flags;
unsigned long run_flags;
- struct pci_dev *pcidev;
+ struct pci_dev *pcidev;
u64 mmio_start;
u64 mmio_len;
@@ -278,7 +281,7 @@ struct bnad {
struct bnad_diag *diag;
char adapter_name[BNAD_NAME_LEN];
- char port_name[BNAD_NAME_LEN];
+ char port_name[BNAD_NAME_LEN];
char mbox_irq_name[BNAD_NAME_LEN];
};
@@ -286,7 +289,7 @@ struct bnad {
* EXTERN VARIABLES
*/
extern struct firmware *bfi_fw;
-extern u32 bnad_rxqs_per_cq;
+extern u32 bnad_rxqs_per_cq;
/*
* EXTERN PROTOTYPES
@@ -332,7 +335,7 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
}
#define bnad_dim_timer_running(_bnad) \
- (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
+ (((_bnad)->cfg_flags & BNAD_CF_DIM_ENABLED) && \
(test_bit(BNAD_RF_DIM_TIMER_RUNNING, &((_bnad)->run_flags))))
#endif /* __BNAD_H__ */
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 3330cd78da2..fea07f19a5d 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -295,7 +295,7 @@ get_regs(struct bnad *bnad, u32 * regs)
u32 reg_addr;
unsigned long flags;
-#define BNAD_GET_REG(addr) \
+#define BNAD_GET_REG(addr) \
do { \
if (regs) \
regs[num++] = readl(bnad->bar0 + (addr)); \
diff --git a/drivers/net/bna/cna.h b/drivers/net/bna/cna.h
index 01b4af73302..a679e038747 100644
--- a/drivers/net/bna/cna.h
+++ b/drivers/net/bna/cna.h
@@ -33,7 +33,7 @@
#include <linux/list.h>
-#define bfa_sm_fault(__mod, __event) do { \
+#define bfa_sm_fault(__event) do { \
pr_err("SM Assertion failure: %s: %d: event = %d", __FILE__, __LINE__, \
__event); \
} while (0)
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 2e858e4dcf4..eb096253d78 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -104,7 +104,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
int err = 0;
u64 config = 0;
- if (!priv->mdev->dev->caps.wol) {
+ if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -134,7 +134,7 @@ static int mlx4_en_set_wol(struct net_device *netdev,
u64 config = 0;
int err = 0;
- if (!priv->mdev->dev->caps.wol)
+ if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
@@ -170,7 +170,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
return NUM_ALL_STATS +
(priv->tx_ring_num + priv->rx_ring_num) * 2;
case ETH_SS_TEST:
- return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+ return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
+ & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
default:
return -EOPNOTSUPP;
}
@@ -220,7 +221,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
- if (priv->mdev->dev->caps.loopback_support)
+ if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
break;
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 9276b1b2558..6bfea233a9f 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -106,7 +106,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->tcp_rss = tcp_rss;
params->udp_rss = udp_rss;
- if (params->udp_rss && !mdev->dev->caps.udp_rss) {
+ if (params->udp_rss && !(mdev->dev->caps.flags
+ & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
params->udp_rss = 0;
}
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 9d3f57e76f2..4b0f32e568f 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -215,7 +215,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags |= MLX4_EN_FLAG_PROMISC;
/* Enable promiscuous mode */
- if (!mdev->dev->caps.vep_uc_steering)
+ if (!(mdev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
priv->base_qpn, 1);
else
@@ -259,7 +260,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->flags &= ~MLX4_EN_FLAG_PROMISC;
/* Disable promiscuous mode */
- if (!mdev->dev->caps.vep_uc_steering)
+ if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
priv->base_qpn, 0);
else
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 2a74bc81b9f..5e710917806 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -114,9 +114,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
struct mlx4_set_port_rqp_calc_context *context;
int err;
u32 in_mod;
- u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
- if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
return 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
index 191a8dcd8a9..9fdbcecd499 100644
--- a/drivers/net/mlx4/en_selftest.c
+++ b/drivers/net/mlx4/en_selftest.c
@@ -159,7 +159,8 @@ retry_tx:
goto retry_tx;
}
- if (priv->mdev->dev->caps.loopback_support){
+ if (priv->mdev->dev->caps.flags &
+ MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
buf[3] = mlx4_en_test_registers(priv);
buf[4] = mlx4_en_test_loopback(priv);
}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 67a209ba939..7eb8ba822e9 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (defa
} \
} while (0)
-static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
{
static const char *fname[] = {
[ 0] = "RC transport",
@@ -99,13 +99,19 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[21] = "UD multicast support",
[24] = "Demand paging support",
[25] = "Router support",
- [30] = "IBoE support"
+ [30] = "IBoE support",
+ [32] = "Unicast loopback support",
+ [38] = "Wake On LAN support",
+ [40] = "UDP RSS support",
+ [41] = "Unicast VEP steering support",
+ [42] = "Multicast VEP steering support",
+ [48] = "Counters support",
};
int i;
mlx4_dbg(dev, "DEV_CAP flags:\n");
for (i = 0; i < ARRAY_SIZE(fname); ++i)
- if (fname[i] && (flags & (1 << i)))
+ if (fname[i] && (flags & (1LL << i)))
mlx4_dbg(dev, " %s\n", fname[i]);
}
@@ -142,7 +148,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
struct mlx4_cmd_mailbox *mailbox;
u32 *outbox;
u8 field;
- u32 field32;
+ u32 field32, flags, ext_flags;
u16 size;
u16 stat_rate;
int err;
@@ -180,8 +186,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_GID_OFFSET 0x3b
#define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET 0x3c
#define QUERY_DEV_CAP_MAX_PKEY_OFFSET 0x3f
-#define QUERY_DEV_CAP_UDP_RSS_OFFSET 0x42
-#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET 0x43
+#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET 0x40
#define QUERY_DEV_CAP_FLAGS_OFFSET 0x44
#define QUERY_DEV_CAP_RSVD_UAR_OFFSET 0x48
#define QUERY_DEV_CAP_UAR_SZ_OFFSET 0x49
@@ -199,6 +204,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_MAX_MCG_OFFSET 0x63
#define QUERY_DEV_CAP_RSVD_PD_OFFSET 0x64
#define QUERY_DEV_CAP_MAX_PD_OFFSET 0x65
+#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68
#define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80
#define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82
#define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84
@@ -272,14 +278,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_msg_sz = 1 << (field & 0x1f);
MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
dev_cap->stat_rate_support = stat_rate;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
- dev_cap->udp_rss = field & 0x1;
- dev_cap->vep_uc_steering = field & 0x2;
- dev_cap->vep_mc_steering = field & 0x4;
- MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
- dev_cap->loopback_support = field & 0x1;
- dev_cap->wol = field & 0x40;
- MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+ MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+ MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+ dev_cap->flags = flags | (u64)ext_flags << 32;
MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
dev_cap->reserved_uars = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
@@ -356,6 +357,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
MLX4_GET(dev_cap->max_icm_sz, outbox,
QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+ if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+ MLX4_GET(dev_cap->max_counters, outbox,
+ QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -449,6 +453,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
+ mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
dump_dev_cap_flags(dev, dev_cap->flags);
@@ -781,6 +786,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
if (enable_qos)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
+ /* enable counters */
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+ *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+
/* QPC/EEC/CQC/EQC/RDMARC attributes */
MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -801,7 +810,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
- if (dev->caps.vep_mc_steering)
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
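With dev_cap->flags widened to u64 and assembled as flags | (u64)ext_flags << 32, the named capability bits used throughout these hunks must be 64-bit constants. A sketch of how the new high bits would be declared in include/linux/mlx4/device.h, assuming the usual 1LL-shifted enum style; the bit positions follow the fname[] table in dump_dev_cap_flags() above:

/* sketch: bits above 31 need long-long constants (1LL << n) */
enum {
	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
	MLX4_DEV_CAP_FLAG_WOL		= 1LL << 38,
	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48,
};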
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 88003ebc618..1e8ecc3708e 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -78,12 +78,7 @@ struct mlx4_dev_cap {
u16 wavelength[MLX4_MAX_PORTS + 1];
u64 trans_code[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
- int udp_rss;
- int loopback_support;
- int vep_uc_steering;
- int vep_mc_steering;
- int wol;
- u32 flags;
+ u64 flags;
int reserved_uars;
int uar_size;
int min_page_sz;
@@ -116,6 +111,7 @@ struct mlx4_dev_cap {
u8 supported_port_types[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
+ u32 max_counters;
};
struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 0cb0431ee19..c94b3426d35 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -143,6 +143,7 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
dev->caps.port_mask |= 1 << (i - 1);
}
+
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
@@ -226,11 +227,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.bmme_flags = dev_cap->bmme_flags;
dev->caps.reserved_lkey = dev_cap->reserved_lkey;
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
- dev->caps.udp_rss = dev_cap->udp_rss;
- dev->caps.loopback_support = dev_cap->loopback_support;
- dev->caps.vep_uc_steering = dev_cap->vep_uc_steering;
- dev->caps.vep_mc_steering = dev_cap->vep_mc_steering;
- dev->caps.wol = dev_cap->wol;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.log_num_macs = log_num_mac;
@@ -262,6 +258,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_set_port_mask(dev);
+ dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
@@ -839,6 +837,45 @@ err_stop_fw:
return err;
}
+static int mlx4_init_counters_table(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int nent;
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return -ENOENT;
+
+ nent = dev->caps.max_counters;
+ return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+}
+
+static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
+{
+ mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
+}
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+ return -ENOENT;
+
+ *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
+ if (*idx == -1)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+ mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+ return;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_free);
+
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -943,6 +980,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_qp_table_free;
}
+ err = mlx4_init_counters_table(dev);
+ if (err && err != -ENOENT) {
+ mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ goto err_counters_table_free;
+ }
+
for (port = 1; port <= dev->caps.num_ports; port++) {
enum mlx4_port_type port_type = 0;
mlx4_SENSE_PORT(dev, port, &port_type);
@@ -969,6 +1012,9 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err_mcg_table_free:
mlx4_cleanup_mcg_table(dev);
+err_counters_table_free:
+ mlx4_cleanup_counters_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1299,6 +1345,7 @@ err_port:
for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
@@ -1359,6 +1406,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_CLOSE_PORT(dev, p);
}
+ mlx4_cleanup_counters_table(dev);
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
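mlx4_counter_alloc()/mlx4_counter_free() give consumers such as mlx4_en a simple allocator over the new counters bitmap. A minimal usage sketch (the wrapper function is illustrative, not from this patch):

/* sketch: attach a counter if the HCA supports them, else run without */
static int example_get_counter(struct mlx4_dev *dev, u32 *counter_index)
{
	int err = mlx4_counter_alloc(dev, counter_index);

	if (err == -ENOENT)
		return 0;	/* no MLX4_DEV_CAP_FLAG_COUNTERS; feature is optional */
	return err;		/* 0 on success, -ENOMEM if the bitmap is exhausted */
}

The index is returned to the pool with mlx4_counter_free(dev, *counter_index) on teardown.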
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index e63c37d6a11..cd1784593a3 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -559,7 +559,8 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
struct mlx4_mgm *mgm = mgm_mailbox->buf;
u8 *mgid;
int err;
- u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
+ u8 op_mod = (prot == MLX4_PROT_ETH) ?
+ !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -834,7 +835,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
- if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
if (prot == MLX4_PROT_ETH)
@@ -853,7 +855,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
- if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
if (prot == MLX4_PROT_ETH) {
@@ -867,7 +870,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
@@ -877,7 +880,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
@@ -887,7 +890,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
@@ -897,7 +900,7 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!dev->caps.vep_mc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index dd7d745fbab..a2fcd8402d3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -48,8 +48,8 @@
#include <linux/mlx4/doorbell.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "0.01"
-#define DRV_RELDATE "May 1, 2007"
+#define DRV_VERSION "1.0"
+#define DRV_RELDATE "July 14, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
@@ -342,6 +342,7 @@ struct mlx4_priv {
struct mlx4_srq_table srq_table;
struct mlx4_qp_table qp_table;
struct mlx4_mcg_table mcg_table;
+ struct mlx4_bitmap counters_bitmap;
struct mlx4_catas_err catas_err;
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 8856659fb43..1f95afda684 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -146,7 +146,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
int i, err = 0;
int free = -1;
- if (dev->caps.vep_uc_steering) {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
if (!err) {
entry = kmalloc(sizeof *entry, GFP_KERNEL);
@@ -203,7 +203,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
goto out;
}
- if (!dev->caps.vep_uc_steering)
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
*qpn = info->base_qpn + free;
++table->total;
out:
@@ -243,7 +243,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- if (dev->caps.vep_uc_steering) {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (entry) {
mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
@@ -274,7 +274,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
struct mlx4_mac_entry *entry;
int err;
- if (dev->caps.vep_uc_steering) {
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 40bcb82d911..4e2d1448093 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2160,12 +2160,9 @@ static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
{
struct pci_dev *pdev = tp->pci_dev;
- u16 vendor_id, device_id;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &vendor_id);
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &device_id);
-
- if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
+ if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
+ (pdev->subsystem_device != 0xe000))
return;
rtl_writephy(tp, 0x1f, 0x0001);
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 40e95facdb6..86127bcc9f7 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -303,7 +303,6 @@ sbni_pci_probe( struct net_device *dev )
!= NULL ) {
int pci_irq_line;
unsigned long pci_ioaddr;
- u16 subsys;
if( pdev->vendor != SBNI_PCI_VENDOR &&
pdev->device != SBNI_PCI_DEVICE )
@@ -314,9 +313,7 @@ sbni_pci_probe( struct net_device *dev )
/* Avoid already found cards from previous calls */
if( !request_region( pci_ioaddr, SBNI_IO_EXTENT, dev->name ) ) {
- pci_read_config_word( pdev, PCI_SUBSYSTEM_ID, &subsys );
-
- if (subsys != 2)
+ if (pdev->subsystem_device != 2)
continue;
/* Dual adapter is present */
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d06a6374ed6..cac63c9f49a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -71,8 +71,14 @@ config OF_MDIO
config OF_PCI
def_tristate PCI
- depends on PCI && (PPC || MICROBLAZE || X86)
+ depends on PCI
help
OpenFirmware PCI bus accessors
+config OF_PCI_IRQ
+ def_tristate PCI
+ depends on OF_PCI && OF_IRQ
+ help
+ OpenFirmware PCI IRQ routing helpers
+
endmenu # OF
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index f7861ed2f28..dccb1176be5 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
+obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index b4559c58c09..da1f4b9605d 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -577,6 +577,24 @@ int of_address_to_resource(struct device_node *dev, int index,
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
+struct device_node *of_find_matching_node_by_address(struct device_node *from,
+ const struct of_device_id *matches,
+ u64 base_address)
+{
+ struct device_node *dn = of_find_matching_node(from, matches);
+ struct resource res;
+
+	while (dn) {
+		if (!of_address_to_resource(dn, 0, &res) &&
+		    res.start == base_address)
+			return dn;
+		dn = of_find_matching_node(dn, matches);
+	}
+
+ return NULL;
+}
+
/**
* of_iomap - Maps the memory mapped IO for a given device_node
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 632ebae7f17..02ed36719de 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -596,6 +596,71 @@ struct device_node *of_find_node_by_phandle(phandle handle)
EXPORT_SYMBOL(of_find_node_by_phandle);
/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return values, modified only if return value is 0.
+ * @sz:	number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it. Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_values buffer is modified only if valid u32 values can be decoded.
+ */
+int of_property_read_u32_array(const struct device_node *np, char *propname,
+ u32 *out_values, size_t sz)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+ const __be32 *val;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if ((sz * sizeof(*out_values)) > prop->length)
+ return -EOVERFLOW;
+
+ val = prop->value;
+ while (sz--)
+ *out_values++ = be32_to_cpup(val++);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u32_array);
+
+/**
+ * of_property_read_string - Find and read a string from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy). Returns 0 on
+ * success, -EINVAL if the property does not exist, -ENODATA if property
+ * does not have a value, and -EILSEQ if the string is not null-terminated
+ * within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+int of_property_read_string(struct device_node *np, char *propname,
+ const char **out_string)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (strnlen(prop->value, prop->length) >= prop->length)
+ return -EILSEQ;
+ *out_string = prop->value;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string);
+
+/**
* of_parse_phandle - Resolve a phandle property to a device_node pointer
* @np: Pointer to device node holding phandle property
* @phandle_name: Name of property holding a phandle value
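A hedged usage sketch for the two new accessors (np and the property names are illustrative; note that this initial signature takes a plain char * for the property name):

/* sketch: typical driver-side reads of DT properties */
const char *model;
u32 freq;

if (!of_property_read_string(np, "model", &model))
	pr_info("board model: %s\n", model);

/* a single u32 is simply the sz == 1 case of the array reader */
if (!of_property_read_u32_array(np, "clock-frequency", &freq, 1))
	pr_info("clock: %u Hz\n", freq);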
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index 905960338fb..3007662ac61 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -21,8 +21,9 @@
#include <linux/slab.h>
/**
- * of_get_gpio_flags - Get a GPIO number and flags to use with GPIO API
+ * of_get_named_gpio_flags() - Get a GPIO number and flags to use with GPIO API
* @np: device node to get GPIO from
+ * @propname: property name containing gpio specifier(s)
* @index: index of the GPIO
* @flags: a flags pointer to fill in
*
@@ -30,8 +31,8 @@
* value on the error condition. If @flags is not NULL the function also fills
* in flags for the GPIO.
*/
-int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags)
+int of_get_named_gpio_flags(struct device_node *np, const char *propname,
+ int index, enum of_gpio_flags *flags)
{
int ret;
struct device_node *gpio_np;
@@ -40,7 +41,7 @@ int of_get_gpio_flags(struct device_node *np, int index,
const void *gpio_spec;
const __be32 *gpio_cells;
- ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells", index,
+ ret = of_parse_phandles_with_args(np, propname, "#gpio-cells", index,
&gpio_np, &gpio_spec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
@@ -79,7 +80,7 @@ err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
}
-EXPORT_SYMBOL(of_get_gpio_flags);
+EXPORT_SYMBOL(of_get_named_gpio_flags);
/**
* of_gpio_count - Count GPIOs for a device
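The old of_get_gpio_flags() entry point presumably survives as a thin wrapper in of_gpio.h (an assumption; the header is outside this hunk), with the legacy "gpios" property as the hard-coded name:

/* sketch: the legacy call becomes the "gpios" special case */
static inline int of_get_gpio_flags(struct device_node *np, int index,
				    enum of_gpio_flags *flags)
{
	return of_get_named_gpio_flags(np, "gpios", index, flags);
}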
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index ac1ec54e4fd..ec7b060ae95 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -1,92 +1,40 @@
#include <linux/kernel.h>
#include <linux/of_pci.h>
-#include <linux/of_irq.h>
#include <asm/prom.h>
-/**
- * of_irq_map_pci - Resolve the interrupt for a PCI device
- * @pdev: the device whose interrupt is to be resolved
- * @out_irq: structure of_irq filled by this function
- *
- * This function resolves the PCI interrupt for a given PCI device. If a
- * device-node exists for a given pci_dev, it will use normal OF tree
- * walking. If not, it will implement standard swizzling and walk up the
- * PCI tree until an device-node is found, at which point it will finish
- * resolving using the OF tree walking.
- */
-int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+static inline int __of_pci_pci_compare(struct device_node *node,
+ unsigned int devfn)
{
- struct device_node *dn, *ppnode;
- struct pci_dev *ppdev;
- u32 lspec;
- __be32 lspec_be;
- __be32 laddr[3];
- u8 pin;
- int rc;
+ unsigned int size;
+ const __be32 *reg = of_get_property(node, "reg", &size);
- /* Check if we have a device node, if yes, fallback to standard
- * device tree parsing
- */
- dn = pci_device_to_OF_node(pdev);
- if (dn) {
- rc = of_irq_map_one(dn, 0, out_irq);
- if (!rc)
- return rc;
- }
-
- /* Ok, we don't, time to have fun. Let's start by building up an
- * interrupt spec. we assume #interrupt-cells is 1, which is standard
- * for PCI. If you do different, then don't use that routine.
- */
- rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
- if (rc != 0)
- return rc;
- /* No pin, exit */
- if (pin == 0)
- return -ENODEV;
-
- /* Now we walk up the PCI tree */
- lspec = pin;
- for (;;) {
- /* Get the pci_dev of our parent */
- ppdev = pdev->bus->self;
-
- /* Ouch, it's a host bridge... */
- if (ppdev == NULL) {
- ppnode = pci_bus_to_OF_node(pdev->bus);
-
- /* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
- } else {
- /* We found a P2P bridge, check if it has a node */
- ppnode = pci_device_to_OF_node(ppdev);
- }
-
- /* Ok, we have found a parent with a device-node, hand over to
- * the OF parsing code.
- * We build a unit address from the linux device to be used for
- * resolution. Note that we use the linux bus number which may
- * not match your firmware bus numbering.
- * Fortunately, in most cases, interrupt-map-mask doesn't
- * include the bus number as part of the matching.
- * You should still be careful about that though if you intend
- * to rely on this function (you ship a firmware that doesn't
- * create device nodes for all PCI devices).
- */
- if (ppnode)
- break;
+ if (!reg || size < 5 * sizeof(__be32))
+ return 0;
+ return ((be32_to_cpup(&reg[0]) >> 8) & 0xff) == devfn;
+}
- /* We can only get here if we hit a P2P bridge with no node,
- * let's do standard swizzling and try again
+struct device_node *of_pci_find_child_device(struct device_node *parent,
+ unsigned int devfn)
+{
+ struct device_node *node, *node2;
+
+ for_each_child_of_node(parent, node) {
+ if (__of_pci_pci_compare(node, devfn))
+ return node;
+ /*
+ * Some OFs create a parent node "multifunc-device" as
+ * a fake root for all functions of a multi-function
+	 * device; we go down into them as well.
*/
- lspec = pci_swizzle_interrupt_pin(pdev, lspec);
- pdev = ppdev;
+ if (!strcmp(node->name, "multifunc-device")) {
+ for_each_child_of_node(node, node2) {
+ if (__of_pci_pci_compare(node2, devfn)) {
+ of_node_put(node);
+ return node2;
+ }
+ }
+ }
}
-
- lspec_be = cpu_to_be32(lspec);
- laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
- laddr[1] = laddr[2] = cpu_to_be32(0);
- return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+ return NULL;
}
-EXPORT_SYMBOL_GPL(of_irq_map_pci);
+EXPORT_SYMBOL_GPL(of_pci_find_child_device);
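The devfn comparison relies on the OF PCI bus binding, where the first (phys.hi) cell of a "reg" entry is laid out as npt000ss bbbbbbbb dddddfff rrrrrrrr; bits 15:8 therefore carry device and function exactly as Linux packs them:

/* phys.hi: npt000ss bbbbbbbb dddddfff rrrrrrrr
 * bits 15:11 = device, 10:8 = function -> bits 15:8 == PCI_DEVFN(dev, fn) */
unsigned int devfn = (be32_to_cpup(&reg[0]) >> 8) & 0xff;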
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
new file mode 100644
index 00000000000..ac1ec54e4fd
--- /dev/null
+++ b/drivers/of/of_pci_irq.c
@@ -0,0 +1,92 @@
+#include <linux/kernel.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+#include <asm/prom.h>
+
+/**
+ * of_irq_map_pci - Resolve the interrupt for a PCI device
+ * @pdev: the device whose interrupt is to be resolved
+ * @out_irq: structure of_irq filled by this function
+ *
+ * This function resolves the PCI interrupt for a given PCI device. If a
+ * device-node exists for a given pci_dev, it will use normal OF tree
+ * walking. If not, it will implement standard swizzling and walk up the
+ * PCI tree until a device-node is found, at which point it will finish
+ * resolving using the OF tree walking.
+ */
+int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq)
+{
+ struct device_node *dn, *ppnode;
+ struct pci_dev *ppdev;
+ u32 lspec;
+ __be32 lspec_be;
+ __be32 laddr[3];
+ u8 pin;
+ int rc;
+
+	/* Check if we have a device node; if yes, fall back to standard
+ * device tree parsing
+ */
+ dn = pci_device_to_OF_node(pdev);
+ if (dn) {
+ rc = of_irq_map_one(dn, 0, out_irq);
+ if (!rc)
+ return rc;
+ }
+
+ /* Ok, we don't, time to have fun. Let's start by building up an
+	 * interrupt spec. We assume #interrupt-cells is 1, which is standard
+	 * for PCI. If yours differs, don't use this routine.
+ */
+ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
+ if (rc != 0)
+ return rc;
+ /* No pin, exit */
+ if (pin == 0)
+ return -ENODEV;
+
+ /* Now we walk up the PCI tree */
+ lspec = pin;
+ for (;;) {
+ /* Get the pci_dev of our parent */
+ ppdev = pdev->bus->self;
+
+ /* Ouch, it's a host bridge... */
+ if (ppdev == NULL) {
+ ppnode = pci_bus_to_OF_node(pdev->bus);
+
+ /* No node for host bridge ? give up */
+ if (ppnode == NULL)
+ return -EINVAL;
+ } else {
+ /* We found a P2P bridge, check if it has a node */
+ ppnode = pci_device_to_OF_node(ppdev);
+ }
+
+ /* Ok, we have found a parent with a device-node, hand over to
+ * the OF parsing code.
+ * We build a unit address from the linux device to be used for
+ * resolution. Note that we use the linux bus number which may
+ * not match your firmware bus numbering.
+ * Fortunately, in most cases, interrupt-map-mask doesn't
+ * include the bus number as part of the matching.
+ * You should still be careful about that though if you intend
+ * to rely on this function (you ship a firmware that doesn't
+ * create device nodes for all PCI devices).
+ */
+ if (ppnode)
+ break;
+
+ /* We can only get here if we hit a P2P bridge with no node,
+ * let's do standard swizzling and try again
+ */
+ lspec = pci_swizzle_interrupt_pin(pdev, lspec);
+ pdev = ppdev;
+ }
+
+ lspec_be = cpu_to_be32(lspec);
+ laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
+ laddr[1] = laddr[2] = cpu_to_be32(0);
+ return of_irq_map_raw(ppnode, &lspec_be, 1, laddr, out_irq);
+}
+EXPORT_SYMBOL_GPL(of_irq_map_pci);
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 63d3cb73bdb..e75af391e28 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -13,6 +13,7 @@
*/
#include <linux/errno.h>
#include <linux/module.h>
+#include <linux/amba/bus.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -22,6 +23,14 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+const struct of_device_id of_default_bus_match_table[] = {
+ { .compatible = "simple-bus", },
+#ifdef CONFIG_ARM_AMBA
+ { .compatible = "arm,amba-bus", },
+#endif /* CONFIG_ARM_AMBA */
+ {} /* Empty terminated list */
+};
+
static int of_dev_node_match(struct device *dev, void *data)
{
return dev->of_node == data;
@@ -168,17 +177,20 @@ struct platform_device *of_device_alloc(struct device_node *np,
EXPORT_SYMBOL(of_device_alloc);
/**
- * of_platform_device_create - Alloc, initialize and register an of_device
+ * of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
* @bus_id: name to assign device
+ * @platform_data: pointer to assign to the device's platform_data
* @parent: Linux device model parent device.
*
* Returns pointer to created platform device, or NULL if a device was not
* registered. Unavailable devices will not get registered.
*/
-struct platform_device *of_platform_device_create(struct device_node *np,
- const char *bus_id,
- struct device *parent)
+struct platform_device *of_platform_device_create_pdata(
+ struct device_node *np,
+ const char *bus_id,
+ void *platform_data,
+ struct device *parent)
{
struct platform_device *dev;
@@ -194,6 +206,7 @@ struct platform_device *of_platform_device_create(struct device_node *np,
#endif
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
dev->dev.bus = &platform_bus_type;
+ dev->dev.platform_data = platform_data;
/* We do not fill the DMA ops for platform devices by default.
* This is currently the responsibility of the platform code
@@ -207,8 +220,111 @@ struct platform_device *of_platform_device_create(struct device_node *np,
return dev;
}
+
+/**
+ * of_platform_device_create - Alloc, initialize and register an of_device
+ * @np: pointer to node to create device for
+ * @bus_id: name to assign device
+ * @parent: Linux device model parent device.
+ *
+ * Returns pointer to created platform device, or NULL if a device was not
+ * registered. Unavailable devices will not get registered.
+ */
+struct platform_device *of_platform_device_create(struct device_node *np,
+ const char *bus_id,
+ struct device *parent)
+{
+ return of_platform_device_create_pdata(np, bus_id, NULL, parent);
+}
EXPORT_SYMBOL(of_platform_device_create);
+#ifdef CONFIG_ARM_AMBA
+static struct amba_device *of_amba_device_create(struct device_node *node,
+ const char *bus_id,
+ void *platform_data,
+ struct device *parent)
+{
+ struct amba_device *dev;
+ const void *prop;
+ int i, ret;
+
+ pr_debug("Creating amba device %s\n", node->full_name);
+
+ if (!of_device_is_available(node))
+ return NULL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ /* setup generic device info */
+ dev->dev.coherent_dma_mask = ~0;
+ dev->dev.of_node = of_node_get(node);
+ dev->dev.parent = parent;
+ dev->dev.platform_data = platform_data;
+ if (bus_id)
+ dev_set_name(&dev->dev, "%s", bus_id);
+ else
+ of_device_make_bus_id(&dev->dev);
+
+ /* setup amba-specific device info */
+ dev->dma_mask = ~0;
+
+ /* Allow the HW Peripheral ID to be overridden */
+ prop = of_get_property(node, "arm,primecell-periphid", NULL);
+ if (prop)
+ dev->periphid = of_read_ulong(prop, 1);
+
+ /* Decode the IRQs and address ranges */
+ for (i = 0; i < AMBA_NR_IRQS; i++)
+ dev->irq[i] = irq_of_parse_and_map(node, i);
+
+ ret = of_address_to_resource(node, 0, &dev->res);
+ if (ret)
+ goto err_free;
+
+ ret = amba_device_register(dev, &iomem_resource);
+ if (ret)
+ goto err_free;
+
+ return dev;
+
+err_free:
+ kfree(dev);
+ return NULL;
+}
+#else /* CONFIG_ARM_AMBA */
+static struct amba_device *of_amba_device_create(struct device_node *node,
+ const char *bus_id,
+ void *platform_data,
+ struct device *parent)
+{
+ return NULL;
+}
+#endif /* CONFIG_ARM_AMBA */
+
+/**
+ * of_dev_lookup() - Given a device node, look up the preferred Linux name
+ */
+static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *lookup,
+ struct device_node *np)
+{
+ struct resource res;
+ if (lookup) {
+		for (; lookup->name != NULL; lookup++) {
+ if (!of_device_is_compatible(np, lookup->compatible))
+ continue;
+ if (of_address_to_resource(np, 0, &res))
+ continue;
+ if (res.start != lookup->phys_addr)
+ continue;
+ pr_debug("%s: devname=%s\n", np->full_name, lookup->name);
+ return lookup;
+ }
+ }
+ return NULL;
+}
+
/**
* of_platform_bus_create() - Create a device for a node and its children.
* @bus: device node of the bus to instantiate
@@ -221,19 +337,41 @@ EXPORT_SYMBOL(of_platform_device_create);
*/
static int of_platform_bus_create(struct device_node *bus,
const struct of_device_id *matches,
- struct device *parent)
+ const struct of_dev_auxdata *lookup,
+ struct device *parent, bool strict)
{
+ const struct of_dev_auxdata *auxdata;
struct device_node *child;
struct platform_device *dev;
+ const char *bus_id = NULL;
+ void *platform_data = NULL;
int rc = 0;
- dev = of_platform_device_create(bus, NULL, parent);
+ /* Make sure it has a compatible property */
+ if (strict && (!of_get_property(bus, "compatible", NULL))) {
+ pr_debug("%s() - skipping %s, no compatible prop\n",
+ __func__, bus->full_name);
+ return 0;
+ }
+
+ auxdata = of_dev_lookup(lookup, bus);
+ if (auxdata) {
+ bus_id = auxdata->name;
+ platform_data = auxdata->platform_data;
+ }
+
+ if (of_device_is_compatible(bus, "arm,primecell")) {
+ of_amba_device_create(bus, bus_id, platform_data, parent);
+ return 0;
+ }
+
+ dev = of_platform_device_create_pdata(bus, bus_id, platform_data, parent);
if (!dev || !of_match_node(matches, bus))
return 0;
for_each_child_of_node(bus, child) {
pr_debug(" create child: %s\n", child->full_name);
- rc = of_platform_bus_create(child, matches, &dev->dev);
+ rc = of_platform_bus_create(child, matches, lookup, &dev->dev, strict);
if (rc) {
of_node_put(child);
break;
@@ -267,11 +405,11 @@ int of_platform_bus_probe(struct device_node *root,
/* Do a self check of bus type, if there's a match, create children */
if (of_match_node(matches, root)) {
- rc = of_platform_bus_create(root, matches, parent);
+ rc = of_platform_bus_create(root, matches, NULL, parent, false);
} else for_each_child_of_node(root, child) {
if (!of_match_node(matches, child))
continue;
- rc = of_platform_bus_create(child, matches, parent);
+ rc = of_platform_bus_create(child, matches, NULL, parent, false);
if (rc)
break;
}
@@ -280,4 +418,44 @@ int of_platform_bus_probe(struct device_node *root,
return rc;
}
EXPORT_SYMBOL(of_platform_bus_probe);
+
+/**
+ * of_platform_populate() - Populate platform_devices from device tree data
+ * @root: parent of the first level to probe or NULL for the root of the tree
+ * @matches: match table, NULL to use the default
+ * @lookup: auxdata table of preferred names/platform_data keyed by
+ *	compatible string and address, NULL for none
+ * @parent: parent to hook devices from, NULL for toplevel
+ *
+ * Similar to of_platform_bus_probe(), this function walks the device tree
+ * and creates devices from nodes. It differs in that it follows the modern
+ * convention of requiring all device nodes to have a 'compatible' property,
+ * and it is suitable for creating devices which are children of the root
+ * node (of_platform_bus_probe will only create children of the root which
+ * are selected by the @matches argument).
+ *
+ * New board support should be using this function instead of
+ * of_platform_bus_probe().
+ *
+ * Returns 0 on success, < 0 on failure.
+ */
+int of_platform_populate(struct device_node *root,
+ const struct of_device_id *matches,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent)
+{
+ struct device_node *child;
+ int rc = 0;
+
+ root = root ? of_node_get(root) : of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ for_each_child_of_node(root, child) {
+ rc = of_platform_bus_create(child, matches, lookup, parent, true);
+ if (rc)
+ break;
+ }
+
+ of_node_put(root);
+ return rc;
+}
#endif /* !CONFIG_SPARC */
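A hedged board-file sketch of the new entry point; the device name, address, and the OF_DEV_AUXDATA() initializer helper from <linux/of_platform.h> are assumptions for illustration:

/* sketch: populate the tree, overriding name/platform_data for one device */
static struct of_dev_auxdata board_auxdata[] __initdata = {
	/* compatible, physical address, preferred name, platform_data */
	OF_DEV_AUXDATA("fsl,imx27-cspi", 0x1000e000, "imx27-cspi.0", NULL),
	{ /* sentinel */ }
};

static void __init board_dt_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     board_auxdata, NULL);
}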
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 094308e41be..631f7302760 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -71,4 +71,6 @@ obj-$(CONFIG_PCI_STUB) += pci-stub.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
+obj-$(CONFIG_OF) += of.o
+
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index 083034710fa..1d002b1c2bf 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -158,7 +158,7 @@ static void dlpar_pci_add_bus(struct device_node *dn)
/* Scan below the new bridge */
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
- of_scan_pci_bridge(dn, dev);
+ of_scan_pci_bridge(dev);
/* Map IO space for child bus, which may or may not succeed */
pcibios_map_io_space(dev->subordinate);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
new file mode 100644
index 00000000000..c94d37ec55c
--- /dev/null
+++ b/drivers/pci/of.c
@@ -0,0 +1,61 @@
+/*
+ * PCI <-> OF mapping helpers
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include "pci.h"
+
+void pci_set_of_node(struct pci_dev *dev)
+{
+ if (!dev->bus->dev.of_node)
+ return;
+ dev->dev.of_node = of_pci_find_child_device(dev->bus->dev.of_node,
+ dev->devfn);
+}
+
+void pci_release_of_node(struct pci_dev *dev)
+{
+ of_node_put(dev->dev.of_node);
+ dev->dev.of_node = NULL;
+}
+
+void pci_set_bus_of_node(struct pci_bus *bus)
+{
+ if (bus->self == NULL)
+ bus->dev.of_node = pcibios_get_phb_of_node(bus);
+ else
+ bus->dev.of_node = of_node_get(bus->self->dev.of_node);
+}
+
+void pci_release_bus_of_node(struct pci_bus *bus)
+{
+ of_node_put(bus->dev.of_node);
+ bus->dev.of_node = NULL;
+}
+
+struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
+{
+ /* This should only be called for PHBs */
+ if (WARN_ON(bus->self || bus->parent))
+ return NULL;
+
+ /* Look for a node pointer in either the intermediary device we
+	 * create above the root bus or its own parent. Normally only
+	 * the latter is populated.
+ */
+ if (bus->bridge->of_node)
+ return of_node_get(bus->bridge->of_node);
+ if (bus->bridge->parent->of_node)
+ return of_node_get(bus->bridge->parent->of_node);
+ return NULL;
+}
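Because pcibios_get_phb_of_node() is __weak, an architecture can substitute its own PHB lookup. A sketch of a powerpc-flavoured override (pci_bus_to_host() and the hose->dn field are powerpc conventions, assumed here):

/* sketch: arch override resolving the PHB node via its host controller */
struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose ? of_node_get(hose->dn) : NULL;
}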
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 46767c53917..12d1e81a8ab 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
+#include <linux/suspend.h>
#include "pci.h"
struct pci_dynid {
@@ -616,6 +617,21 @@ static int pci_pm_prepare(struct device *dev)
int error = 0;
/*
+ * If a PCI device configured to wake up the system from sleep states
+ * has been suspended at run time and there's a resume request pending
+ * for it, this is equivalent to the device signaling wakeup, so the
+ * system suspend operation should be aborted.
+ */
+ pm_runtime_get_noresume(dev);
+ if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
+ pm_wakeup_event(dev, 0);
+
+ if (pm_wakeup_pending()) {
+ pm_runtime_put_sync(dev);
+ return -EBUSY;
+ }
+
+ /*
* PCI devices suspended at run time need to be resumed at this
* point, because in general it is necessary to reconfigure them for
* system suspend. Namely, if the device is supposed to wake up the
@@ -624,7 +640,7 @@ static int pci_pm_prepare(struct device *dev)
* system from the sleep state, we'll have to prevent it from signaling
* wake-up.
*/
- pm_runtime_get_sync(dev);
+ pm_runtime_resume(dev);
if (drv && drv->pm && drv->pm->prepare)
error = drv->pm->prepare(dev);
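The pm_runtime_get_noresume() taken at the top of pci_pm_prepare() has to be balanced once the sleep transition is over; presumably the matching put lands in pci_pm_complete(), outside this hunk, along these lines:

/* sketch (assumed): the balancing put in the ->complete() callback */
static void pci_pm_complete(struct device *dev)
{
	struct device_driver *drv = dev->driver;

	pm_runtime_put_sync(dev);

	if (drv && drv->pm && drv->pm->complete)
		drv->pm->complete(dev);
}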
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index bafb3c3d4a8..9ab492f21f8 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -52,6 +52,7 @@ static void release_pcibus_dev(struct device *dev)
if (pci_bus->bridge)
put_device(pci_bus->bridge);
pci_bus_remove_resources(pci_bus);
+ pci_release_bus_of_node(pci_bus);
kfree(pci_bus);
}
@@ -588,7 +589,7 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->self = bridge;
child->bridge = get_device(&bridge->dev);
-
+ pci_set_bus_of_node(child);
pci_set_bus_speed(child);
/* Set up default resource pointers and names.. */
@@ -1038,6 +1039,7 @@ static void pci_release_dev(struct device *dev)
pci_dev = to_pci_dev(dev);
pci_release_capabilities(pci_dev);
+ pci_release_of_node(pci_dev);
kfree(pci_dev);
}
@@ -1157,6 +1159,8 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
dev->vendor = l & 0xffff;
dev->device = (l >> 16) & 0xffff;
+ pci_set_of_node(dev);
+
if (pci_setup_device(dev)) {
kfree(dev);
return NULL;
@@ -1409,6 +1413,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
goto dev_reg_err;
b->bridge = get_device(dev);
device_enable_async_suspend(b->bridge);
+ pci_set_bus_of_node(b);
if (!parent)
set_dev_node(b->bridge, pcibus_to_node(b));
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 02145e9697a..1196f61a4ab 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2758,6 +2758,29 @@ static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+
+ /*
+ * RICOH 0xe823 SD/MMC card reader fails to recognize
+	 * certain types of SD/MMC cards. Lowering the SD base
+	 * clock frequency from 200MHz to 50MHz fixes this issue.
+	 *
+	 * 0x150 - SD2.0 mode enable for changing base clock
+	 *	   frequency to 50MHz
+	 * 0xe1  - Base clock frequency
+	 * 0x32  - 50MHz new clock frequency
+	 * 0xf9  - Key register for 0x150
+	 * 0xfc  - Key register for 0xe1
+ */
+ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
+ pci_write_config_byte(dev, 0xf9, 0xfc);
+ pci_write_config_byte(dev, 0x150, 0x10);
+ pci_write_config_byte(dev, 0xf9, 0x00);
+ pci_write_config_byte(dev, 0xfc, 0x01);
+ pci_write_config_byte(dev, 0xe1, 0x32);
+ pci_write_config_byte(dev, 0xfc, 0x00);
+
+		dev_notice(&dev->dev, "MMC controller base frequency changed to 50MHz.\n");
+ }
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 12ef9121d4f..11312f401c7 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -258,13 +258,13 @@ static int vmwdt_suspend(void)
if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
pr_err("The system cannot be suspended while the watchdog"
" is in use\n");
- return NOTIFY_BAD;
+ return notifier_from_errno(-EBUSY);
}
if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
clear_bit(VMWDT_OPEN, &vmwdt_is_open);
pr_err("The system cannot be suspended while the watchdog"
" is running\n");
- return NOTIFY_BAD;
+ return notifier_from_errno(-EBUSY);
}
return NOTIFY_DONE;
}
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index c47b25fd3f4..92d7324acb1 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -814,8 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- if (__chsc_do_secm(css, 0))
- ret = NOTIFY_BAD;
+ ret = __chsc_do_secm(css, 0);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
break;
@@ -831,8 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
mutex_unlock(&css->mutex);
continue;
}
- if (__chsc_do_secm(css, 1))
- ret = NOTIFY_BAD;
+ ret = __chsc_do_secm(css, 1);
+ ret = notifier_from_errno(ret);
mutex_unlock(&css->mutex);
}
/* search for subchannels, which appeared during hibernation */
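notifier_from_errno() preserves the precise error for callers that decode it with notifier_to_errno(), instead of the information-free NOTIFY_BAD. For reference, its contemporary definition in include/linux/notifier.h is roughly:

/* sketch: 0 -> NOTIFY_OK; an error -> NOTIFY_STOP_MASK plus encoded errno */
static inline int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_BAD - err);
	return NOTIFY_OK;
}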
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d70e91ae60a..d82a023a901 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -144,9 +144,9 @@ int scsi_autopm_get_device(struct scsi_device *sdev)
int err;
err = pm_runtime_get_sync(&sdev->sdev_gendev);
- if (err < 0)
+	if (err < 0 && err != -EACCES)
pm_runtime_put_sync(&sdev->sdev_gendev);
- else if (err > 0)
+ else
err = 0;
return err;
}
@@ -173,9 +173,9 @@ int scsi_autopm_get_host(struct Scsi_Host *shost)
int err;
err = pm_runtime_get_sync(&shost->shost_gendev);
- if (err < 0)
+	if (err < 0 && err != -EACCES)
pm_runtime_put_sync(&shost->shost_gendev);
- else if (err > 0)
+ else
err = 0;
return err;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index de35c3ad8a6..52e2900d9d8 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -86,9 +86,6 @@ config SPI_BFIN_SPORT
help
Enable support for a SPI bus via the Blackfin SPORT peripheral.
- This driver can also be built as a module. If so, the module
- will be called spi_bfin_sport.
-
config SPI_AU1550
tristate "Au1550/Au12x0 SPI Controller"
depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL
@@ -97,9 +94,6 @@ config SPI_AU1550
If you say yes to this option, support will be included for the
Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
- This driver can also be built as a module. If so, the module
- will be called au1550_spi.
-
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
help
@@ -130,9 +124,6 @@ config SPI_COLDFIRE_QSPI
This enables support for the Coldfire QSPI controller in master
mode.
- This driver can also be built as a module. If so, the module
- will be called coldfire_qspi.
-
config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
depends on SPI_MASTER && ARCH_DAVINCI
@@ -140,9 +131,6 @@ config SPI_DAVINCI
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
- This driver can also be built as a module. The module will be called
- davinci_spi.
-
config SPI_EP93XX
tristate "Cirrus Logic EP93xx SPI controller"
depends on ARCH_EP93XX
@@ -150,9 +138,6 @@ config SPI_EP93XX
This enables using the Cirrus EP93xx SPI controller in master
mode.
- To compile this driver as a module, choose M here. The module will be
- called ep93xx_spi.
-
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GENERIC_GPIO
@@ -169,21 +154,6 @@ config SPI_GPIO
GPIO operations, you should be able to leverage that for better
speed with a custom version of this driver; see the source code.
-config SPI_IMX_VER_IMX1
- def_bool y if SOC_IMX1
-
-config SPI_IMX_VER_0_0
- def_bool y if SOC_IMX21 || SOC_IMX27
-
-config SPI_IMX_VER_0_4
- def_bool y if SOC_IMX31
-
-config SPI_IMX_VER_0_7
- def_bool y if ARCH_MX25 || SOC_IMX35 || SOC_IMX51 || SOC_IMX53
-
-config SPI_IMX_VER_2_3
- def_bool y if SOC_IMX51 || SOC_IMX53
-
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC
@@ -328,16 +298,6 @@ config SPI_S3C24XX_FIQ
no free DMA channels, or when doing transfers that required both
TX and RX data paths.
-config SPI_S3C24XX_GPIO
- tristate "Samsung S3C24XX series SPI by GPIO"
- depends on ARCH_S3C2410 && EXPERIMENTAL
- select SPI_BITBANG
- help
- SPI driver for Samsung S3C24XX series ARM SoCs using
- GPIO lines to provide the SPI bus. This can be used where
- the inbuilt hardware cannot provide the transfer mode, or
- where the board is using non hardware connected pins.
-
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
depends on (ARCH_S3C64XX || ARCH_S5P64X0)
@@ -385,16 +345,16 @@ config SPI_TI_SSP
This selects an SPI master implementation using a TI sequencer
serial port.
- To compile this driver as a module, choose M here: the
- module will be called ti-ssp-spi.
-
config SPI_TOPCLIFF_PCH
- tristate "Topcliff PCH SPI Controller"
+ tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
depends on PCI
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
+ This driver also supports the ML7213, a companion chip for the
+ Atom E6xx series and compatible with the Intel EG20T PCH.
+
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
depends on GENERIC_GPIO && CPU_TX49XX
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 0f8c69b6b19..61c3261c388 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -7,68 +7,55 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
# small core, mostly translating board-specific
# config declarations into driver model code
obj-$(CONFIG_SPI_MASTER) += spi.o
+obj-$(CONFIG_SPI_SPIDEV) += spidev.o
# SPI master controller drivers (bus)
-obj-$(CONFIG_SPI_ALTERA) += spi_altera.o
-obj-$(CONFIG_SPI_ATMEL) += atmel_spi.o
-obj-$(CONFIG_SPI_ATH79) += ath79_spi.o
-obj-$(CONFIG_SPI_BFIN) += spi_bfin5xx.o
-obj-$(CONFIG_SPI_BFIN_SPORT) += spi_bfin_sport.o
-obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
-obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
-obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
-obj-$(CONFIG_SPI_COLDFIRE_QSPI) += coldfire_qspi.o
-obj-$(CONFIG_SPI_DAVINCI) += davinci_spi.o
-obj-$(CONFIG_SPI_DESIGNWARE) += dw_spi.o
-obj-$(CONFIG_SPI_DW_PCI) += dw_spi_midpci.o
-dw_spi_midpci-objs := dw_spi_pci.o dw_spi_mid.o
-obj-$(CONFIG_SPI_DW_MMIO) += dw_spi_mmio.o
-obj-$(CONFIG_SPI_EP93XX) += ep93xx_spi.o
-obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
-obj-$(CONFIG_SPI_IMX) += spi_imx.o
-obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
-obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
-obj-$(CONFIG_SPI_PXA2XX_PCI) += pxa2xx_spi_pci.o
-obj-$(CONFIG_SPI_OC_TINY) += spi_oc_tiny.o
-obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
-obj-$(CONFIG_SPI_OMAP24XX) += omap2_mcspi.o
-obj-$(CONFIG_SPI_OMAP_100K) += omap_spi_100k.o
-obj-$(CONFIG_SPI_ORION) += orion_spi.o
-obj-$(CONFIG_SPI_PL022) += amba-pl022.o
-obj-$(CONFIG_SPI_MPC512x_PSC) += mpc512x_psc_spi.o
-obj-$(CONFIG_SPI_MPC52xx_PSC) += mpc52xx_psc_spi.o
-obj-$(CONFIG_SPI_MPC52xx) += mpc52xx_spi.o
-obj-$(CONFIG_SPI_FSL_LIB) += spi_fsl_lib.o
-obj-$(CONFIG_SPI_FSL_ESPI) += spi_fsl_espi.o
-obj-$(CONFIG_SPI_FSL_SPI) += spi_fsl_spi.o
-obj-$(CONFIG_SPI_PPC4xx) += spi_ppc4xx.o
-obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o
-obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx_hw.o
-obj-$(CONFIG_SPI_S3C64XX) += spi_s3c64xx.o
-obj-$(CONFIG_SPI_TEGRA) += spi_tegra.o
-obj-$(CONFIG_SPI_TI_SSP) += ti-ssp-spi.o
-obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi_topcliff_pch.o
-obj-$(CONFIG_SPI_TXX9) += spi_txx9.o
-obj-$(CONFIG_SPI_XILINX) += xilinx_spi.o
-obj-$(CONFIG_SPI_SH) += spi_sh.o
-obj-$(CONFIG_SPI_SH_SCI) += spi_sh_sci.o
-obj-$(CONFIG_SPI_SH_MSIOF) += spi_sh_msiof.o
-obj-$(CONFIG_SPI_STMP3XXX) += spi_stmp.o
-obj-$(CONFIG_SPI_NUC900) += spi_nuc900.o
+obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
+obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
+obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
+obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
+obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
+obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
+obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
+spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
+obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
+obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
+obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
+obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
+obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
+obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
+obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_ORION) += spi-orion.o
+obj-$(CONFIG_SPI_PL022) += spi-pl022.o
+obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx.o
+obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
+spi-s3c24xx-hw-y := spi-s3c24xx.o
+spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
+obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
+obj-$(CONFIG_SPI_SH) += spi-sh.o
+obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
+obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_STMP3XXX) += spi-stmp.o
+obj-$(CONFIG_SPI_TEGRA) += spi-tegra.o
+obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o
+obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
+obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
+obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
+obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
-# special build for s3c24xx spi driver with fiq support
-spi_s3c24xx_hw-y := spi_s3c24xx.o
-spi_s3c24xx_hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi_s3c24xx_fiq.o
-
-# ... add above this line ...
-
-# SPI protocol drivers (device/link on bus)
-obj-$(CONFIG_SPI_SPIDEV) += spidev.o
-obj-$(CONFIG_SPI_TLE62X0) += tle62x0.o
-# ... add above this line ...
-
-# SPI slave controller drivers (upstream link)
-# ... add above this line ...
-
-# SPI slave drivers (protocol for that link)
-# ... add above this line ...
diff --git a/drivers/spi/atmel_spi.h b/drivers/spi/atmel_spi.h
deleted file mode 100644
index 6e06b6ad3a4..00000000000
--- a/drivers/spi/atmel_spi.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Register definitions for Atmel Serial Peripheral Interface (SPI)
- *
- * Copyright (C) 2006 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __ATMEL_SPI_H__
-#define __ATMEL_SPI_H__
-
-/* SPI register offsets */
-#define SPI_CR 0x0000
-#define SPI_MR 0x0004
-#define SPI_RDR 0x0008
-#define SPI_TDR 0x000c
-#define SPI_SR 0x0010
-#define SPI_IER 0x0014
-#define SPI_IDR 0x0018
-#define SPI_IMR 0x001c
-#define SPI_CSR0 0x0030
-#define SPI_CSR1 0x0034
-#define SPI_CSR2 0x0038
-#define SPI_CSR3 0x003c
-#define SPI_RPR 0x0100
-#define SPI_RCR 0x0104
-#define SPI_TPR 0x0108
-#define SPI_TCR 0x010c
-#define SPI_RNPR 0x0110
-#define SPI_RNCR 0x0114
-#define SPI_TNPR 0x0118
-#define SPI_TNCR 0x011c
-#define SPI_PTCR 0x0120
-#define SPI_PTSR 0x0124
-
-/* Bitfields in CR */
-#define SPI_SPIEN_OFFSET 0
-#define SPI_SPIEN_SIZE 1
-#define SPI_SPIDIS_OFFSET 1
-#define SPI_SPIDIS_SIZE 1
-#define SPI_SWRST_OFFSET 7
-#define SPI_SWRST_SIZE 1
-#define SPI_LASTXFER_OFFSET 24
-#define SPI_LASTXFER_SIZE 1
-
-/* Bitfields in MR */
-#define SPI_MSTR_OFFSET 0
-#define SPI_MSTR_SIZE 1
-#define SPI_PS_OFFSET 1
-#define SPI_PS_SIZE 1
-#define SPI_PCSDEC_OFFSET 2
-#define SPI_PCSDEC_SIZE 1
-#define SPI_FDIV_OFFSET 3
-#define SPI_FDIV_SIZE 1
-#define SPI_MODFDIS_OFFSET 4
-#define SPI_MODFDIS_SIZE 1
-#define SPI_LLB_OFFSET 7
-#define SPI_LLB_SIZE 1
-#define SPI_PCS_OFFSET 16
-#define SPI_PCS_SIZE 4
-#define SPI_DLYBCS_OFFSET 24
-#define SPI_DLYBCS_SIZE 8
-
-/* Bitfields in RDR */
-#define SPI_RD_OFFSET 0
-#define SPI_RD_SIZE 16
-
-/* Bitfields in TDR */
-#define SPI_TD_OFFSET 0
-#define SPI_TD_SIZE 16
-
-/* Bitfields in SR */
-#define SPI_RDRF_OFFSET 0
-#define SPI_RDRF_SIZE 1
-#define SPI_TDRE_OFFSET 1
-#define SPI_TDRE_SIZE 1
-#define SPI_MODF_OFFSET 2
-#define SPI_MODF_SIZE 1
-#define SPI_OVRES_OFFSET 3
-#define SPI_OVRES_SIZE 1
-#define SPI_ENDRX_OFFSET 4
-#define SPI_ENDRX_SIZE 1
-#define SPI_ENDTX_OFFSET 5
-#define SPI_ENDTX_SIZE 1
-#define SPI_RXBUFF_OFFSET 6
-#define SPI_RXBUFF_SIZE 1
-#define SPI_TXBUFE_OFFSET 7
-#define SPI_TXBUFE_SIZE 1
-#define SPI_NSSR_OFFSET 8
-#define SPI_NSSR_SIZE 1
-#define SPI_TXEMPTY_OFFSET 9
-#define SPI_TXEMPTY_SIZE 1
-#define SPI_SPIENS_OFFSET 16
-#define SPI_SPIENS_SIZE 1
-
-/* Bitfields in CSR0 */
-#define SPI_CPOL_OFFSET 0
-#define SPI_CPOL_SIZE 1
-#define SPI_NCPHA_OFFSET 1
-#define SPI_NCPHA_SIZE 1
-#define SPI_CSAAT_OFFSET 3
-#define SPI_CSAAT_SIZE 1
-#define SPI_BITS_OFFSET 4
-#define SPI_BITS_SIZE 4
-#define SPI_SCBR_OFFSET 8
-#define SPI_SCBR_SIZE 8
-#define SPI_DLYBS_OFFSET 16
-#define SPI_DLYBS_SIZE 8
-#define SPI_DLYBCT_OFFSET 24
-#define SPI_DLYBCT_SIZE 8
-
-/* Bitfields in RCR */
-#define SPI_RXCTR_OFFSET 0
-#define SPI_RXCTR_SIZE 16
-
-/* Bitfields in TCR */
-#define SPI_TXCTR_OFFSET 0
-#define SPI_TXCTR_SIZE 16
-
-/* Bitfields in RNCR */
-#define SPI_RXNCR_OFFSET 0
-#define SPI_RXNCR_SIZE 16
-
-/* Bitfields in TNCR */
-#define SPI_TXNCR_OFFSET 0
-#define SPI_TXNCR_SIZE 16
-
-/* Bitfields in PTCR */
-#define SPI_RXTEN_OFFSET 0
-#define SPI_RXTEN_SIZE 1
-#define SPI_RXTDIS_OFFSET 1
-#define SPI_RXTDIS_SIZE 1
-#define SPI_TXTEN_OFFSET 8
-#define SPI_TXTEN_SIZE 1
-#define SPI_TXTDIS_OFFSET 9
-#define SPI_TXTDIS_SIZE 1
-
-/* Constants for BITS */
-#define SPI_BITS_8_BPT 0
-#define SPI_BITS_9_BPT 1
-#define SPI_BITS_10_BPT 2
-#define SPI_BITS_11_BPT 3
-#define SPI_BITS_12_BPT 4
-#define SPI_BITS_13_BPT 5
-#define SPI_BITS_14_BPT 6
-#define SPI_BITS_15_BPT 7
-#define SPI_BITS_16_BPT 8
-
-/* Bit manipulation macros */
-#define SPI_BIT(name) \
- (1 << SPI_##name##_OFFSET)
-#define SPI_BF(name,value) \
- (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
-#define SPI_BFEXT(name,value) \
- (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
-#define SPI_BFINS(name,value,old) \
- ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
- | SPI_BF(name,value))
-
-/* Register access macros */
-#define spi_readl(port,reg) \
- __raw_readl((port)->regs + SPI_##reg)
-#define spi_writel(port,reg,value) \
- __raw_writel((value), (port)->regs + SPI_##reg)
-
-#endif /* __ATMEL_SPI_H__ */
diff --git a/drivers/spi/spi_altera.c b/drivers/spi/spi-altera.c
index 4813a63ce6f..4813a63ce6f 100644
--- a/drivers/spi/spi_altera.c
+++ b/drivers/spi/spi-altera.c
diff --git a/drivers/spi/ath79_spi.c b/drivers/spi/spi-ath79.c
index fcff810ea3b..03019bf5a5e 100644
--- a/drivers/spi/ath79_spi.c
+++ b/drivers/spi/spi-ath79.c
@@ -232,7 +232,7 @@ static __devinit int ath79_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- sp->base = ioremap(r->start, r->end - r->start + 1);
+ sp->base = ioremap(r->start, resource_size(r));
if (!sp->base) {
ret = -ENXIO;
goto err_put_master;
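The resource_size() helper used here replaces the easy-to-botch inclusive-bounds arithmetic; its definition in include/linux/ioport.h is simply:

        static inline resource_size_t resource_size(const struct resource *res)
        {
                /* struct resource bounds are inclusive, hence the +1 */
                return res->end - res->start + 1;
        }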
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/spi-atmel.c
index 08711e9202a..82dee9a6c0d 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/spi-atmel.c
@@ -25,7 +25,160 @@
#include <mach/gpio.h>
#include <mach/cpu.h>
-#include "atmel_spi.h"
+/* SPI register offsets */
+#define SPI_CR 0x0000
+#define SPI_MR 0x0004
+#define SPI_RDR 0x0008
+#define SPI_TDR 0x000c
+#define SPI_SR 0x0010
+#define SPI_IER 0x0014
+#define SPI_IDR 0x0018
+#define SPI_IMR 0x001c
+#define SPI_CSR0 0x0030
+#define SPI_CSR1 0x0034
+#define SPI_CSR2 0x0038
+#define SPI_CSR3 0x003c
+#define SPI_RPR 0x0100
+#define SPI_RCR 0x0104
+#define SPI_TPR 0x0108
+#define SPI_TCR 0x010c
+#define SPI_RNPR 0x0110
+#define SPI_RNCR 0x0114
+#define SPI_TNPR 0x0118
+#define SPI_TNCR 0x011c
+#define SPI_PTCR 0x0120
+#define SPI_PTSR 0x0124
+
+/* Bitfields in CR */
+#define SPI_SPIEN_OFFSET 0
+#define SPI_SPIEN_SIZE 1
+#define SPI_SPIDIS_OFFSET 1
+#define SPI_SPIDIS_SIZE 1
+#define SPI_SWRST_OFFSET 7
+#define SPI_SWRST_SIZE 1
+#define SPI_LASTXFER_OFFSET 24
+#define SPI_LASTXFER_SIZE 1
+
+/* Bitfields in MR */
+#define SPI_MSTR_OFFSET 0
+#define SPI_MSTR_SIZE 1
+#define SPI_PS_OFFSET 1
+#define SPI_PS_SIZE 1
+#define SPI_PCSDEC_OFFSET 2
+#define SPI_PCSDEC_SIZE 1
+#define SPI_FDIV_OFFSET 3
+#define SPI_FDIV_SIZE 1
+#define SPI_MODFDIS_OFFSET 4
+#define SPI_MODFDIS_SIZE 1
+#define SPI_LLB_OFFSET 7
+#define SPI_LLB_SIZE 1
+#define SPI_PCS_OFFSET 16
+#define SPI_PCS_SIZE 4
+#define SPI_DLYBCS_OFFSET 24
+#define SPI_DLYBCS_SIZE 8
+
+/* Bitfields in RDR */
+#define SPI_RD_OFFSET 0
+#define SPI_RD_SIZE 16
+
+/* Bitfields in TDR */
+#define SPI_TD_OFFSET 0
+#define SPI_TD_SIZE 16
+
+/* Bitfields in SR */
+#define SPI_RDRF_OFFSET 0
+#define SPI_RDRF_SIZE 1
+#define SPI_TDRE_OFFSET 1
+#define SPI_TDRE_SIZE 1
+#define SPI_MODF_OFFSET 2
+#define SPI_MODF_SIZE 1
+#define SPI_OVRES_OFFSET 3
+#define SPI_OVRES_SIZE 1
+#define SPI_ENDRX_OFFSET 4
+#define SPI_ENDRX_SIZE 1
+#define SPI_ENDTX_OFFSET 5
+#define SPI_ENDTX_SIZE 1
+#define SPI_RXBUFF_OFFSET 6
+#define SPI_RXBUFF_SIZE 1
+#define SPI_TXBUFE_OFFSET 7
+#define SPI_TXBUFE_SIZE 1
+#define SPI_NSSR_OFFSET 8
+#define SPI_NSSR_SIZE 1
+#define SPI_TXEMPTY_OFFSET 9
+#define SPI_TXEMPTY_SIZE 1
+#define SPI_SPIENS_OFFSET 16
+#define SPI_SPIENS_SIZE 1
+
+/* Bitfields in CSR0 */
+#define SPI_CPOL_OFFSET 0
+#define SPI_CPOL_SIZE 1
+#define SPI_NCPHA_OFFSET 1
+#define SPI_NCPHA_SIZE 1
+#define SPI_CSAAT_OFFSET 3
+#define SPI_CSAAT_SIZE 1
+#define SPI_BITS_OFFSET 4
+#define SPI_BITS_SIZE 4
+#define SPI_SCBR_OFFSET 8
+#define SPI_SCBR_SIZE 8
+#define SPI_DLYBS_OFFSET 16
+#define SPI_DLYBS_SIZE 8
+#define SPI_DLYBCT_OFFSET 24
+#define SPI_DLYBCT_SIZE 8
+
+/* Bitfields in RCR */
+#define SPI_RXCTR_OFFSET 0
+#define SPI_RXCTR_SIZE 16
+
+/* Bitfields in TCR */
+#define SPI_TXCTR_OFFSET 0
+#define SPI_TXCTR_SIZE 16
+
+/* Bitfields in RNCR */
+#define SPI_RXNCR_OFFSET 0
+#define SPI_RXNCR_SIZE 16
+
+/* Bitfields in TNCR */
+#define SPI_TXNCR_OFFSET 0
+#define SPI_TXNCR_SIZE 16
+
+/* Bitfields in PTCR */
+#define SPI_RXTEN_OFFSET 0
+#define SPI_RXTEN_SIZE 1
+#define SPI_RXTDIS_OFFSET 1
+#define SPI_RXTDIS_SIZE 1
+#define SPI_TXTEN_OFFSET 8
+#define SPI_TXTEN_SIZE 1
+#define SPI_TXTDIS_OFFSET 9
+#define SPI_TXTDIS_SIZE 1
+
+/* Constants for BITS */
+#define SPI_BITS_8_BPT 0
+#define SPI_BITS_9_BPT 1
+#define SPI_BITS_10_BPT 2
+#define SPI_BITS_11_BPT 3
+#define SPI_BITS_12_BPT 4
+#define SPI_BITS_13_BPT 5
+#define SPI_BITS_14_BPT 6
+#define SPI_BITS_15_BPT 7
+#define SPI_BITS_16_BPT 8
+
+/* Bit manipulation macros */
+#define SPI_BIT(name) \
+ (1 << SPI_##name##_OFFSET)
+#define SPI_BF(name,value) \
+ (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
+#define SPI_BFEXT(name,value) \
+ (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
+#define SPI_BFINS(name,value,old) \
+ ( ((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name,value))
+
+/* Register access macros */
+#define spi_readl(port,reg) \
+ __raw_readl((port)->regs + SPI_##reg)
+#define spi_writel(port,reg,value) \
+ __raw_writel((value), (port)->regs + SPI_##reg)
+
/*
* The core SPI transfer engine just talks to a register bank to set up
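The SPI_##name##_OFFSET/_SIZE pairs moved into the driver above are meant to be used only through the SPI_BIT/SPI_BF/SPI_BFEXT/SPI_BFINS helpers. A minimal sketch of how they compose (the field values are illustrative, not taken from the driver):

        /* illustrative only: field values are made up, not from the driver */
        static u32 atmel_spi_csr_example(void)
        {
                /* pack two fields into an otherwise empty CSR image */
                u32 csr = SPI_BF(BITS, SPI_BITS_8_BPT) | SPI_BF(SCBR, 4);

                /* rewrite SCBR while preserving BITS */
                csr = SPI_BFINS(SCBR, 8, csr);

                /* extract a field back out of the packed word: returns 8 */
                return SPI_BFEXT(SCBR, csr);
        }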
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/spi-au1550.c
index b50563d320e..bddee5f516b 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/spi-au1550.c
@@ -1,5 +1,5 @@
/*
- * au1550_spi.c - au1550 psc spi controller driver
+ * au1550 psc spi controller driver
* may work also with au1200, au1210, au1250
* will not work on au1000, au1100 and au1500 (no full spi controller there)
*
diff --git a/drivers/spi/spi_bfin_sport.c b/drivers/spi/spi-bfin-sport.c
index e557ff617b1..e557ff617b1 100644
--- a/drivers/spi/spi_bfin_sport.c
+++ b/drivers/spi/spi-bfin-sport.c
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index cc880c95e7d..b8d25f2b703 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -58,7 +58,7 @@ struct bfin_spi_master_data {
struct spi_master *master;
/* Regs base of SPI controller */
- void __iomem *regs_base;
+ struct bfin_spi_regs __iomem *regs;
/* Pin request list */
u16 *pin_req;
@@ -122,34 +122,14 @@ struct bfin_spi_slave_data {
const struct bfin_spi_transfer_ops *ops;
};
-#define DEFINE_SPI_REG(reg, off) \
-static inline u16 read_##reg(struct bfin_spi_master_data *drv_data) \
- { return bfin_read16(drv_data->regs_base + off); } \
-static inline void write_##reg(struct bfin_spi_master_data *drv_data, u16 v) \
- { bfin_write16(drv_data->regs_base + off, v); }
-
-DEFINE_SPI_REG(CTRL, 0x00)
-DEFINE_SPI_REG(FLAG, 0x04)
-DEFINE_SPI_REG(STAT, 0x08)
-DEFINE_SPI_REG(TDBR, 0x0C)
-DEFINE_SPI_REG(RDBR, 0x10)
-DEFINE_SPI_REG(BAUD, 0x14)
-DEFINE_SPI_REG(SHAW, 0x18)
-
static void bfin_spi_enable(struct bfin_spi_master_data *drv_data)
{
- u16 cr;
-
- cr = read_CTRL(drv_data);
- write_CTRL(drv_data, (cr | BIT_CTL_ENABLE));
+ bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE);
}
static void bfin_spi_disable(struct bfin_spi_master_data *drv_data)
{
- u16 cr;
-
- cr = read_CTRL(drv_data);
- write_CTRL(drv_data, (cr & (~BIT_CTL_ENABLE)));
+ bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE);
}
/* Calculate the SPI_BAUD register value based on input HZ */
@@ -172,10 +152,10 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
unsigned long limit = loops_per_jiffy << 1;
/* wait for stop and clear stat */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF) && --limit)
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit)
cpu_relax();
- write_STAT(drv_data, BIT_STAT_CLR);
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
return limit;
}
@@ -183,29 +163,19 @@ static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
/* Chip select operation functions for cs_change flag */
static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip)
{
- if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
- u16 flag = read_FLAG(drv_data);
-
- flag &= ~chip->flag;
-
- write_FLAG(drv_data, flag);
- } else {
+ if (likely(chip->chip_select_num < MAX_CTRL_CS))
+ bfin_write_and(&drv_data->regs->flg, ~chip->flag);
+ else
gpio_set_value(chip->cs_gpio, 0);
- }
}
static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
- if (likely(chip->chip_select_num < MAX_CTRL_CS)) {
- u16 flag = read_FLAG(drv_data);
-
- flag |= chip->flag;
-
- write_FLAG(drv_data, flag);
- } else {
+ if (likely(chip->chip_select_num < MAX_CTRL_CS))
+ bfin_write_or(&drv_data->regs->flg, chip->flag);
+ else
gpio_set_value(chip->cs_gpio, 1);
- }
/* Move delay here for consistency */
if (chip->cs_chg_udelay)
@@ -216,25 +186,15 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
- if (chip->chip_select_num < MAX_CTRL_CS) {
- u16 flag = read_FLAG(drv_data);
-
- flag |= (chip->flag >> 8);
-
- write_FLAG(drv_data, flag);
- }
+ if (chip->chip_select_num < MAX_CTRL_CS)
+ bfin_write_or(&drv_data->regs->flg, chip->flag >> 8);
}
static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data,
struct bfin_spi_slave_data *chip)
{
- if (chip->chip_select_num < MAX_CTRL_CS) {
- u16 flag = read_FLAG(drv_data);
-
- flag &= ~(chip->flag >> 8);
-
- write_FLAG(drv_data, flag);
- }
+ if (chip->chip_select_num < MAX_CTRL_CS)
+ bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8));
}
/* stop controller and reconfigure the current chip */
@@ -243,15 +203,15 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
/* Clear status and disable clock */
- write_STAT(drv_data, BIT_STAT_CLR);
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
bfin_spi_disable(drv_data);
dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
SSYNC();
/* Load the registers */
- write_CTRL(drv_data, chip->ctl_reg);
- write_BAUD(drv_data, chip->baud);
+ bfin_write(&drv_data->regs->ctl, chip->ctl_reg);
+ bfin_write(&drv_data->regs->baud, chip->baud);
bfin_spi_enable(drv_data);
bfin_spi_cs_active(drv_data, chip);
@@ -260,7 +220,7 @@ static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
/* used to kick off transfer in rx mode and read unwanted RX data */
static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data)
{
- (void) read_RDBR(drv_data);
+ (void) bfin_read(&drv_data->regs->rdbr);
}
static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
@@ -269,10 +229,10 @@ static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
+ bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
/* wait until transfer finished.
checking SPIF or TXS may not guarantee transfer completion */
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
/* discard RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
@@ -287,10 +247,10 @@ static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, tx_val);
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ bfin_write(&drv_data->regs->tdbr, tx_val);
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
+ *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
}
}
@@ -300,10 +260,10 @@ static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, (*(u8 *) (drv_data->tx++)));
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u8 *) (drv_data->rx++) = read_RDBR(drv_data);
+ *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
}
}
@@ -319,11 +279,11 @@ static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->tx < drv_data->tx_end) {
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
drv_data->tx += 2;
/* wait until transfer finished.
checking SPIF or TXS may not guarantee transfer completion */
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
/* discard RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
@@ -338,10 +298,10 @@ static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, tx_val);
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ bfin_write(&drv_data->regs->tdbr, tx_val);
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
+ *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
drv_data->rx += 2;
}
}
@@ -352,11 +312,11 @@ static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data)
bfin_spi_dummy_read(drv_data);
while (drv_data->rx < drv_data->rx_end) {
- write_TDBR(drv_data, (*(u16 *) (drv_data->tx)));
+ bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
drv_data->tx += 2;
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
- *(u16 *) (drv_data->rx) = read_RDBR(drv_data);
+ *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
drv_data->rx += 2;
}
}
@@ -428,7 +388,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
int loop = 0;
/* wait until transfer finished. */
- while (!(read_STAT(drv_data) & BIT_STAT_RXS))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
cpu_relax();
if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
@@ -439,11 +399,11 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (n_bytes % 2) {
u16 *buf = (u16 *)drv_data->rx;
for (loop = 0; loop < n_bytes / 2; loop++)
- *buf++ = read_RDBR(drv_data);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
} else {
u8 *buf = (u8 *)drv_data->rx;
for (loop = 0; loop < n_bytes; loop++)
- *buf++ = read_RDBR(drv_data);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
}
drv_data->rx += n_bytes;
}
@@ -468,15 +428,15 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
u16 *buf = (u16 *)drv_data->rx;
u16 *buf2 = (u16 *)drv_data->tx;
for (loop = 0; loop < n_bytes / 2; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, *buf2++);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf2++);
}
} else {
u8 *buf = (u8 *)drv_data->rx;
u8 *buf2 = (u8 *)drv_data->tx;
for (loop = 0; loop < n_bytes; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, *buf2++);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf2++);
}
}
} else if (drv_data->rx) {
@@ -485,14 +445,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (n_bytes % 2) {
u16 *buf = (u16 *)drv_data->rx;
for (loop = 0; loop < n_bytes / 2; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, chip->idle_tx_val);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
}
} else {
u8 *buf = (u8 *)drv_data->rx;
for (loop = 0; loop < n_bytes; loop++) {
- *buf++ = read_RDBR(drv_data);
- write_TDBR(drv_data, chip->idle_tx_val);
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
}
}
} else if (drv_data->tx) {
@@ -501,14 +461,14 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
if (n_bytes % 2) {
u16 *buf = (u16 *)drv_data->tx;
for (loop = 0; loop < n_bytes / 2; loop++) {
- read_RDBR(drv_data);
- write_TDBR(drv_data, *buf++);
+ bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
} else {
u8 *buf = (u8 *)drv_data->tx;
for (loop = 0; loop < n_bytes; loop++) {
- read_RDBR(drv_data);
- write_TDBR(drv_data, *buf++);
+ bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
}
}
@@ -528,19 +488,19 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
struct spi_message *msg = drv_data->cur_msg;
unsigned long timeout;
unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
- u16 spistat = read_STAT(drv_data);
+ u16 spistat = bfin_read(&drv_data->regs->stat);
dev_dbg(&drv_data->pdev->dev,
"in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
dmastat, spistat);
if (drv_data->rx != NULL) {
- u16 cr = read_CTRL(drv_data);
+ u16 cr = bfin_read(&drv_data->regs->ctl);
/* discard old RX data and clear RXS */
bfin_spi_dummy_read(drv_data);
- write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
- write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */
- write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */
+ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
+ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */
}
clear_dma_irqstat(drv_data->dma_channel);
@@ -552,17 +512,17 @@ static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
* register until it goes low for 2 successive reads
*/
if (drv_data->tx != NULL) {
- while ((read_STAT(drv_data) & BIT_STAT_TXS) ||
- (read_STAT(drv_data) & BIT_STAT_TXS))
+ while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) ||
+ (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS))
cpu_relax();
}
dev_dbg(&drv_data->pdev->dev,
"in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
- dmastat, read_STAT(drv_data));
+ dmastat, bfin_read(&drv_data->regs->stat));
timeout = jiffies + HZ;
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
if (!time_before(jiffies, timeout)) {
dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF");
break;
@@ -699,9 +659,9 @@ static void bfin_spi_pump_transfers(unsigned long data)
bfin_spi_giveback(drv_data);
return;
}
- cr = read_CTRL(drv_data) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
+ cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
cr |= cr_width;
- write_CTRL(drv_data, cr);
+ bfin_write(&drv_data->regs->ctl, cr);
dev_dbg(&drv_data->pdev->dev,
"transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
@@ -712,11 +672,11 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* Speed setup (surely valid because already checked) */
if (transfer->speed_hz)
- write_BAUD(drv_data, hz_to_spi_baud(transfer->speed_hz));
+ bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
else
- write_BAUD(drv_data, chip->baud);
+ bfin_write(&drv_data->regs->baud, chip->baud);
- write_STAT(drv_data, BIT_STAT_CLR);
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
bfin_spi_cs_active(drv_data, chip);
dev_dbg(&drv_data->pdev->dev,
@@ -749,7 +709,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
}
/* poll for SPI completion before start */
- while (!(read_STAT(drv_data) & BIT_STAT_SPIF))
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
cpu_relax();
/* dirty hack for autobuffer DMA mode */
@@ -766,7 +726,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
enable_dma(drv_data->dma_channel);
/* start SPI transfer */
- write_CTRL(drv_data, cr | BIT_CTL_TIMOD_DMA_TX);
+ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX);
/* just return here, there can only be one transfer
* in this mode
@@ -821,7 +781,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
set_dma_config(drv_data->dma_channel, dma_config);
local_irq_save(flags);
SSYNC();
- write_CTRL(drv_data, cr);
+ bfin_write(&drv_data->regs->ctl, cr);
enable_dma(drv_data->dma_channel);
dma_enable_irq(drv_data->dma_channel);
local_irq_restore(flags);
@@ -835,7 +795,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
* problems with setting up the output value in TDBR prior to the
* start of the transfer.
*/
- write_CTRL(drv_data, cr | BIT_CTL_TXMOD);
+ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD);
if (chip->pio_interrupt) {
/* SPI irq should have been disabled by now */
@@ -845,19 +805,19 @@ static void bfin_spi_pump_transfers(unsigned long data)
/* start transfer */
if (drv_data->tx == NULL)
- write_TDBR(drv_data, chip->idle_tx_val);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
else {
int loop;
if (bits_per_word % 16 == 0) {
u16 *buf = (u16 *)drv_data->tx;
for (loop = 0; loop < bits_per_word / 16;
loop++) {
- write_TDBR(drv_data, *buf++);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
} else if (bits_per_word % 8 == 0) {
u8 *buf = (u8 *)drv_data->tx;
for (loop = 0; loop < bits_per_word / 8; loop++)
- write_TDBR(drv_data, *buf++);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
}
drv_data->tx += drv_data->n_bytes;
@@ -1005,7 +965,7 @@ static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
#define MAX_SPI_SSEL 7
-static u16 ssel[][MAX_SPI_SSEL] = {
+static const u16 ssel[][MAX_SPI_SSEL] = {
{P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
P_SPI0_SSEL4, P_SPI0_SSEL5,
P_SPI0_SSEL6, P_SPI0_SSEL7},
@@ -1226,7 +1186,7 @@ static void bfin_spi_cleanup(struct spi_device *spi)
spi_set_ctldata(spi, NULL);
}
-static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
{
INIT_LIST_HEAD(&drv_data->queue);
spin_lock_init(&drv_data->lock);
@@ -1248,7 +1208,7 @@ static inline int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
-static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
{
unsigned long flags;
@@ -1270,7 +1230,7 @@ static inline int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
return 0;
}
-static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
{
unsigned long flags;
unsigned limit = 500;
@@ -1299,7 +1259,7 @@ static inline int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
return status;
}
-static inline int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
+static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
{
int status;
@@ -1353,8 +1313,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_get_res;
}
- drv_data->regs_base = ioremap(res->start, resource_size(res));
- if (drv_data->regs_base == NULL) {
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
dev_err(dev, "Cannot map IO\n");
status = -ENXIO;
goto out_error_ioremap;
@@ -1397,8 +1357,8 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
/* Reset SPI registers. If these registers were used by the boot loader,
* the sky may fall on your head if you enable the dma controller.
*/
- write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
- write_FLAG(drv_data, 0xFF00);
+ bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
+ bfin_write(&drv_data->regs->flg, 0xFF00);
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
@@ -1408,15 +1368,15 @@ static int __init bfin_spi_probe(struct platform_device *pdev)
goto out_error_queue_alloc;
}
- dev_info(dev, "%s, Version %s, regs_base@%p, dma channel@%d\n",
- DRV_DESC, DRV_VERSION, drv_data->regs_base,
+ dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n",
+ DRV_DESC, DRV_VERSION, drv_data->regs,
drv_data->dma_channel);
return status;
out_error_queue_alloc:
bfin_spi_destroy_queue(drv_data);
out_error_free_io:
- iounmap((void *) drv_data->regs_base);
+ iounmap(drv_data->regs);
out_error_ioremap:
out_error_get_res:
spi_master_put(master);
@@ -1473,14 +1433,14 @@ static int bfin_spi_suspend(struct platform_device *pdev, pm_message_t state)
if (status != 0)
return status;
- drv_data->ctrl_reg = read_CTRL(drv_data);
- drv_data->flag_reg = read_FLAG(drv_data);
+ drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl);
+ drv_data->flag_reg = bfin_read(&drv_data->regs->flg);
/*
* reset SPI_CTL and SPI_FLG registers
*/
- write_CTRL(drv_data, BIT_CTL_CPHA | BIT_CTL_MASTER);
- write_FLAG(drv_data, 0xFF00);
+ bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
+ bfin_write(&drv_data->regs->flg, 0xFF00);
return 0;
}
@@ -1490,8 +1450,8 @@ static int bfin_spi_resume(struct platform_device *pdev)
struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
int status = 0;
- write_CTRL(drv_data, drv_data->ctrl_reg);
- write_FLAG(drv_data, drv_data->flag_reg);
+ bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
+ bfin_write(&drv_data->regs->flg, drv_data->flag_reg);
/* Start the queue running */
status = bfin_spi_start_queue(drv_data);
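The bfin_write_or()/bfin_write_and() helpers this conversion leans on collapse the old read_CTRL()/write_CTRL() read-modify-write pairs into single calls. A sketch of the open-coded equivalent they replace (same pattern as the removed lines above):

        /* open-coded equivalent of bfin_write_or(&regs->ctl, BIT_CTL_ENABLE) */
        u16 cr = bfin_read(&regs->ctl);
        bfin_write(&regs->ctl, cr | BIT_CTL_ENABLE);

        /* and of bfin_write_and(&regs->ctl, ~BIT_CTL_ENABLE) */
        cr = bfin_read(&regs->ctl);
        bfin_write(&regs->ctl, cr & ~BIT_CTL_ENABLE);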
diff --git a/drivers/spi/spi_bitbang_txrx.h b/drivers/spi/spi-bitbang-txrx.h
index c16bf853c3e..c16bf853c3e 100644
--- a/drivers/spi/spi_bitbang_txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi-bitbang.c
index 14a63f6010d..02d57fbba29 100644
--- a/drivers/spi/spi_bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -1,5 +1,5 @@
/*
- * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities
+ * polling/bitbanging SPI master controller driver utilities
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ static unsigned bitbang_txrx_8(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = spi->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned count = t->len;
const u8 *tx = t->tx_buf;
u8 *rx = t->rx_buf;
@@ -94,7 +94,7 @@ static unsigned bitbang_txrx_16(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = spi->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned count = t->len;
const u16 *tx = t->tx_buf;
u16 *rx = t->rx_buf;
@@ -120,7 +120,7 @@ static unsigned bitbang_txrx_32(
unsigned ns,
struct spi_transfer *t
) {
- unsigned bits = spi->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned count = t->len;
const u32 *tx = t->tx_buf;
u32 *rx = t->rx_buf;
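The "? :" with an omitted middle operand is the GNU C conditional extension: a ? : b yields a when a is non-zero and b otherwise, evaluating a only once. The change is therefore shorthand for a per-transfer override with a device-wide fallback:

        unsigned bits;

        if (t->bits_per_word)
                bits = t->bits_per_word;        /* per-transfer override */
        else
                bits = spi->bits_per_word;      /* device default */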
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi-butterfly.c
index 0d4ceba3b59..9f907ec52de 100644
--- a/drivers/spi/spi_butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -1,5 +1,5 @@
/*
- * spi_butterfly.c - parport-to-butterfly adapter
+ * parport-to-butterfly adapter
*
* Copyright (C) 2005 David Brownell
*
@@ -149,7 +149,7 @@ static void butterfly_chipselect(struct spi_device *spi, int value)
#define spidelay(X) do{}while(0)
//#define spidelay ndelay
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
static u32
butterfly_txrx_word_mode0(struct spi_device *spi,
diff --git a/drivers/spi/coldfire_qspi.c b/drivers/spi/spi-coldfire-qspi.c
index ae2cd1c1fda..ae2cd1c1fda 100644
--- a/drivers/spi/coldfire_qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/spi-davinci.c
index 1f0ed8005c9..1f0ed8005c9 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/spi-davinci.c
diff --git a/drivers/spi/dw_spi_mid.c b/drivers/spi/spi-dw-mid.c
index 489178243d8..130e55537db 100644
--- a/drivers/spi/dw_spi_mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -1,5 +1,5 @@
/*
- * dw_spi_mid.c - special handling for DW core on Intel MID platform
+ * Special handling for DW core on Intel MID platform
*
* Copyright (c) 2009, Intel Corporation.
*
@@ -23,7 +23,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#ifdef CONFIG_SPI_DW_MID_DMA
#include <linux/intel_mid_dma.h>
diff --git a/drivers/spi/dw_spi_mmio.c b/drivers/spi/spi-dw-mmio.c
index e0e813dad15..34eb66501db 100644
--- a/drivers/spi/dw_spi_mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -1,5 +1,5 @@
/*
- * dw_spi_mmio.c - Memory-mapped interface driver for DW SPI Core
+ * Memory-mapped interface driver for DW SPI Core
*
* Copyright (c) 2010, Octasic semiconductor.
*
@@ -16,7 +16,7 @@
#include <linux/spi/spi.h>
#include <linux/scatterlist.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#define DRIVER_NAME "dw_spi_mmio"
diff --git a/drivers/spi/dw_spi_pci.c b/drivers/spi/spi-dw-pci.c
index ad260aa5e52..c5f37f03ac8 100644
--- a/drivers/spi/dw_spi_pci.c
+++ b/drivers/spi/spi-dw-pci.c
@@ -1,5 +1,5 @@
/*
- * dw_spi_pci.c - PCI interface driver for DW SPI Core
+ * PCI interface driver for DW SPI Core
*
* Copyright (c) 2009, Intel Corporation.
*
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#define DRIVER_NAME "dw_spi_pci"
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/spi-dw.c
index 919fa9d9e16..857cd30b44b 100644
--- a/drivers/spi/dw_spi.c
+++ b/drivers/spi/spi-dw.c
@@ -1,5 +1,5 @@
/*
- * dw_spi.c - Designware SPI core controller driver (refer pxa2xx_spi.c)
+ * Designware SPI core controller driver (refer pxa2xx_spi.c)
*
* Copyright (c) 2009, Intel Corporation.
*
@@ -24,7 +24,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
-#include "dw_spi.h"
+#include "spi-dw.h"
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
@@ -818,9 +818,11 @@ int __devinit dw_spi_add_host(struct dw_spi *dws)
dws->prev_chip = NULL;
dws->dma_inited = 0;
dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+ snprintf(dws->name, sizeof(dws->name), "dw_spi%d",
+ dws->bus_num);
ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED,
- "dw_spi", dws);
+ dws->name, dws);
if (ret < 0) {
dev_err(&master->dev, "can not get IRQ\n");
goto err_free_master;
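The IRQ core does not copy the name passed to request_irq(); it keeps the pointer for the lifetime of the registration (it is what /proc/interrupts prints). That is why the per-bus name lands in the new name[16] member of struct dw_spi below rather than in a stack buffer:

        /* a stack buffer here would leave /proc/interrupts pointing at
         * freed memory once probe returns -- hence dws->name */
        snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
        ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dws->name, dws);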
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/spi-dw.h
index 7a5e78d2a5c..8b7b07bf6c3 100644
--- a/drivers/spi/dw_spi.h
+++ b/drivers/spi/spi-dw.h
@@ -96,6 +96,7 @@ struct dw_spi {
struct spi_device *cur_dev;
struct device *parent_dev;
enum dw_ssi_type type;
+ char name[16];
void __iomem *regs;
unsigned long paddr;
diff --git a/drivers/spi/ep93xx_spi.c b/drivers/spi/spi-ep93xx.c
index d3570071e98..1cf645479bf 100644
--- a/drivers/spi/ep93xx_spi.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -1,7 +1,7 @@
/*
* Driver for Cirrus Logic EP93xx SPI controller.
*
- * Copyright (c) 2010 Mika Westerberg
+ * Copyright (C) 2010-2011 Mika Westerberg
*
* Explicit FIFO handling code was inspired by amba-pl022 driver.
*
@@ -21,13 +21,16 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/dmaengine.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
+#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
+#include <mach/dma.h>
#include <mach/ep93xx_spi.h>
#define SSPCR0 0x0000
@@ -71,6 +74,7 @@
* @pdev: pointer to platform device
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
* @irq: IRQ number used by the driver
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
@@ -84,6 +88,14 @@
* @rx: current byte in transfer to receive
* @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
* frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ * the client
*
* This structure holds EP93xx SPI controller specific information. When
* @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
const struct platform_device *pdev;
struct clk *clk;
void __iomem *regs_base;
+ unsigned long sspdr_phys;
int irq;
unsigned long min_rate;
unsigned long max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
size_t tx;
size_t rx;
size_t fifo_level;
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct ep93xx_dma_data dma_rx_data;
+ struct ep93xx_dma_data dma_tx_data;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ void *zeropage;
};
/**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
espi->fifo_level++;
}
- if (espi->rx == t->len) {
- msg->actual_length += t->len;
+ if (espi->rx == t->len)
return 0;
- }
return -EINPROGRESS;
}
+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+ /*
+ * Now everything is set up for the current transfer. We prime the TX
+ * FIFO, enable interrupts, and wait for the transfer to complete.
+ */
+ if (ep93xx_spi_read_write(espi)) {
+ ep93xx_spi_enable_interrupts(espi);
+ wait_for_completion(&espi->wait);
+ }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
+ * in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+{
+ struct spi_transfer *t = espi->current_msg->state;
+ struct dma_async_tx_descriptor *txd;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config conf;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ struct dma_chan *chan;
+ const void *buf, *pbuf;
+ size_t len = t->len;
+ int i, ret, nents;
+
+ if (bits_per_word(espi) > 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = dir;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ buf = t->rx_buf;
+ sgt = &espi->rx_sgt;
+
+ conf.src_addr = espi->sspdr_phys;
+ conf.src_addr_width = buswidth;
+ } else {
+ chan = espi->dma_tx;
+ buf = t->tx_buf;
+ sgt = &espi->tx_sgt;
+
+ conf.dst_addr = espi->sspdr_phys;
+ conf.dst_addr_width = buswidth;
+ }
+
+ ret = dmaengine_slave_config(chan, &conf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * We need to split the transfer into PAGE_SIZE'd chunks. This is
+ * because we are using @espi->zeropage to provide a zero RX buffer
+ * for the TX transfers and we have only allocated one page for that.
+ *
+ * For performance reasons we allocate a new sg_table only when
+ * needed. Otherwise we will re-use the current one. Eventually the
+ * last sg_table is released in ep93xx_spi_release_dma().
+ */
+
+ nents = DIV_ROUND_UP(len, PAGE_SIZE);
+ if (nents != sgt->nents) {
+ sg_free_table(sgt);
+
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+ if (buf) {
+ sg_set_page(sg, virt_to_page(pbuf), bytes,
+ offset_in_page(pbuf));
+ } else {
+ sg_set_page(sg, virt_to_page(espi->zeropage),
+ bytes, 0);
+ }
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ if (WARN_ON(len)) {
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return ERR_PTR(-ENOMEM);
+
+ txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
+ dir, DMA_CTRL_ACK);
+ if (!txd) {
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ return ERR_PTR(-ENOMEM);
+ }
+ return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes with a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Finishes the DMA transfer; afterwards the DMA buffer is unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+ enum dma_data_direction dir)
+{
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+
+ if (dir == DMA_FROM_DEVICE) {
+ chan = espi->dma_rx;
+ sgt = &espi->rx_sgt;
+ } else {
+ chan = espi->dma_tx;
+ sgt = &espi->tx_sgt;
+ }
+
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+ complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct dma_async_tx_descriptor *rxd, *txd;
+
+ rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+ if (IS_ERR(rxd)) {
+ dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+ msg->status = PTR_ERR(rxd);
+ return;
+ }
+
+ txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+ if (IS_ERR(txd)) {
+ ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+ dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd));
+ msg->status = PTR_ERR(txd);
+ return;
+ }
+
+ /* We are ready when RX is done */
+ rxd->callback = ep93xx_spi_dma_callback;
+ rxd->callback_param = &espi->wait;
+
+ /* Now submit both descriptors and wait until they finish */
+ dmaengine_submit(rxd);
+ dmaengine_submit(txd);
+
+ dma_async_issue_pending(espi->dma_rx);
+ dma_async_issue_pending(espi->dma_tx);
+
+ wait_for_completion(&espi->wait);
+
+ ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
+ ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+}
+
/**
* ep93xx_spi_process_transfer() - processes one SPI transfer
* @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
espi->tx = 0;
/*
- * Now everything is set up for the current transfer. We prime the TX
- * FIFO, enable interrupts, and wait for the transfer to complete.
+ * There is no point in setting up DMA for transfers which fit into the
+ * FIFO and can be completed with a single interrupt, so in those cases
+ * we use PIO and don't bother with DMA.
*/
- if (ep93xx_spi_read_write(espi)) {
- ep93xx_spi_enable_interrupts(espi);
- wait_for_completion(&espi->wait);
- }
+ if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+ ep93xx_spi_dma_transfer(espi);
+ else
+ ep93xx_spi_pio_transfer(espi);
/*
* In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
if (msg->status)
return;
+ msg->actual_length += t->len;
+
/*
* After this transfer is finished, perform any possible
* post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ if (ep93xx_dma_chan_is_m2p(chan))
+ return false;
+
+ chan->private = filter_param;
+ return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!espi->zeropage)
+ return -ENOMEM;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ espi->dma_rx_data.port = EP93XX_DMA_SSP;
+ espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+ espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+ espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_rx_data);
+ if (!espi->dma_rx) {
+ ret = -ENODEV;
+ goto fail_free_page;
+ }
+
+ espi->dma_tx_data.port = EP93XX_DMA_SSP;
+ espi->dma_tx_data.direction = DMA_TO_DEVICE;
+ espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+ espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_tx_data);
+ if (!espi->dma_tx) {
+ ret = -ENODEV;
+ goto fail_release_rx;
+ }
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(espi->dma_rx);
+ espi->dma_rx = NULL;
+fail_free_page:
+ free_page((unsigned long)espi->zeropage);
+
+ return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+ if (espi->dma_rx) {
+ dma_release_channel(espi->dma_rx);
+ sg_free_table(&espi->rx_sgt);
+ }
+ if (espi->dma_tx) {
+ dma_release_channel(espi->dma_tx);
+ sg_free_table(&espi->tx_sgt);
+ }
+
+ if (espi->zeropage)
+ free_page((unsigned long)espi->zeropage);
+}
+
static int __init ep93xx_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
goto fail_put_clock;
}
+ espi->sspdr_phys = res->start + SSPDR;
espi->regs_base = ioremap(res->start, resource_size(res));
if (!espi->regs_base) {
dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
goto fail_unmap_regs;
}
+ if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
espi->wq = create_singlethread_workqueue("ep93xx_spid");
if (!espi->wq) {
dev_err(&pdev->dev, "unable to create workqueue\n");
- goto fail_free_irq;
+ goto fail_free_dma;
}
INIT_WORK(&espi->msg_work, ep93xx_spi_work);
INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
fail_free_queue:
destroy_workqueue(espi->wq);
-fail_free_irq:
+fail_free_dma:
+ ep93xx_spi_release_dma(espi);
free_irq(espi->irq, espi);
fail_unmap_regs:
iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
}
spin_unlock_irq(&espi->lock);
+ ep93xx_spi_release_dma(espi);
free_irq(espi->irq, espi);
iounmap(espi->regs_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
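The PAGE_SIZE chunking in ep93xx_spi_dma_prepare() above follows from @zeropage being a single page: each scatterlist entry may reference at most one page, so TX-only transfers can point every RX chunk at the same dummy page. Condensed, the split works like this (variable names as in the driver):

        size_t len = t->len;
        int nents = DIV_ROUND_UP(len, PAGE_SIZE);  /* e.g. 4100 bytes -> 2 entries */

        for_each_sg(sgt->sgl, sg, nents, i) {
                size_t bytes = min_t(size_t, len, PAGE_SIZE);

                /* real client buffer when present, shared zero page if not */
                if (buf)
                        sg_set_page(sg, virt_to_page(pbuf), bytes,
                                    offset_in_page(pbuf));
                else
                        sg_set_page(sg, virt_to_page(espi->zeropage), bytes, 0);

                pbuf += bytes;
                len -= bytes;
        }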
diff --git a/drivers/spi/spi_fsl_espi.c b/drivers/spi/spi-fsl-espi.c
index 496f895a002..54e499d5f92 100644
--- a/drivers/spi/spi_fsl_espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -22,7 +22,7 @@
#include <linux/err.h>
#include <sysdev/fsl_soc.h>
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
/* eSPI Controller registers */
struct fsl_espi_reg {
diff --git a/drivers/spi/spi_fsl_lib.c b/drivers/spi/spi-fsl-lib.c
index ff59f42ae99..2674fad7f68 100644
--- a/drivers/spi/spi_fsl_lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -25,7 +25,7 @@
#include <linux/of_spi.h>
#include <sysdev/fsl_soc.h>
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
#define MPC8XXX_SPI_RX_BUF(type) \
void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
diff --git a/drivers/spi/spi_fsl_lib.h b/drivers/spi/spi-fsl-lib.h
index cbe881b9ea7..cbe881b9ea7 100644
--- a/drivers/spi/spi_fsl_lib.h
+++ b/drivers/spi/spi-fsl-lib.h
diff --git a/drivers/spi/spi_fsl_spi.c b/drivers/spi/spi-fsl-spi.c
index 7963c9b4956..d2407558773 100644
--- a/drivers/spi/spi_fsl_spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -37,7 +37,7 @@
#include <asm/cpm.h>
#include <asm/qe.h>
-#include "spi_fsl_lib.h"
+#include "spi-fsl-lib.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
@@ -684,7 +684,7 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
struct device_node *np = dev->of_node;
const u32 *iprop;
int size;
- unsigned long spi_base_ofs;
+ void __iomem *spi_base;
unsigned long pram_ofs = -ENOMEM;
/* Can't use of_address_to_resource(), QE muram isn't at 0. */
@@ -702,33 +702,27 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
return pram_ofs;
}
- /* CPM1 and CPM2 pram must be at a fixed addr. */
- if (!iprop || size != sizeof(*iprop) * 4)
- return -ENOMEM;
-
- spi_base_ofs = cpm_muram_alloc_fixed(iprop[2], 2);
- if (IS_ERR_VALUE(spi_base_ofs))
- return -ENOMEM;
+ spi_base = of_iomap(np, 1);
+ if (spi_base == NULL)
+ return -EINVAL;
if (mspi->flags & SPI_CPM2) {
pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
- if (!IS_ERR_VALUE(pram_ofs)) {
- u16 __iomem *spi_base = cpm_muram_addr(spi_base_ofs);
-
- out_be16(spi_base, pram_ofs);
- }
+ out_be16(spi_base, pram_ofs);
} else {
- struct spi_pram __iomem *pram = cpm_muram_addr(spi_base_ofs);
+ struct spi_pram __iomem *pram = spi_base;
u16 rpbase = in_be16(&pram->rpbase);
/* Microcode relocation patch applied? */
if (rpbase)
pram_ofs = rpbase;
- else
- return spi_base_ofs;
+ else {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ }
}
- cpm_muram_free(spi_base_ofs);
+ iounmap(spi_base);
return pram_ofs;
}
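of_iomap(np, 1) maps the node's second "reg" entry directly instead of going through a fixed muram allocation, and pairs with iounmap() on the way out. The generic pattern looks like this (a sketch; "value" is a stand-in, not driver code):

        void __iomem *base = of_iomap(np, 1);   /* map reg index 1 */
        if (!base)
                return -EINVAL;

        out_be16(base, value);                  /* ... use the mapping ... */

        iounmap(base);                          /* release the mapping */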
diff --git a/drivers/spi/spi_gpio.c b/drivers/spi/spi-gpio.c
index 63e51b011d5..0e88ab74549 100644
--- a/drivers/spi/spi_gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -1,5 +1,5 @@
/*
- * spi_gpio.c - SPI master driver using generic bitbanged GPIO
+ * SPI master driver using generic bitbanged GPIO
*
* Copyright (C) 2006,2008 David Brownell
*
@@ -69,7 +69,7 @@ struct spi_gpio {
* #define SPI_MOSI_GPIO 120
* #define SPI_SCK_GPIO 121
* #define SPI_N_CHIPSEL 4
- * #include "spi_gpio.c"
+ * #include "spi-gpio.c"
*/
#ifndef DRIVER_NAME
@@ -127,7 +127,7 @@ static inline int getmiso(const struct spi_device *spi)
*/
#define spidelay(nsecs) do {} while (0)
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
/*
* These functions can leverage inline expansion of GPIO calls to shrink
diff --git a/drivers/spi/spi_imx.c b/drivers/spi/spi-imx.c
index 69d6dba67c1..8ac6542aedc 100644
--- a/drivers/spi/spi_imx.c
+++ b/drivers/spi/spi-imx.c
@@ -34,6 +34,9 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <mach/spi.h>
@@ -45,9 +48,6 @@
#define MXC_CSPIINT 0x0c
#define MXC_RESET 0x1c
-#define MX3_CSPISTAT 0x14
-#define MX3_CSPISTAT_RR (1 << 3)
-
/* generic defines to abstract from the different register layouts */
#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
@@ -60,12 +60,12 @@ struct spi_imx_config {
};
enum spi_imx_devtype {
- SPI_IMX_VER_IMX1,
- SPI_IMX_VER_0_0,
- SPI_IMX_VER_0_4,
- SPI_IMX_VER_0_5,
- SPI_IMX_VER_0_7,
- SPI_IMX_VER_2_3,
+ IMX1_CSPI,
+ IMX21_CSPI,
+ IMX27_CSPI,
+ IMX31_CSPI,
+ IMX35_CSPI, /* CSPI on all i.mx except above */
+ IMX51_ECSPI, /* ECSPI on i.mx51 and later */
};
struct spi_imx_data;
@@ -76,7 +76,7 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
- unsigned int fifosize;
+ enum spi_imx_devtype devtype;
};
struct spi_imx_data {
@@ -87,7 +87,6 @@ struct spi_imx_data {
int irq;
struct clk *clk;
unsigned long spi_clk;
- int *chipselect;
unsigned int count;
void (*tx)(struct spi_imx_data *);
@@ -96,9 +95,25 @@ struct spi_imx_data {
const void *tx_buf;
unsigned int txfifo; /* number of words pushed in tx FIFO */
- struct spi_imx_devtype_data devtype_data;
+ struct spi_imx_devtype_data *devtype_data;
+ int chipselect[0];
};
+static inline int is_imx27_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX27_CSPI;
+}
+
+static inline int is_imx35_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX35_CSPI;
+}
+
+static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
+{
+ return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
+}
+
#define MXC_SPI_BUF_RX(type) \
static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
{ \
@@ -140,14 +155,9 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
/* MX21, MX27 */
static unsigned int spi_imx_clkdiv_1(unsigned int fin,
- unsigned int fspi)
+ unsigned int fspi, unsigned int max)
{
- int i, max;
-
- if (cpu_is_mx21())
- max = 18;
- else
- max = 16;
+ int i;
for (i = 2; i < max; i++)
if (fspi * mxc_clkdivs[i] >= fin)
@@ -171,30 +181,30 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
return 7;
}
-#define SPI_IMX2_3_CTRL 0x08
-#define SPI_IMX2_3_CTRL_ENABLE (1 << 0)
-#define SPI_IMX2_3_CTRL_XCH (1 << 2)
-#define SPI_IMX2_3_CTRL_MODE_MASK (0xf << 4)
-#define SPI_IMX2_3_CTRL_POSTDIV_OFFSET 8
-#define SPI_IMX2_3_CTRL_PREDIV_OFFSET 12
-#define SPI_IMX2_3_CTRL_CS(cs) ((cs) << 18)
-#define SPI_IMX2_3_CTRL_BL_OFFSET 20
+#define MX51_ECSPI_CTRL 0x08
+#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
+#define MX51_ECSPI_CTRL_XCH (1 << 2)
+#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
+#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
+#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
+#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
+#define MX51_ECSPI_CTRL_BL_OFFSET 20
-#define SPI_IMX2_3_CONFIG 0x0c
-#define SPI_IMX2_3_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
-#define SPI_IMX2_3_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
-#define SPI_IMX2_3_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
-#define SPI_IMX2_3_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
+#define MX51_ECSPI_CONFIG 0x0c
+#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
+#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
+#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
+#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
-#define SPI_IMX2_3_INT 0x10
-#define SPI_IMX2_3_INT_TEEN (1 << 0)
-#define SPI_IMX2_3_INT_RREN (1 << 3)
+#define MX51_ECSPI_INT 0x10
+#define MX51_ECSPI_INT_TEEN (1 << 0)
+#define MX51_ECSPI_INT_RREN (1 << 3)
-#define SPI_IMX2_3_STAT 0x18
-#define SPI_IMX2_3_STAT_RR (1 << 3)
+#define MX51_ECSPI_STAT 0x18
+#define MX51_ECSPI_STAT_RR (1 << 3)
/* MX51 eCSPI */
-static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi)
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi)
{
/*
* there are two 4-bit dividers, the pre-divider divides by
@@ -222,36 +232,36 @@ static unsigned int spi_imx2_3_clkdiv(unsigned int fin, unsigned int fspi)
pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
__func__, fin, fspi, post, pre);
- return (pre << SPI_IMX2_3_CTRL_PREDIV_OFFSET) |
- (post << SPI_IMX2_3_CTRL_POSTDIV_OFFSET);
+ return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
+ (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
}
-static void __maybe_unused spi_imx2_3_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned val = 0;
if (enable & MXC_INT_TE)
- val |= SPI_IMX2_3_INT_TEEN;
+ val |= MX51_ECSPI_INT_TEEN;
if (enable & MXC_INT_RR)
- val |= SPI_IMX2_3_INT_RREN;
+ val |= MX51_ECSPI_INT_RREN;
- writel(val, spi_imx->base + SPI_IMX2_3_INT);
+ writel(val, spi_imx->base + MX51_ECSPI_INT);
}
-static void __maybe_unused spi_imx2_3_trigger(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
{
u32 reg;
- reg = readl(spi_imx->base + SPI_IMX2_3_CTRL);
- reg |= SPI_IMX2_3_CTRL_XCH;
- writel(reg, spi_imx->base + SPI_IMX2_3_CTRL);
+ reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+ reg |= MX51_ECSPI_CTRL_XCH;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
}
-static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
+static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- u32 ctrl = SPI_IMX2_3_CTRL_ENABLE, cfg = 0;
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
/*
* The hardware seems to have a race condition when changing modes. The
@@ -260,42 +270,42 @@ static int __maybe_unused spi_imx2_3_config(struct spi_imx_data *spi_imx,
* the same time.
* So set master mode for all channels as we do not support slave mode.
*/
- ctrl |= SPI_IMX2_3_CTRL_MODE_MASK;
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
/* set clock speed */
- ctrl |= spi_imx2_3_clkdiv(spi_imx->spi_clk, config->speed_hz);
+ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz);
/* set chip select to use */
- ctrl |= SPI_IMX2_3_CTRL_CS(config->cs);
+ ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
- ctrl |= (config->bpw - 1) << SPI_IMX2_3_CTRL_BL_OFFSET;
+ ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
- cfg |= SPI_IMX2_3_CONFIG_SBBCTRL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);
if (config->mode & SPI_CPHA)
- cfg |= SPI_IMX2_3_CONFIG_SCLKPHA(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
if (config->mode & SPI_CPOL)
- cfg |= SPI_IMX2_3_CONFIG_SCLKPOL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
if (config->mode & SPI_CS_HIGH)
- cfg |= SPI_IMX2_3_CONFIG_SSBPOL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
- writel(ctrl, spi_imx->base + SPI_IMX2_3_CTRL);
- writel(cfg, spi_imx->base + SPI_IMX2_3_CONFIG);
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
return 0;
}
-static int __maybe_unused spi_imx2_3_rx_available(struct spi_imx_data *spi_imx)
+static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
{
- return readl(spi_imx->base + SPI_IMX2_3_STAT) & SPI_IMX2_3_STAT_RR;
+ return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
}
-static void __maybe_unused spi_imx2_3_reset(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
- while (spi_imx2_3_rx_available(spi_imx))
+ while (mx51_ecspi_rx_available(spi_imx))
readl(spi_imx->base + MXC_CSPIRXDATA);
}
@@ -343,32 +353,7 @@ static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused spi_imx0_4_config(struct spi_imx_data *spi_imx,
- struct spi_imx_config *config)
-{
- unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
- int cs = spi_imx->chipselect[config->cs];
-
- reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
- MX31_CSPICTRL_DR_SHIFT;
-
- reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
-
- if (config->mode & SPI_CPHA)
- reg |= MX31_CSPICTRL_PHA;
- if (config->mode & SPI_CPOL)
- reg |= MX31_CSPICTRL_POL;
- if (config->mode & SPI_CS_HIGH)
- reg |= MX31_CSPICTRL_SSPOL;
- if (cs < 0)
- reg |= (cs + 32) << MX31_CSPICTRL_CS_SHIFT;
-
- writel(reg, spi_imx->base + MXC_CSPICTRL);
-
- return 0;
-}
-
-static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
+static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
@@ -377,8 +362,12 @@ static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
MX31_CSPICTRL_DR_SHIFT;
- reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
- reg |= MX31_CSPICTRL_SSCTL;
+ if (is_imx35_cspi(spi_imx)) {
+ reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+ reg |= MX31_CSPICTRL_SSCTL;
+ } else {
+ reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+ }
if (config->mode & SPI_CPHA)
reg |= MX31_CSPICTRL_PHA;
@@ -387,7 +376,9 @@ static int __maybe_unused spi_imx0_7_config(struct spi_imx_data *spi_imx,
if (config->mode & SPI_CS_HIGH)
reg |= MX31_CSPICTRL_SSPOL;
if (cs < 0)
- reg |= (cs + 32) << MX35_CSPICTRL_CS_SHIFT;
+ reg |= (cs + 32) <<
+ (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
+ MX31_CSPICTRL_CS_SHIFT);
writel(reg, spi_imx->base + MXC_CSPICTRL);
@@ -399,77 +390,78 @@ static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
}
-static void __maybe_unused spi_imx0_4_reset(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
{
/* drain receive buffer */
- while (readl(spi_imx->base + MX3_CSPISTAT) & MX3_CSPISTAT_RR)
+ while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
readl(spi_imx->base + MXC_CSPIRXDATA);
}
-#define MX27_INTREG_RR (1 << 4)
-#define MX27_INTREG_TEEN (1 << 9)
-#define MX27_INTREG_RREN (1 << 13)
+#define MX21_INTREG_RR (1 << 4)
+#define MX21_INTREG_TEEN (1 << 9)
+#define MX21_INTREG_RREN (1 << 13)
-#define MX27_CSPICTRL_POL (1 << 5)
-#define MX27_CSPICTRL_PHA (1 << 6)
-#define MX27_CSPICTRL_SSPOL (1 << 8)
-#define MX27_CSPICTRL_XCH (1 << 9)
-#define MX27_CSPICTRL_ENABLE (1 << 10)
-#define MX27_CSPICTRL_MASTER (1 << 11)
-#define MX27_CSPICTRL_DR_SHIFT 14
-#define MX27_CSPICTRL_CS_SHIFT 19
+#define MX21_CSPICTRL_POL (1 << 5)
+#define MX21_CSPICTRL_PHA (1 << 6)
+#define MX21_CSPICTRL_SSPOL (1 << 8)
+#define MX21_CSPICTRL_XCH (1 << 9)
+#define MX21_CSPICTRL_ENABLE (1 << 10)
+#define MX21_CSPICTRL_MASTER (1 << 11)
+#define MX21_CSPICTRL_DR_SHIFT 14
+#define MX21_CSPICTRL_CS_SHIFT 19
-static void __maybe_unused mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
+static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
{
unsigned int val = 0;
if (enable & MXC_INT_TE)
- val |= MX27_INTREG_TEEN;
+ val |= MX21_INTREG_TEEN;
if (enable & MXC_INT_RR)
- val |= MX27_INTREG_RREN;
+ val |= MX21_INTREG_RREN;
writel(val, spi_imx->base + MXC_CSPIINT);
}
-static void __maybe_unused mx27_trigger(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
{
unsigned int reg;
reg = readl(spi_imx->base + MXC_CSPICTRL);
- reg |= MX27_CSPICTRL_XCH;
+ reg |= MX21_CSPICTRL_XCH;
writel(reg, spi_imx->base + MXC_CSPICTRL);
}
-static int __maybe_unused mx27_config(struct spi_imx_data *spi_imx,
+static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
struct spi_imx_config *config)
{
- unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
+ unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
int cs = spi_imx->chipselect[config->cs];
+ unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
- reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
- MX27_CSPICTRL_DR_SHIFT;
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
+ MX21_CSPICTRL_DR_SHIFT;
reg |= config->bpw - 1;
if (config->mode & SPI_CPHA)
- reg |= MX27_CSPICTRL_PHA;
+ reg |= MX21_CSPICTRL_PHA;
if (config->mode & SPI_CPOL)
- reg |= MX27_CSPICTRL_POL;
+ reg |= MX21_CSPICTRL_POL;
if (config->mode & SPI_CS_HIGH)
- reg |= MX27_CSPICTRL_SSPOL;
+ reg |= MX21_CSPICTRL_SSPOL;
if (cs < 0)
- reg |= (cs + 32) << MX27_CSPICTRL_CS_SHIFT;
+ reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;
writel(reg, spi_imx->base + MXC_CSPICTRL);
return 0;
}
-static int __maybe_unused mx27_rx_available(struct spi_imx_data *spi_imx)
+static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
{
- return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
+ return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
}
-static void __maybe_unused spi_imx0_0_reset(struct spi_imx_data *spi_imx)
+static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
{
writel(1, spi_imx->base + MXC_RESET);
}
@@ -535,61 +527,94 @@ static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
writel(1, spi_imx->base + MXC_RESET);
}
-/*
- * These version numbers are taken from the Freescale driver. Unfortunately it
- * doesn't support i.MX1, so this entry doesn't match the scheme. :-(
- */
-static struct spi_imx_devtype_data spi_imx_devtype_data[] __devinitdata = {
-#ifdef CONFIG_SPI_IMX_VER_IMX1
- [SPI_IMX_VER_IMX1] = {
- .intctrl = mx1_intctrl,
- .config = mx1_config,
- .trigger = mx1_trigger,
- .rx_available = mx1_rx_available,
- .reset = mx1_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_0_0
- [SPI_IMX_VER_0_0] = {
- .intctrl = mx27_intctrl,
- .config = mx27_config,
- .trigger = mx27_trigger,
- .rx_available = mx27_rx_available,
- .reset = spi_imx0_0_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_0_4
- [SPI_IMX_VER_0_4] = {
- .intctrl = mx31_intctrl,
- .config = spi_imx0_4_config,
- .trigger = mx31_trigger,
- .rx_available = mx31_rx_available,
- .reset = spi_imx0_4_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_0_7
- [SPI_IMX_VER_0_7] = {
- .intctrl = mx31_intctrl,
- .config = spi_imx0_7_config,
- .trigger = mx31_trigger,
- .rx_available = mx31_rx_available,
- .reset = spi_imx0_4_reset,
- .fifosize = 8,
- },
-#endif
-#ifdef CONFIG_SPI_IMX_VER_2_3
- [SPI_IMX_VER_2_3] = {
- .intctrl = spi_imx2_3_intctrl,
- .config = spi_imx2_3_config,
- .trigger = spi_imx2_3_trigger,
- .rx_available = spi_imx2_3_rx_available,
- .reset = spi_imx2_3_reset,
- .fifosize = 64,
- },
-#endif
+static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
+ .intctrl = mx1_intctrl,
+ .config = mx1_config,
+ .trigger = mx1_trigger,
+ .rx_available = mx1_rx_available,
+ .reset = mx1_reset,
+ .devtype = IMX1_CSPI,
+};
+
+static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
+ .intctrl = mx21_intctrl,
+ .config = mx21_config,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .devtype = IMX21_CSPI,
+};
+
+static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
+ /* i.MX27 CSPI shares its functions with the i.MX21 one */
+ .intctrl = mx21_intctrl,
+ .config = mx21_config,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .devtype = IMX27_CSPI,
+};
+
+static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
+ .intctrl = mx31_intctrl,
+ .config = mx31_config,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .devtype = IMX31_CSPI,
+};
+
+static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+ /* The i.MX35 and later CSPI share their functions with the i.MX31 one */
+ .intctrl = mx31_intctrl,
+ .config = mx31_config,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .devtype = IMX35_CSPI,
+};
+
+static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
+ .intctrl = mx51_ecspi_intctrl,
+ .config = mx51_ecspi_config,
+ .trigger = mx51_ecspi_trigger,
+ .rx_available = mx51_ecspi_rx_available,
+ .reset = mx51_ecspi_reset,
+ .devtype = IMX51_ECSPI,
+};
+
+static struct platform_device_id spi_imx_devtype[] = {
+ {
+ .name = "imx1-cspi",
+ .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
+ }, {
+ .name = "imx21-cspi",
+ .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
+ }, {
+ .name = "imx27-cspi",
+ .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
+ }, {
+ .name = "imx31-cspi",
+ .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
+ }, {
+ .name = "imx35-cspi",
+ .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
+ }, {
+ .name = "imx51-ecspi",
+ .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct of_device_id spi_imx_dt_ids[] = {
+ { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
+ { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
+ { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
+ { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
+ { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
+ { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
+ { /* sentinel */ }
};
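
The is_imx27_cspi()/is_imx35_cspi() and spi_imx_get_fifosize() helpers used elsewhere in this patch are not shown in these hunks; presumably they test the devtype enum now carried in devtype_data, along these lines (a sketch, not the patch itself; the 64-word FIFO for eCSPI and 8 words otherwise matches the fifosize fields of the removed table):

static inline int is_imx27_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX27_CSPI;
}

static inline int is_imx35_cspi(struct spi_imx_data *d)
{
	return d->devtype_data->devtype == IMX35_CSPI;
}

static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
{
	/* only the i.MX51 eCSPI has the deep 64-word FIFO */
	return d->devtype_data->devtype == IMX51_ECSPI ? 64 : 8;
}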
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
@@ -607,21 +632,21 @@ static void spi_imx_chipselect(struct spi_device *spi, int is_active)
static void spi_imx_push(struct spi_imx_data *spi_imx)
{
- while (spi_imx->txfifo < spi_imx->devtype_data.fifosize) {
+ while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
if (!spi_imx->count)
break;
spi_imx->tx(spi_imx);
spi_imx->txfifo++;
}
- spi_imx->devtype_data.trigger(spi_imx);
+ spi_imx->devtype_data->trigger(spi_imx);
}
static irqreturn_t spi_imx_isr(int irq, void *dev_id)
{
struct spi_imx_data *spi_imx = dev_id;
- while (spi_imx->devtype_data.rx_available(spi_imx)) {
+ while (spi_imx->devtype_data->rx_available(spi_imx)) {
spi_imx->rx(spi_imx);
spi_imx->txfifo--;
}
@@ -635,12 +660,12 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
/* No data left to push, but still waiting for rx data,
* enable receive data available interrupt.
*/
- spi_imx->devtype_data.intctrl(
+ spi_imx->devtype_data->intctrl(
spi_imx, MXC_INT_RR);
return IRQ_HANDLED;
}
- spi_imx->devtype_data.intctrl(spi_imx, 0);
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
complete(&spi_imx->xfer_done);
return IRQ_HANDLED;
@@ -677,7 +702,7 @@ static int spi_imx_setupxfer(struct spi_device *spi,
} else
BUG();
- spi_imx->devtype_data.config(spi_imx, &config);
+ spi_imx->devtype_data->config(spi_imx, &config);
return 0;
}
@@ -696,7 +721,7 @@ static int spi_imx_transfer(struct spi_device *spi,
spi_imx_push(spi_imx);
- spi_imx->devtype_data.intctrl(spi_imx, MXC_INT_TE);
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
wait_for_completion(&spi_imx->xfer_done);
@@ -723,72 +748,47 @@ static void spi_imx_cleanup(struct spi_device *spi)
{
}
-static struct platform_device_id spi_imx_devtype[] = {
- {
- .name = "imx1-cspi",
- .driver_data = SPI_IMX_VER_IMX1,
- }, {
- .name = "imx21-cspi",
- .driver_data = SPI_IMX_VER_0_0,
- }, {
- .name = "imx25-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx27-cspi",
- .driver_data = SPI_IMX_VER_0_0,
- }, {
- .name = "imx31-cspi",
- .driver_data = SPI_IMX_VER_0_4,
- }, {
- .name = "imx35-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx51-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx51-ecspi",
- .driver_data = SPI_IMX_VER_2_3,
- }, {
- .name = "imx53-cspi",
- .driver_data = SPI_IMX_VER_0_7,
- }, {
- .name = "imx53-ecspi",
- .driver_data = SPI_IMX_VER_2_3,
- }, {
- /* sentinel */
- }
-};
-
static int __devinit spi_imx_probe(struct platform_device *pdev)
{
- struct spi_imx_master *mxc_platform_info;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(spi_imx_dt_ids, &pdev->dev);
+ struct spi_imx_master *mxc_platform_info =
+ dev_get_platdata(&pdev->dev);
struct spi_master *master;
struct spi_imx_data *spi_imx;
struct resource *res;
- int i, ret;
+ int i, ret, num_cs;
- mxc_platform_info = dev_get_platdata(&pdev->dev);
- if (!mxc_platform_info) {
+ if (!np && !mxc_platform_info) {
dev_err(&pdev->dev, "can't get the platform data\n");
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
+ ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
+ if (ret < 0)
+ num_cs = mxc_platform_info->num_chipselect;
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
master->bus_num = pdev->id;
- master->num_chipselect = mxc_platform_info->num_chipselect;
+ master->num_chipselect = num_cs;
spi_imx = spi_master_get_devdata(master);
spi_imx->bitbang.master = spi_master_get(master);
- spi_imx->chipselect = mxc_platform_info->chipselect;
for (i = 0; i < master->num_chipselect; i++) {
- if (spi_imx->chipselect[i] < 0)
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+ if (cs_gpio < 0)
+ cs_gpio = mxc_platform_info->chipselect[i];
+ if (cs_gpio < 0)
continue;
+ spi_imx->chipselect[i] = cs_gpio;
ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
if (ret) {
while (i > 0) {
@@ -810,8 +810,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
init_completion(&spi_imx->xfer_done);
- spi_imx->devtype_data =
- spi_imx_devtype_data[pdev->id_entry->driver_data];
+ spi_imx->devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *) pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -854,10 +854,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
clk_enable(spi_imx->clk);
spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
- spi_imx->devtype_data.reset(spi_imx);
+ spi_imx->devtype_data->reset(spi_imx);
- spi_imx->devtype_data.intctrl(spi_imx, 0);
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
+ master->dev.of_node = pdev->dev.of_node;
ret = spi_bitbang_start(&spi_imx->bitbang);
if (ret) {
dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
@@ -920,6 +921,7 @@ static struct platform_driver spi_imx_driver = {
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = spi_imx_dt_ids,
},
.id_table = spi_imx_devtype,
.probe = spi_imx_probe,
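
As a usage illustration (hypothetical board code, not part of this patch): a platform device registered under one of the names in spi_imx_devtype[] matches the id table and hands the driver the corresponding devtype data.

#include <linux/platform_device.h>

static struct platform_device example_ecspi_dev = {	/* hypothetical */
	.name = "imx51-ecspi",	/* matched against spi_imx_devtype[] */
	.id   = 0,
};

static int __init example_board_init(void)
{
	return platform_device_register(&example_ecspi_dev);
}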
diff --git a/drivers/spi/spi_lm70llp.c b/drivers/spi/spi-lm70llp.c
index 7746a41ab6d..933eb9d9ddd 100644
--- a/drivers/spi/spi_lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -1,5 +1,5 @@
/*
- * spi_lm70llp.c - driver for LM70EVAL-LLP board for the LM70 sensor
+ * Driver for LM70EVAL-LLP board for the LM70 sensor
*
* Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
*
@@ -174,7 +174,7 @@ static inline int getmiso(struct spi_device *s)
}
/*--------------------------------------------------------------------*/
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
static void lm70_chipselect(struct spi_device *spi, int value)
{
diff --git a/drivers/spi/mpc512x_psc_spi.c b/drivers/spi/spi-mpc512x-psc.c
index 6a5b4238fb6..6a5b4238fb6 100644
--- a/drivers/spi/mpc512x_psc_spi.c
+++ b/drivers/spi/spi-mpc512x-psc.c
diff --git a/drivers/spi/mpc52xx_psc_spi.c b/drivers/spi/spi-mpc52xx-psc.c
index e30baf0852a..e30baf0852a 100644
--- a/drivers/spi/mpc52xx_psc_spi.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
diff --git a/drivers/spi/mpc52xx_spi.c b/drivers/spi/spi-mpc52xx.c
index 015a974bed7..015a974bed7 100644
--- a/drivers/spi/mpc52xx_spi.c
+++ b/drivers/spi/spi-mpc52xx.c
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi-nuc900.c
index 3cd15f690f1..c0a6ce81f9c 100644
--- a/drivers/spi/spi_nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_nuc900.c
- *
+/*
* Copyright (c) 2009 Nuvoton technology.
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -7,7 +6,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
-*/
+ */
#include <linux/init.h>
#include <linux/spinlock.h>
diff --git a/drivers/spi/spi_oc_tiny.c b/drivers/spi/spi-oc-tiny.c
index f1bde66cea1..f1bde66cea1 100644
--- a/drivers/spi/spi_oc_tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
diff --git a/drivers/spi/omap_spi_100k.c b/drivers/spi/spi-omap-100k.c
index 9bd1c92ad96..9bd1c92ad96 100644
--- a/drivers/spi/omap_spi_100k.c
+++ b/drivers/spi/spi-omap-100k.c
diff --git a/drivers/spi/omap_uwire.c b/drivers/spi/spi-omap-uwire.c
index 160d3266205..00a8e9d7dbe 100644
--- a/drivers/spi/omap_uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -1,5 +1,5 @@
/*
- * omap_uwire.c -- MicroWire interface driver for OMAP
+ * MicroWire interface driver for OMAP
*
* Copyright 2003 MontaVista Software Inc. <source@mvista.com>
*
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 969cdd2fe12..fde3a2d4f12 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1116,8 +1116,8 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
status = -ENODEV;
goto err1;
}
- if (!request_mem_region(r->start, (r->end - r->start) + 1,
- dev_name(&pdev->dev))) {
+ if (!request_mem_region(r->start, resource_size(r),
+ dev_name(&pdev->dev))) {
status = -EBUSY;
goto err1;
}
@@ -1125,7 +1125,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
r->start += pdata->regs_offset;
r->end += pdata->regs_offset;
mcspi->phys = r->start;
- mcspi->base = ioremap(r->start, r->end - r->start + 1);
+ mcspi->base = ioremap(r->start, resource_size(r));
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
@@ -1190,7 +1190,7 @@ err4:
err3:
kfree(mcspi->dma_channels);
err2:
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
iounmap(mcspi->base);
err1:
return status;
@@ -1210,7 +1210,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
omap2_mcspi_disable_clocks(mcspi);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
base = mcspi->base;
spi_unregister_master(master);
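
The resource_size() conversions in this and the following drivers are behavior-preserving: the helper from <linux/ioport.h> computes exactly the open-coded expression it replaces.

static inline resource_size_t resource_size(const struct resource *res)
{
	return res->end - res->start + 1;
}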
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/spi-orion.c
index 0b677dc041a..9421a390a5e 100644
--- a/drivers/spi/orion_spi.c
+++ b/drivers/spi/spi-orion.c
@@ -1,5 +1,5 @@
/*
- * orion_spi.c -- Marvell Orion SPI controller driver
+ * Marvell Orion SPI controller driver
*
* Author: Shadi Ammouri <shadi@marvell.com>
* Copyright (C) 2007-2008 Marvell Ltd.
@@ -489,7 +489,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
goto out;
}
- if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
goto out;
@@ -511,7 +511,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
return status;
out_rel_mem:
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
out:
spi_master_put(master);
@@ -531,7 +531,7 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
cancel_work_sync(&spi->work);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
spi_unregister_master(master);
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/spi-pl022.c
index d18ce9e946d..eba88c749fb 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1,6 +1,4 @@
/*
- * drivers/spi/amba-pl022.c
- *
* A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
*
* Copyright (C) 2008-2009 ST-Ericsson AB
@@ -42,6 +40,7 @@
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
/*
* This macro is used to define some register default values.
@@ -383,6 +382,8 @@ struct pl022 {
enum ssp_reading read;
enum ssp_writing write;
u32 exp_fifo_level;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
/* DMA settings */
#ifdef CONFIG_DMA_ENGINE
struct dma_chan *dma_rx_channel;
@@ -517,6 +518,7 @@ static void giveback(struct pl022 *pl022)
clk_disable(pl022->clk);
amba_pclk_disable(pl022->adev);
amba_vcore_disable(pl022->adev);
+ pm_runtime_put(&pl022->adev->dev);
}
/**
@@ -909,12 +911,10 @@ static int configure_dma(struct pl022 *pl022)
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
.direction = DMA_FROM_DEVICE,
- .src_maxburst = pl022->vendor->fifodepth >> 1,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
.direction = DMA_TO_DEVICE,
- .dst_maxburst = pl022->vendor->fifodepth >> 1,
};
unsigned int pages;
int ret;
@@ -928,6 +928,54 @@ static int configure_dma(struct pl022 *pl022)
if (!rxchan || !txchan)
return -ENODEV;
+ /*
+ * If supplied, the DMA burst size should equal the FIFO trigger level.
+ * Notice that the DMA engine uses a one-to-one mapping. Since we cannot
+ * trigger on 2 elements this needs an explicit mapping rather than a
+ * calculation.
+ */
+ switch (pl022->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 1;
+ break;
+ case SSP_RX_4_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 4;
+ break;
+ case SSP_RX_8_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 8;
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 16;
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 32;
+ break;
+ default:
+ rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (pl022->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 1;
+ break;
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 4;
+ break;
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 8;
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 16;
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 32;
+ break;
+ default:
+ tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
switch (pl022->read) {
case READING_NULL:
/* Use the same as for writing */
@@ -1496,6 +1544,7 @@ static void pump_messages(struct work_struct *work)
* and core will be disabled when giveback() is called in each method
* (poll/interrupt/DMA)
*/
+ pm_runtime_get_sync(&pl022->adev->dev);
amba_vcore_enable(pl022->adev);
amba_pclk_enable(pl022->adev);
clk_enable(pl022->clk);
@@ -1629,17 +1678,57 @@ static int verify_controller_parameters(struct pl022 *pl022,
"Communication mode is configured incorrectly\n");
return -EINVAL;
}
- if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM)
- || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) {
+ switch (chip_info->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ case SSP_RX_4_OR_MORE_ELEM:
+ case SSP_RX_8_OR_MORE_ELEM:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
dev_err(&pl022->adev->dev,
"RX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
+ break;
}
- if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC)
- || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) {
+ switch (chip_info->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
dev_err(&pl022->adev->dev,
"TX FIFO Trigger Level is configured incorrectly\n");
return -EINVAL;
+ break;
}
if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
if ((chip_info->ctrl_len < SSP_BITS_4)
@@ -1874,6 +1963,9 @@ static int pl022_setup(struct spi_device *spi)
goto err_config_params;
}
+ pl022->rx_lev_trig = chip_info->rx_lev_trig;
+ pl022->tx_lev_trig = chip_info->tx_lev_trig;
+
/* Now set controller state based on controller data */
chip->xfer_type = chip_info->com_mode;
if (!chip_info->cs_control) {
@@ -2094,6 +2186,8 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
}
printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
adev->res.start, pl022->virtbase);
+ pm_runtime_enable(dev);
+ pm_runtime_resume(dev);
pl022->clk = clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
@@ -2155,6 +2249,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
destroy_queue(pl022);
pl022_dma_remove(pl022);
free_irq(adev->irq[0], pl022);
+ pm_runtime_disable(&adev->dev);
err_no_irq:
clk_put(pl022->clk);
err_no_clk:
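
Taken together, the runtime PM calls added to pl022 pair up across the driver's life cycle; a condensed sketch of the resulting pattern (simplified, error handling omitted):

/* probe(): register with the runtime PM core */
pm_runtime_enable(dev);
pm_runtime_resume(dev);

/* pump_messages(): hold a PM reference while the hardware is in use */
pm_runtime_get_sync(&pl022->adev->dev);
/* ... vcore/pclk/clk enable, run the message ... */

/* giveback(): drop the reference when the message completes */
pm_runtime_put(&pl022->adev->dev);

/* remove()/probe error path: balance pm_runtime_enable() */
pm_runtime_disable(&adev->dev);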
diff --git a/drivers/spi/spi_ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 2a298c02919..b267fd901e5 100644
--- a/drivers/spi/spi_ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -502,7 +502,7 @@ static int __init spi_ppc4xx_of_probe(struct platform_device *op)
goto free_gpios;
}
hw->mapbase = resource.start;
- hw->mapsize = resource.end - resource.start + 1;
+ hw->mapsize = resource_size(&resource);
/* Sanity check */
if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 378e504f89e..378e504f89e 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/spi-pxa2xx.c
index dc25bee8d33..dc25bee8d33 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/spi-pxa2xx.c
diff --git a/drivers/spi/spi_s3c24xx_fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
index 3793cae361d..059f2dc1fda 100644
--- a/drivers/spi/spi_s3c24xx_fiq.S
+++ b/drivers/spi/spi-s3c24xx-fiq.S
@@ -17,7 +17,7 @@
#include <mach/regs-irq.h>
#include <plat/regs-spi.h>
-#include "spi_s3c24xx_fiq.h"
+#include "spi-s3c24xx-fiq.h"
.text
diff --git a/drivers/spi/spi_s3c24xx_fiq.h b/drivers/spi/spi-s3c24xx-fiq.h
index a5950bb25b5..a5950bb25b5 100644
--- a/drivers/spi/spi_s3c24xx_fiq.h
+++ b/drivers/spi/spi-s3c24xx-fiq.h
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 1a5fcabfd56..1996ac57ef9 100644
--- a/drivers/spi/spi_s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c24xx.c
- *
+/*
* Copyright (c) 2006 Ben Dooks
* Copyright 2006-2009 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
@@ -32,7 +31,7 @@
#include <plat/fiq.h>
#include <asm/fiq.h>
-#include "spi_s3c24xx_fiq.h"
+#include "spi-s3c24xx-fiq.h"
/**
* s3c24xx_spi_devstate - per device data
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 8945e201e42..595dacc7645 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1,5 +1,4 @@
-/* linux/drivers/spi/spi_s3c64xx.c
- *
+/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
diff --git a/drivers/spi/spi_sh_msiof.c b/drivers/spi/spi-sh-msiof.c
index e00d94b2225..e00d94b2225 100644
--- a/drivers/spi/spi_sh_msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
diff --git a/drivers/spi/spi_sh_sci.c b/drivers/spi/spi-sh-sci.c
index 5c643916119..e7779c09f6e 100644
--- a/drivers/spi/spi_sh_sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -78,7 +78,7 @@ static inline u32 getmiso(struct spi_device *dev)
#define spidelay(x) ndelay(x)
-#include "spi_bitbang_txrx.h"
+#include "spi-bitbang-txrx.h"
static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
unsigned nsecs, u32 word, u8 bits)
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi-sh.c
index 9eedd71ad89..9eedd71ad89 100644
--- a/drivers/spi/spi_sh.c
+++ b/drivers/spi/spi-sh.c
diff --git a/drivers/spi/spi_stmp.c b/drivers/spi/spi-stmp.c
index fadff76eb7e..fadff76eb7e 100644
--- a/drivers/spi/spi_stmp.c
+++ b/drivers/spi/spi-stmp.c
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi-tegra.c
index 6c3aa6ecaad..a5a6302dc8e 100644
--- a/drivers/spi/spi_tegra.c
+++ b/drivers/spi/spi-tegra.c
@@ -498,14 +498,14 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
goto err0;
}
- if (!request_mem_region(r->start, (r->end - r->start) + 1,
+ if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
ret = -EBUSY;
goto err0;
}
tspi->phys = r->start;
- tspi->base = ioremap(r->start, r->end - r->start + 1);
+ tspi->base = ioremap(r->start, resource_size(r));
if (!tspi->base) {
dev_err(&pdev->dev, "can't ioremap iomem\n");
ret = -ENOMEM;
@@ -546,6 +546,7 @@ static int __init spi_tegra_probe(struct platform_device *pdev)
tspi->rx_dma_req.req_sel = spi_tegra_req_sels[pdev->id];
tspi->rx_dma_req.dev = tspi;
+ master->dev.of_node = pdev->dev.of_node;
ret = spi_register_master(master);
if (ret < 0)
@@ -563,7 +564,7 @@ err3:
err2:
iounmap(tspi->base);
err1:
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
err0:
spi_master_put(master);
return ret;
@@ -588,17 +589,28 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
iounmap(tspi->base);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, (r->end - r->start) + 1);
+ release_mem_region(r->start, resource_size(r));
return 0;
}
MODULE_ALIAS("platform:spi_tegra");
+#ifdef CONFIG_OF
+static struct of_device_id spi_tegra_of_match_table[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_tegra_of_match_table);
+#else /* CONFIG_OF */
+#define spi_tegra_of_match_table NULL
+#endif /* CONFIG_OF */
+
static struct platform_driver spi_tegra_driver = {
.driver = {
.name = "spi_tegra",
.owner = THIS_MODULE,
+ .of_match_table = spi_tegra_of_match_table,
},
.remove = __devexit_p(spi_tegra_remove),
};
diff --git a/drivers/spi/ti-ssp-spi.c b/drivers/spi/spi-ti-ssp.c
index ee22795c797..ee22795c797 100644
--- a/drivers/spi/ti-ssp-spi.c
+++ b/drivers/spi/spi-ti-ssp.c
diff --git a/drivers/spi/tle62x0.c b/drivers/spi/spi-tle62x0.c
index 32a40876532..940e73d1cf0 100644
--- a/drivers/spi/tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -1,5 +1,5 @@
/*
- * tle62x0.c -- support Infineon TLE62x0 driver chips
+ * Support Infineon TLE62x0 driver chips
*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks, <ben@simtec.co.uk>
diff --git a/drivers/spi/spi_topcliff_pch.c b/drivers/spi/spi-topcliff-pch.c
index 79e48d45113..1d23f383186 100644
--- a/drivers/spi/spi_topcliff_pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -26,6 +26,10 @@
#include <linux/spi/spidev.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
/* Register offsets */
#define PCH_SPCR 0x00 /* SPI control register */
@@ -35,6 +39,7 @@
#define PCH_SPDRR 0x10 /* SPI read data register */
#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
#define PCH_SRST 0x1C /* SPI reset register */
+#define PCH_ADDRESS_SIZE 0x20
#define PCH_SPSR_TFD 0x000007C0
#define PCH_SPSR_RFD 0x0000F800
@@ -52,8 +57,6 @@
#define STATUS_EXITING 2
#define PCH_SLEEP_TIME 10
-#define PCH_ADDRESS_SIZE 0x20
-
#define SSN_LOW 0x02U
#define SSN_NO_CONTROL 0x00U
#define PCH_MAX_CS 0xFF
@@ -73,22 +76,57 @@
#define SPSR_TFI_BIT (1 << 0)
#define SPSR_RFI_BIT (1 << 1)
#define SPSR_FI_BIT (1 << 2)
+#define SPSR_ORF_BIT (1 << 3)
#define SPBRR_SIZE_BIT (1 << 10)
-#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
+#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
+ SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
#define SPCR_RFIC_FIELD 20
#define SPCR_TFIC_FIELD 16
-#define SPSR_INT_BITS 0x1F
-#define MASK_SPBRR_SPBR_BITS (~((1 << 10) - 1))
-#define MASK_RFIC_SPCR_BITS (~(0xf << 20))
-#define MASK_TFIC_SPCR_BITS (~(0xf000f << 12))
+#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
+#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
+#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
+/* Definitions for ML7213/ML7223 by OKI SEMICONDUCTOR */
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_ML7213_SPI 0x802c
+#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+/*
+ * Maximum number of SPI instances per device:
+ * Intel EG20T PCH : 1ch
+ * OKI SEMICONDUCTOR ML7213 IOH : 2ch
+ * OKI SEMICONDUCTOR ML7223 IOH : 1ch
+ */
+#define PCH_SPI_MAX_DEV 2
+
+#define PCH_BUF_SIZE 4096
+#define PCH_DMA_TRANS_SIZE 12
+
+static int use_dma = 1;
+
+struct pch_spi_dma_ctrl {
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct pch_dma_slave param_tx;
+ struct pch_dma_slave param_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct scatterlist *sg_tx_p;
+ struct scatterlist *sg_rx_p;
+ struct scatterlist sg_tx;
+ struct scatterlist sg_rx;
+ int nent;
+ void *tx_buf_virt;
+ void *rx_buf_virt;
+ dma_addr_t tx_buf_dma;
+ dma_addr_t rx_buf_dma;
+};
/**
* struct pch_spi_data - Holds the SPI channel specific details
* @io_remap_addr: The remapped PCI base address
@@ -121,9 +159,13 @@
* @cur_trans: The current transfer that this SPI driver is
* handling
* @board_dat: Reference to the SPI device data structure
+ * @plat_dev: platform_device structure
+ * @ch: SPI channel number
+ * @irq_reg_sts: Status of IRQ registration
*/
struct pch_spi_data {
void __iomem *io_remap_addr;
+ unsigned long io_base_addr;
struct spi_master *master;
struct work_struct work;
struct workqueue_struct *wk;
@@ -144,27 +186,36 @@ struct pch_spi_data {
struct spi_message *current_msg;
struct spi_transfer *cur_trans;
struct pch_spi_board_data *board_dat;
+ struct platform_device *plat_dev;
+ int ch;
+ struct pch_spi_dma_ctrl dma;
+ int use_dma;
+ u8 irq_reg_sts;
};
/**
* struct pch_spi_board_data - Holds the SPI device specific details
* @pdev: Pointer to the PCI device
- * @irq_reg_sts: Status of IRQ registration
- * @pci_req_sts: Status of pci_request_regions
* @suspend_sts: Status of suspend
- * @data: Pointer to SPI channel data structure
+ * @num: The number of SPI device instance
*/
struct pch_spi_board_data {
struct pci_dev *pdev;
- u8 irq_reg_sts;
- u8 pci_req_sts;
u8 suspend_sts;
- struct pch_spi_data *data;
+ int num;
+};
+
+struct pch_pd_dev_save {
+ int num;
+ struct platform_device *pd_save[PCH_SPI_MAX_DEV];
+ struct pch_spi_board_data *board_dat;
};
static struct pci_device_id pch_spi_pcidev_id[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_GE_SPI)},
- {0,}
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { }
};
/**
@@ -251,10 +302,10 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
/* reset rx threshold */
- reg_spcr_val &= MASK_RFIC_SPCR_BITS;
+ reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
- iowrite32(((reg_spcr_val) &= (~(SPCR_RFIE_BIT))),
- (io_remap_addr + PCH_SPCR));
+
+ iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
}
/* update counts */
@@ -265,12 +316,15 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
/* if transfer complete interrupt */
if (reg_spsr_val & SPSR_FI_BIT) {
- /* disable FI & RFI interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
- SPCR_FIE_BIT | SPCR_RFIE_BIT);
+ if (tx_index < bpw_len)
+ dev_err(&data->master->dev,
+ "%s : Transfer is not completed", __func__);
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
/* transfer is completed; inform pch_spi_process_messages */
data->transfer_complete = true;
+ data->transfer_active = false;
wake_up(&data->wait);
}
}
@@ -283,24 +337,28 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
static irqreturn_t pch_spi_handler(int irq, void *dev_id)
{
u32 reg_spsr_val;
- struct pch_spi_data *data;
void __iomem *spsr;
void __iomem *io_remap_addr;
irqreturn_t ret = IRQ_NONE;
- struct pch_spi_board_data *board_dat = dev_id;
+ struct pch_spi_data *data = dev_id;
+ struct pch_spi_board_data *board_dat = data->board_dat;
if (board_dat->suspend_sts) {
dev_dbg(&board_dat->pdev->dev,
"%s returning due to suspend\n", __func__);
return IRQ_NONE;
}
+ if (data->use_dma)
+ return IRQ_NONE;
- data = board_dat->data;
io_remap_addr = data->io_remap_addr;
spsr = io_remap_addr + PCH_SPSR;
reg_spsr_val = ioread32(spsr);
+ if (reg_spsr_val & SPSR_ORF_BIT)
+ dev_err(&board_dat->pdev->dev, "%s Over run error", __func__);
+
/* Check if the interrupt is for SPI device */
if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
@@ -326,7 +384,7 @@ static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
if (n_spbr > PCH_MAX_SPBR)
n_spbr = PCH_MAX_SPBR;
- pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, ~MASK_SPBRR_SPBR_BITS);
+ pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
}
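
The mask polarity flip above (and at the other MASK_*_SPCR_BITS call sites) reads naturally given the helper's set/clear semantics, presumably a read-modify-write along these lines (a sketch; the helper itself is unchanged by this patch):

static void pch_spi_setclr_reg(struct spi_master *master, int idx,
			       u32 set, u32 clr)
{
	u32 tmp = pch_spi_readreg(master, idx);

	tmp = (tmp & ~clr) | set;	/* clear the clr bits, then set */
	pch_spi_writereg(master, idx, tmp);
}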
/**
@@ -435,26 +493,27 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev, "%s Transfer List not empty. "
"Transfer Speed is set.\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
/* validate Tx/Rx buffers and Transfer length */
list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
if (!transfer->tx_buf && !transfer->rx_buf) {
dev_err(&pspi->dev,
"%s Tx and Rx buffer NULL\n", __func__);
retval = -EINVAL;
- goto err_out;
+ goto err_return_spinlock;
}
if (!transfer->len) {
dev_err(&pspi->dev, "%s Transfer length invalid\n",
__func__);
retval = -EINVAL;
- goto err_out;
+ goto err_return_spinlock;
}
dev_dbg(&pspi->dev, "%s Tx/Rx buffer valid. Transfer length"
" valid\n", __func__);
- /* if baud rate hs been specified validate the same */
+ /* if baud rate has been specified validate the same */
if (transfer->speed_hz > PCH_MAX_BAUDRATE)
transfer->speed_hz = PCH_MAX_BAUDRATE;
@@ -465,25 +524,24 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
retval = -EINVAL;
dev_err(&pspi->dev,
"%s Invalid bits per word\n", __func__);
- goto err_out;
+ goto err_return_spinlock;
}
}
}
-
- spin_lock_irqsave(&data->lock, flags);
+ spin_unlock_irqrestore(&data->lock, flags);
/* We won't process any messages if we have been asked to terminate */
if (data->status == STATUS_EXITING) {
dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
retval = -ESHUTDOWN;
- goto err_return_spinlock;
+ goto err_out;
}
/* If suspended, return -EINVAL */
if (data->board_dat->suspend_sts) {
dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
retval = -EINVAL;
- goto err_return_spinlock;
+ goto err_out;
}
/* set status of message */
@@ -491,9 +549,11 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
pmsg->status = -EINPROGRESS;
-
+ spin_lock_irqsave(&data->lock, flags);
/* add message to queue */
list_add_tail(&pmsg->queue, &data->queue);
+ spin_unlock_irqrestore(&data->lock, flags);
+
dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
/* schedule work queue to run */
@@ -502,11 +562,13 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
retval = 0;
-err_return_spinlock:
- spin_unlock_irqrestore(&data->lock, flags);
err_out:
dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
return retval;
+err_return_spinlock:
+ dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return retval;
}
static inline void pch_spi_select_chip(struct pch_spi_data *data,
@@ -527,8 +589,7 @@ static inline void pch_spi_select_chip(struct pch_spi_data *data,
pch_spi_setup_transfer(pspi);
}
-static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
- struct spi_message **ppmsg)
+static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
{
int size;
u32 n_writes;
@@ -537,8 +598,6 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
const u8 *tx_buf;
const u16 *tx_sbuf;
- pmsg = *ppmsg;
-
/* set baud rate if needed */
if (data->cur_trans->speed_hz) {
dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
@@ -621,10 +680,9 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw,
data->transfer_active = true;
}
-
-static void pch_spi_nomore_transfer(struct pch_spi_data *data,
- struct spi_message *pmsg)
+static void pch_spi_nomore_transfer(struct pch_spi_data *data)
{
+ struct spi_message *pmsg;
dev_dbg(&data->master->dev, "%s called\n", __func__);
/* Invoke complete callback
* [To the spi core..indicating end of transfer] */
@@ -675,29 +733,21 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data,
static void pch_spi_set_ir(struct pch_spi_data *data)
{
- /* enable interrupts */
- if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH) {
+ /* enable interrupts, set threshold, enable SPI */
+ if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
/* set receive threshold to PCH_RX_THOLD */
pch_spi_setclr_reg(data->master, PCH_SPCR,
- PCH_RX_THOLD << SPCR_RFIC_FIELD,
- ~MASK_RFIC_SPCR_BITS);
- /* enable FI and RFI interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR,
- SPCR_RFIE_BIT | SPCR_FIE_BIT, 0);
- } else {
+ PCH_RX_THOLD << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_RFIE_BIT |
+ SPCR_ORIE_BIT | SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
+ else
/* set receive threshold to maximum */
pch_spi_setclr_reg(data->master, PCH_SPCR,
- PCH_RX_THOLD_MAX << SPCR_TFIC_FIELD,
- ~MASK_TFIC_SPCR_BITS);
- /* enable FI interrupt */
- pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_FIE_BIT, 0);
- }
-
- dev_dbg(&data->master->dev,
- "%s:invoking pch_spi_set_enable to enable SPI\n", __func__);
-
- /* SPI set enable */
- pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, SPCR_SPE_BIT, 0);
+ PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_ORIE_BIT |
+ SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
/* Wait until the transfer completes; go to sleep after
initiating the transfer. */
@@ -710,15 +760,13 @@ static void pch_spi_set_ir(struct pch_spi_data *data)
dev_dbg(&data->master->dev,
"%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
- data->transfer_active = false;
- dev_dbg(&data->master->dev,
- "%s set data->transfer_active = false\n", __func__);
-
/* clear all interrupts */
pch_spi_writereg(data->master, PCH_SPSR,
pch_spi_readreg(data->master, PCH_SPSR));
- /* disable interrupts */
- pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ /* Disable interrupts and SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
}
static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
@@ -742,6 +790,327 @@ static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
}
}
+static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
+{
+ int j;
+ u8 *rx_buf;
+ u16 *rx_sbuf;
+ const u8 *rx_dma_buf;
+ const u16 *rx_dma_sbuf;
+
+ /* copy Rx Data */
+ if (!data->cur_trans->rx_buf)
+ return;
+
+ if (bpw == 8) {
+ rx_buf = data->cur_trans->rx_buf;
+ rx_dma_buf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = *rx_dma_buf++ & 0xFF;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ rx_dma_sbuf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = *rx_dma_sbuf++;
+ }
+}
+
+static void pch_spi_start_transfer(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+ unsigned long flags;
+
+ dma = &data->dma;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* disable interrupts, SPI set enable */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* Wait until the transfer completes; go to sleep after
+ initiating the transfer. */
+ dev_dbg(&data->master->dev,
+ "%s:waiting for transfer to get over\n", __func__);
+ wait_event_interruptible(data->wait, data->transfer_complete);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ DMA_FROM_DEVICE);
+ async_tx_ack(dma->desc_rx);
+ async_tx_ack(dma->desc_tx);
+ kfree(dma->sg_tx_p);
+ kfree(dma->sg_rx_p);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ dev_dbg(&data->master->dev,
+ "%s:no more control over SSN-writing 0 to SSNXCR.", __func__);
+
+ /* clear fifo threshold, disable interrupts, disable SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
+ SPCR_SPE_BIT);
+ /* clear all interrupts */
+ pch_spi_writereg(data->master, PCH_SPSR,
+ pch_spi_readreg(data->master, PCH_SPSR));
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+ struct pch_spi_data *data = arg;
+
+ /* transfer is completed; inform pch_spi_process_messages_dma */
+ data->transfer_complete = true;
+ wake_up_interruptible(&data->wait);
+}
+
+static bool pch_spi_filter(struct dma_chan *chan, void *slave)
+{
+ struct pch_dma_slave *param = slave;
+
+ if ((chan->chan_id == param->chan_id) &&
+ (param->dma_dev == chan->device->dev)) {
+ chan->private = param;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct pci_dev *dma_dev;
+ struct pch_dma_slave *param;
+ struct pch_spi_dma_ctrl *dma;
+ unsigned int width;
+
+ if (bpw == 8)
+ width = PCH_DMA_WIDTH_1_BYTE;
+ else
+ width = PCH_DMA_WIDTH_2_BYTES;
+
+ dma = &data->dma;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get DMA's dev information */
+ dma_dev = pci_get_bus_and_slot(2, PCI_DEVFN(12, 0));
+
+ /* Set Tx DMA */
+ param = &dma->param_tx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+ param->tx_reg = data->io_base_addr + PCH_SPDWR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Tx)\n");
+ data->use_dma = 0;
+ return;
+ }
+ dma->chan_tx = chan;
+
+ /* Set Rx DMA */
+ param = &dma->param_rx;
+ param->dma_dev = &dma_dev->dev;
+ param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+ param->rx_reg = data->io_base_addr + PCH_SPDRR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Rx)\n");
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ data->use_dma = 0;
+ return;
+ }
+ dma->chan_rx = chan;
+}
+
+static void pch_spi_release_dma(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->chan_tx) {
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ }
+ if (dma->chan_rx) {
+ dma_release_channel(dma->chan_rx);
+ dma->chan_rx = NULL;
+ }
+ return;
+}
+
+static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+{
+ const u8 *tx_buf;
+ const u16 *tx_sbuf;
+ u8 *tx_dma_buf;
+ u16 *tx_dma_sbuf;
+ struct scatterlist *sg;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ int num;
+ int i;
+ int size;
+ int rem;
+ unsigned long flags;
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+
+ /* set baud rate if needed */
+ if (data->cur_trans->speed_hz) {
+ dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /* set bits per word if needed */
+ if (data->cur_trans->bits_per_word &&
+ (data->current_msg->spi->bits_per_word !=
+ data->cur_trans->bits_per_word)) {
+ dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_bits_per_word(data->master,
+ data->cur_trans->bits_per_word);
+ spin_unlock_irqrestore(&data->lock, flags);
+ *bpw = data->cur_trans->bits_per_word;
+ } else {
+ *bpw = data->current_msg->spi->bits_per_word;
+ }
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+ tx_buf = data->cur_trans->tx_buf;
+ tx_dma_buf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_buf++ = *tx_buf++;
+ } else {
+ tx_sbuf = data->cur_trans->tx_buf;
+ tx_dma_sbuf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_sbuf++ = *tx_sbuf++;
+ }
+ }
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ size = PCH_DMA_TRANS_SIZE;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ }
+ dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+ __func__, num, size, rem);
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* set receive fifo threshold and transmit fifo threshold */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ ((size - 1) << SPCR_RFIC_FIELD) |
+ ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) <<
+ SPCR_TFIC_FIELD),
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* RX */
+ dma->sg_rx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_rx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+ sg->offset = rem + size * (i - 1);
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_rx_p;
+ desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
+ num, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
+ __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+ desc_rx->callback = pch_dma_rx_complete;
+ desc_rx->callback_param = data;
+ dma->nent = num;
+ dma->desc_rx = desc_rx;
+
+ /* TX */
+ dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC);
+ sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_tx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+ sg->offset = rem + size * (i - 1);
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_tx_p;
+ desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
+ sg, num, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
+ __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+ desc_tx->callback = NULL;
+ desc_tx->callback_param = data;
+ dma->nent = num;
+ dma->desc_tx = desc_tx;
+
+ dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
+ "0x2 to SSNXCR\n", __func__);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ desc_rx->tx_submit(desc_rx);
+ desc_tx->tx_submit(desc_tx);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* reset transfer complete flag */
+ data->transfer_complete = false;
+}
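
A worked example of the descriptor chunking above, with illustrative numbers:

/*
 * bpw_len = 30 words, PCH_DMA_TRANS_SIZE = 12:
 *   num = 30 / 12 + 1 = 3 scatterlist entries
 *   rem = 30 % 12     = 6
 * sg[0] carries the remainder (6 words); sg[1] and sg[2] carry
 * 12 words each, so 6 + 12 + 12 = 30.
 */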
static void pch_spi_process_messages(struct work_struct *pwork)
{
@@ -753,13 +1122,10 @@ static void pch_spi_process_messages(struct work_struct *pwork)
dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
spin_lock(&data->lock);
-
/* check if suspend has been initiated;if yes flush queue */
if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
- dev_dbg(&data->master->dev,
- "%s suspend/remove initiated,flushing queue\n",
- __func__);
-
+ dev_dbg(&data->master->dev, "%s suspend/remove initiated,"
+ "flushing queue\n", __func__);
list_for_each_entry(pmsg, data->queue.next, queue) {
pmsg->status = -EIO;
@@ -793,53 +1159,42 @@ static void pch_spi_process_messages(struct work_struct *pwork)
spin_unlock(&data->lock);
+ if (data->use_dma)
+ pch_spi_request_dma(data,
+ data->current_msg->spi->bits_per_word);
do {
/* If we are already processing a message get the next
transfer structure from the message otherwise retrieve
the 1st transfer request from the message. */
spin_lock(&data->lock);
-
if (data->cur_trans == NULL) {
data->cur_trans =
- list_entry(data->current_msg->transfers.
- next, struct spi_transfer,
- transfer_list);
- dev_dbg(&data->master->dev,
- "%s :Getting 1st transfer message\n", __func__);
+ list_entry(data->current_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ dev_dbg(&data->master->dev, "%s "
+ ":Getting 1st transfer message\n", __func__);
} else {
data->cur_trans =
- list_entry(data->cur_trans->transfer_list.next,
- struct spi_transfer,
- transfer_list);
- dev_dbg(&data->master->dev,
- "%s :Getting next transfer message\n",
- __func__);
+ list_entry(data->cur_trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ dev_dbg(&data->master->dev, "%s "
+ ":Getting next transfer message\n", __func__);
}
-
spin_unlock(&data->lock);
- pch_spi_set_tx(data, &bpw, &pmsg);
-
- /* Control interrupt*/
- pch_spi_set_ir(data);
-
- /* Disable SPI transfer */
- pch_spi_setclr_reg(data->current_chip->master, PCH_SPCR, 0,
- SPCR_SPE_BIT);
-
- /* clear FIFO */
- pch_spi_clear_fifo(data->master);
-
- /* copy Rx Data */
- pch_spi_copy_rx_data(data, bpw);
-
- /* free memory */
- kfree(data->pkt_rx_buff);
- data->pkt_rx_buff = NULL;
-
- kfree(data->pkt_tx_buff);
- data->pkt_tx_buff = NULL;
-
+ if (data->use_dma) {
+ pch_spi_handle_dma(data, &bpw);
+ pch_spi_start_transfer(data);
+ pch_spi_copy_rx_data_for_dma(data, bpw);
+ } else {
+ pch_spi_set_tx(data, &bpw);
+ pch_spi_set_ir(data);
+ pch_spi_copy_rx_data(data, bpw);
+ kfree(data->pkt_rx_buff);
+ data->pkt_rx_buff = NULL;
+ kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
/* increment message count */
data->current_msg->actual_length += data->cur_trans->len;
@@ -860,125 +1215,60 @@ static void pch_spi_process_messages(struct work_struct *pwork)
/* No more transfer in this message. */
if ((data->cur_trans->transfer_list.next) ==
&(data->current_msg->transfers)) {
- pch_spi_nomore_transfer(data, pmsg);
+ pch_spi_nomore_transfer(data);
}
spin_unlock(&data->lock);
} while (data->cur_trans != NULL);
+
+ if (data->use_dma)
+ pch_spi_release_dma(data);
}
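
/*
 * Summary: each spi_transfer in the current message goes through either
 * the DMA path (pch_spi_handle_dma() + pch_spi_start_transfer() +
 * pch_spi_copy_rx_data_for_dma()) or the PIO path (pch_spi_set_tx() +
 * pch_spi_set_ir() + pch_spi_copy_rx_data()), selected by data->use_dma;
 * the DMA channels are requested before the transfer loop and released
 * again after it.
 */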
-static void pch_spi_free_resources(struct pch_spi_board_data *board_dat)
+static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
{
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* free workqueue */
- if (board_dat->data->wk != NULL) {
- destroy_workqueue(board_dat->data->wk);
- board_dat->data->wk = NULL;
+ if (data->wk != NULL) {
+ destroy_workqueue(data->wk);
+ data->wk = NULL;
dev_dbg(&board_dat->pdev->dev,
"%s destroy_workqueue invoked successfully\n",
__func__);
}
-
- /* disable interrupts & free IRQ */
- if (board_dat->irq_reg_sts) {
- /* disable interrupts */
- pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
- PCH_ALL);
-
- /* free IRQ */
- free_irq(board_dat->pdev->irq, board_dat);
-
- dev_dbg(&board_dat->pdev->dev,
- "%s free_irq invoked successfully\n", __func__);
-
- board_dat->irq_reg_sts = false;
- }
-
- /* unmap PCI base address */
- if (board_dat->data->io_remap_addr != 0) {
- pci_iounmap(board_dat->pdev, board_dat->data->io_remap_addr);
-
- board_dat->data->io_remap_addr = 0;
-
- dev_dbg(&board_dat->pdev->dev,
- "%s pci_iounmap invoked successfully\n", __func__);
- }
-
- /* release PCI region */
- if (board_dat->pci_req_sts) {
- pci_release_regions(board_dat->pdev);
- dev_dbg(&board_dat->pdev->dev,
- "%s pci_release_regions invoked successfully\n",
- __func__);
- board_dat->pci_req_sts = false;
- }
}
-static int pch_spi_get_resources(struct pch_spi_board_data *board_dat)
+static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
{
- void __iomem *io_remap_addr;
- int retval;
+ int retval = 0;
+
dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
/* create workqueue */
- board_dat->data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
- if (!board_dat->data->wk) {
+ data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
+ if (!data->wk) {
dev_err(&board_dat->pdev->dev,
"%s create_singlet hread_workqueue failed\n", __func__);
retval = -EBUSY;
goto err_return;
}
- dev_dbg(&board_dat->pdev->dev,
- "%s create_singlethread_workqueue success\n", __func__);
-
- retval = pci_request_regions(board_dat->pdev, KBUILD_MODNAME);
- if (retval != 0) {
- dev_err(&board_dat->pdev->dev,
- "%s request_region failed\n", __func__);
- goto err_return;
- }
-
- board_dat->pci_req_sts = true;
-
- io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
- if (io_remap_addr == 0) {
- dev_err(&board_dat->pdev->dev,
- "%s pci_iomap failed\n", __func__);
- retval = -ENOMEM;
- goto err_return;
- }
-
- /* calculate base address for all channels */
- board_dat->data->io_remap_addr = io_remap_addr;
-
/* reset PCH SPI h/w */
- pch_spi_reset(board_dat->data->master);
+ pch_spi_reset(data->master);
dev_dbg(&board_dat->pdev->dev,
"%s pch_spi_reset invoked successfully\n", __func__);
- /* register IRQ */
- retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
- IRQF_SHARED, KBUILD_MODNAME, board_dat);
- if (retval != 0) {
- dev_err(&board_dat->pdev->dev,
- "%s request_irq failed\n", __func__);
- goto err_return;
- }
-
- dev_dbg(&board_dat->pdev->dev, "%s request_irq returned=%d\n",
- __func__, retval);
-
- board_dat->irq_reg_sts = true;
dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
err_return:
if (retval != 0) {
dev_err(&board_dat->pdev->dev,
"%s FAIL:invoking pch_spi_free_resources\n", __func__);
- pch_spi_free_resources(board_dat);
+ pch_spi_free_resources(board_dat, data);
}
dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
@@ -986,255 +1276,387 @@ err_return:
return retval;
}
-static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->tx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->tx_buf_virt, dma->tx_buf_dma);
+ if (dma->rx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->rx_buf_virt, dma->rx_buf_dma);
+}
- struct spi_master *master;
-
- struct pch_spi_board_data *board_dat;
- int retval;
-
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
-
- /* allocate memory for private data */
- board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
- if (board_dat == NULL) {
- dev_err(&pdev->dev,
- " %s memory allocation for private data failed\n",
- __func__);
- retval = -ENOMEM;
- goto err_kmalloc;
- }
+static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ /* Get Consistent memory for Tx DMA */
+ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ /* Get Consistent memory for Rx DMA */
+ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+}
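+
+/*
+ * Note: dma_alloc_coherent() failures are not checked here, so the
+ * Tx/Rx buffer pointers may be NULL; pch_free_dma_buf() above only
+ * frees a buffer whose DMA handle is non-zero, keeping the pair safe.
+ */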
- dev_dbg(&pdev->dev,
- "%s memory allocation for private data success\n", __func__);
+static int __devinit pch_spi_pd_probe(struct platform_device *plat_dev)
+{
+ int ret;
+ struct spi_master *master;
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data;
- /* enable PCI device */
- retval = pci_enable_device(pdev);
- if (retval != 0) {
- dev_err(&pdev->dev, "%s pci_enable_device FAILED\n", __func__);
+ dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
- goto err_pci_en_device;
+ master = spi_alloc_master(&board_dat->pdev->dev,
+ sizeof(struct pch_spi_data));
+ if (!master) {
+ dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+ plat_dev->id);
+ return -ENOMEM;
}
- dev_dbg(&pdev->dev, "%s pci_enable_device returned=%d\n",
- __func__, retval);
+ data = spi_master_get_devdata(master);
+ data->master = master;
- board_dat->pdev = pdev;
+ platform_set_drvdata(plat_dev, data);
- /* alllocate memory for SPI master */
- master = spi_alloc_master(&pdev->dev, sizeof(struct pch_spi_data));
- if (master == NULL) {
- retval = -ENOMEM;
- dev_err(&pdev->dev, "%s Fail.\n", __func__);
- goto err_spi_alloc_master;
+ /* base address + address offset */
+ data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
+ PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0) +
+ PCH_ADDRESS_SIZE * plat_dev->id;
+ if (!data->io_remap_addr) {
+ dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
+ ret = -ENOMEM;
+ goto err_pci_iomap;
}
- dev_dbg(&pdev->dev,
- "%s spi_alloc_master returned non NULL\n", __func__);
+ dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
+ plat_dev->id, data->io_remap_addr);
/* initialize members of SPI master */
master->bus_num = -1;
master->num_chipselect = PCH_MAX_CS;
master->setup = pch_spi_setup;
master->transfer = pch_spi_transfer;
- dev_dbg(&pdev->dev,
- "%s transfer member of SPI master initialized\n", __func__);
-
- board_dat->data = spi_master_get_devdata(master);
-
- board_dat->data->master = master;
- board_dat->data->n_curnt_chip = 255;
- board_dat->data->board_dat = board_dat;
- board_dat->data->status = STATUS_RUNNING;
-
- INIT_LIST_HEAD(&board_dat->data->queue);
- spin_lock_init(&board_dat->data->lock);
- INIT_WORK(&board_dat->data->work, pch_spi_process_messages);
- init_waitqueue_head(&board_dat->data->wait);
- /* allocate resources for PCH SPI */
- retval = pch_spi_get_resources(board_dat);
- if (retval) {
- dev_err(&pdev->dev, "%s fail(retval=%d)\n", __func__, retval);
+ data->board_dat = board_dat;
+ data->plat_dev = plat_dev;
+ data->n_curnt_chip = 255;
+ data->status = STATUS_RUNNING;
+ data->ch = plat_dev->id;
+ data->use_dma = use_dma;
+
+ INIT_LIST_HEAD(&data->queue);
+ spin_lock_init(&data->lock);
+ INIT_WORK(&data->work, pch_spi_process_messages);
+ init_waitqueue_head(&data->wait);
+
+ ret = pch_spi_get_resources(board_dat, data);
+ if (ret) {
+ dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
goto err_spi_get_resources;
}
- dev_dbg(&pdev->dev, "%s pch_spi_get_resources returned=%d\n",
- __func__, retval);
-
- /* save private data in dev */
- pci_set_drvdata(pdev, board_dat);
- dev_dbg(&pdev->dev, "%s invoked pci_set_drvdata\n", __func__);
+ ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (ret) {
+ dev_err(&plat_dev->dev,
+ "%s request_irq failed\n", __func__);
+ goto err_request_irq;
+ }
+ data->irq_reg_sts = true;
- /* set master mode */
pch_spi_set_master_mode(master);
- dev_dbg(&pdev->dev,
- "%s invoked pch_spi_set_master_mode\n", __func__);
- /* Register the controller with the SPI core. */
- retval = spi_register_master(master);
- if (retval != 0) {
- dev_err(&pdev->dev,
+ ret = spi_register_master(master);
+ if (ret != 0) {
+ dev_err(&plat_dev->dev,
"%s spi_register_master FAILED\n", __func__);
- goto err_spi_reg_master;
+ goto err_spi_register_master;
}
- dev_dbg(&pdev->dev, "%s spi_register_master returned=%d\n",
- __func__, retval);
-
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Using DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
return 0;
-err_spi_reg_master:
- spi_unregister_master(master);
+err_spi_register_master:
+ free_irq(board_dat->pdev->irq, data);
+err_request_irq:
+ pch_spi_free_resources(board_dat, data);
err_spi_get_resources:
-err_spi_alloc_master:
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+err_pci_iomap:
spi_master_put(master);
- pci_disable_device(pdev);
-err_pci_en_device:
- kfree(board_dat);
-err_kmalloc:
- return retval;
+
+ return ret;
}
-static void pch_spi_remove(struct pci_dev *pdev)
+static int __devexit pch_spi_pd_remove(struct platform_device *plat_dev)
{
- struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(plat_dev);
int count;
+ unsigned long flags;
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+ dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
+ __func__, plat_dev->id, board_dat->pdev->irq);
- if (!board_dat) {
- dev_err(&pdev->dev,
- "%s pci_get_drvdata returned NULL\n", __func__);
- return;
- }
+ if (use_dma)
+ pch_free_dma_buf(board_dat, data);
/* check for any pending messages; no action is taken if the queue
* is still full; but at least we tried. Unload anyway */
count = 500;
- spin_lock(&board_dat->data->lock);
- board_dat->data->status = STATUS_EXITING;
- while ((list_empty(&board_dat->data->queue) == 0) && --count) {
+ spin_lock_irqsave(&data->lock, flags);
+ data->status = STATUS_EXITING;
+ while ((list_empty(&data->queue) == 0) && --count) {
dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
__func__);
- spin_unlock(&board_dat->data->lock);
+ spin_unlock_irqrestore(&data->lock, flags);
msleep(PCH_SLEEP_TIME);
- spin_lock(&board_dat->data->lock);
+ spin_lock_irqsave(&data->lock, flags);
}
- spin_unlock(&board_dat->data->lock);
-
- /* Free resources allocated for PCH SPI */
- pch_spi_free_resources(board_dat);
-
- spi_unregister_master(board_dat->data->master);
-
- /* free memory for private data */
- kfree(board_dat);
+ spin_unlock_irqrestore(&data->lock, flags);
- pci_set_drvdata(pdev, NULL);
+ pch_spi_free_resources(board_dat, data);
+ /* disable interrupts & free IRQ */
+ if (data->irq_reg_sts) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ data->irq_reg_sts = false;
+ free_irq(board_dat->pdev->irq, data);
+ }
- /* disable PCI device */
- pci_disable_device(pdev);
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+ spi_unregister_master(data->master);
+ spi_master_put(data->master);
+ platform_set_drvdata(plat_dev, NULL);
- dev_dbg(&pdev->dev, "%s invoked pci_disable_device\n", __func__);
+ return 0;
}
-
#ifdef CONFIG_PM
-static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+static int pch_spi_pd_suspend(struct platform_device *pd_dev,
+ pm_message_t state)
{
u8 count;
- int retval;
-
- struct pch_spi_board_data *board_dat = pci_get_drvdata(pdev);
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
- dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+ dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
if (!board_dat) {
- dev_err(&pdev->dev,
+ dev_err(&pd_dev->dev,
"%s pci_get_drvdata returned NULL\n", __func__);
return -EFAULT;
}
- retval = 0;
- board_dat->suspend_sts = true;
-
 /* check if the current message is processed:
 Only after that's done will the transfer be suspended */
count = 255;
while ((--count) > 0) {
- if (!(board_dat->data->bcurrent_msg_processing)) {
- dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_"
- "msg_processing = false\n", __func__);
+ if (!(data->bcurrent_msg_processing))
break;
- } else {
- dev_dbg(&pdev->dev, "%s board_dat->data->bCurrent_msg_"
- "processing = true\n", __func__);
- }
msleep(PCH_SLEEP_TIME);
}
/* Free IRQ */
- if (board_dat->irq_reg_sts) {
+ if (data->irq_reg_sts) {
/* disable all interrupts */
- pch_spi_setclr_reg(board_dat->data->master, PCH_SPCR, 0,
- PCH_ALL);
- pch_spi_reset(board_dat->data->master);
-
- free_irq(board_dat->pdev->irq, board_dat);
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_reset(data->master);
+ free_irq(board_dat->pdev->irq, data);
- board_dat->irq_reg_sts = false;
- dev_dbg(&pdev->dev,
+ data->irq_reg_sts = false;
+ dev_dbg(&pd_dev->dev,
"%s free_irq invoked successfully.\n", __func__);
}
+ return 0;
+}
+
+static int pch_spi_pd_resume(struct platform_device *pd_dev)
+{
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+ int retval;
+
+ if (!board_dat) {
+ dev_err(&pd_dev->dev,
+ "%s pci_get_drvdata returned NULL\n", __func__);
+ return -EFAULT;
+ }
+
+ if (!data->irq_reg_sts) {
+ /* register IRQ */
+ retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (retval < 0) {
+ dev_err(&pd_dev->dev,
+ "%s request_irq failed\n", __func__);
+ return retval;
+ }
+
+ /* reset PCH SPI h/w */
+ pch_spi_reset(data->master);
+ pch_spi_set_master_mode(data->master);
+ data->irq_reg_sts = true;
+ }
+ return 0;
+}
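+
+/*
+ * The IRQ is requested and freed with the per-channel "data" pointer as
+ * the shared-IRQ dev_id: suspend frees it, and resume re-registers it,
+ * resetting the controller before marking irq_reg_sts true again.
+ */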
+#else
+#define pch_spi_pd_suspend NULL
+#define pch_spi_pd_resume NULL
+#endif
+
+static struct platform_driver pch_spi_pd_driver = {
+ .driver = {
+ .name = "pch-spi",
+ .owner = THIS_MODULE,
+ },
+ .probe = pch_spi_pd_probe,
+ .remove = __devexit_p(pch_spi_pd_remove),
+ .suspend = pch_spi_pd_suspend,
+ .resume = pch_spi_pd_resume
+};
+
+static int __devinit pch_spi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct pch_spi_board_data *board_dat;
+ struct platform_device *pd_dev = NULL;
+ int retval;
+ int i;
+ struct pch_pd_dev_save *pd_dev_save;
+
+ pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
+ if (!pd_dev_save) {
+ dev_err(&pdev->dev, "%s Can't allocate pd_dev_save\n", __func__);
+ return -ENOMEM;
+ }
+
+ board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+ if (!board_dat) {
+ dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
+ retval = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ retval = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (retval) {
+ dev_err(&pdev->dev, "%s request_region failed\n", __func__);
+ goto pci_request_regions;
+ }
+
+ board_dat->pdev = pdev;
+ board_dat->num = id->driver_data;
+ pd_dev_save->num = id->driver_data;
+ pd_dev_save->board_dat = board_dat;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
+ goto pci_enable_device;
+ }
+
+ for (i = 0; i < board_dat->num; i++) {
+ pd_dev = platform_device_alloc("pch-spi", i);
+ if (!pd_dev) {
+ dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ goto err_platform_device;
+ }
+ pd_dev_save->pd_save[i] = pd_dev;
+ pd_dev->dev.parent = &pdev->dev;
+
+ retval = platform_device_add_data(pd_dev, board_dat,
+ sizeof(*board_dat));
+ if (retval) {
+ dev_err(&pdev->dev,
+ "platform_device_add_data failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+
+ retval = platform_device_add(pd_dev);
+ if (retval) {
+ dev_err(&pdev->dev, "platform_device_add failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+ }
+
+ pci_set_drvdata(pdev, pd_dev_save);
+
+ return 0;
+
+err_platform_device:
+ pci_disable_device(pdev);
+pci_enable_device:
+ pci_release_regions(pdev);
+pci_request_regions:
+ kfree(board_dat);
+err_no_mem:
+ kfree(pd_dev_save);
+
+ return retval;
+}
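+
+/*
+ * The PCI probe only claims the shared resources (PCI regions, device
+ * enable) and then creates one "pch-spi" platform device per SPI
+ * channel (id->driver_data holds the channel count), with board_dat
+ * passed along as platform data; the per-channel setup is done in
+ * pch_spi_pd_probe().
+ */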
+
+static void __devexit pch_spi_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
+
+ for (i = 0; i < pd_dev_save->num; i++)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
+
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(pd_dev_save->board_dat);
+ kfree(pd_dev_save);
+}
+
+#ifdef CONFIG_PM
+static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+ pd_dev_save->board_dat->suspend_sts = true;
+
/* save config space */
retval = pci_save_state(pdev);
-
if (retval == 0) {
- dev_dbg(&pdev->dev, "%s pci_save_state returned=%d\n",
- __func__, retval);
- /* disable PM notifications */
pci_enable_wake(pdev, PCI_D3hot, 0);
- dev_dbg(&pdev->dev,
- "%s pci_enable_wake invoked successfully\n", __func__);
- /* disable PCI device */
pci_disable_device(pdev);
- dev_dbg(&pdev->dev,
- "%s pci_disable_device invoked successfully\n",
- __func__);
- /* move device to D3hot state */
pci_set_power_state(pdev, PCI_D3hot);
- dev_dbg(&pdev->dev,
- "%s pci_set_power_state invoked successfully\n",
- __func__);
} else {
dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
}
- dev_dbg(&pdev->dev, "%s return=%d\n", __func__, retval);
-
return retval;
}
static int pch_spi_resume(struct pci_dev *pdev)
{
int retval;
-
- struct pch_spi_board_data *board = pci_get_drvdata(pdev);
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
- if (!board) {
- dev_err(&pdev->dev,
- "%s pci_get_drvdata returned NULL\n", __func__);
- return -EFAULT;
- }
-
- /* move device to DO power state */
pci_set_power_state(pdev, PCI_D0);
-
- /* restore state */
pci_restore_state(pdev);
retval = pci_enable_device(pdev);
@@ -1242,34 +1664,12 @@ static int pch_spi_resume(struct pci_dev *pdev)
dev_err(&pdev->dev,
"%s pci_enable_device failed\n", __func__);
} else {
- /* disable PM notifications */
pci_enable_wake(pdev, PCI_D3hot, 0);
- /* register IRQ handler */
- if (!board->irq_reg_sts) {
- /* register IRQ */
- retval = request_irq(board->pdev->irq, pch_spi_handler,
- IRQF_SHARED, KBUILD_MODNAME,
- board);
- if (retval < 0) {
- dev_err(&pdev->dev,
- "%s request_irq failed\n", __func__);
- return retval;
- }
- board->irq_reg_sts = true;
-
- /* reset PCH SPI h/w */
- pch_spi_reset(board->data->master);
- pch_spi_set_master_mode(board->data->master);
-
- /* set suspend status to false */
- board->suspend_sts = false;
-
- }
+ /* set suspend status to false */
+ pd_dev_save->board_dat->suspend_sts = false;
}
- dev_dbg(&pdev->dev, "%s returning=%d\n", __func__, retval);
-
return retval;
}
#else
@@ -1289,15 +1689,29 @@ static struct pci_driver pch_spi_pcidev = {
static int __init pch_spi_init(void)
{
- return pci_register_driver(&pch_spi_pcidev);
+ int ret;
+ ret = platform_driver_register(&pch_spi_pd_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&pch_spi_pcidev);
+ if (ret) {
+ platform_driver_unregister(&pch_spi_pd_driver);
+ return ret;
+ }
+
+ return 0;
}
module_init(pch_spi_init);
static void __exit pch_spi_exit(void)
{
pci_unregister_driver(&pch_spi_pcidev);
+ platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
+module_param(use_dma, int, 0644);
+MODULE_PARM_DESC(use_dma,
+ "Pass 1 to use DMA for data transfers, 0 to disable it; default 1");
+
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Topcliff PCH SPI PCI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi-txx9.c
index dfa024b633e..f0a2ab0428a 100644
--- a/drivers/spi/spi_txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -1,5 +1,5 @@
/*
- * spi_txx9.c - TXx9 SPI controller driver.
+ * TXx9 SPI controller driver.
*
* Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
* Copyright (C) 2000-2001 Toshiba Corporation
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/spi-xilinx.c
index 4d2c75df886..4d2c75df886 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/spi-xilinx.c
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2e13a14bba3..4d1b9f517ce 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1,5 +1,5 @@
/*
- * spi.c - SPI init/core code
+ * SPI init/core code
*
* Copyright (C) 2005 David Brownell
*
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c
deleted file mode 100644
index be991359bf9..00000000000
--- a/drivers/spi/spi_s3c24xx_gpio.c
+++ /dev/null
@@ -1,201 +0,0 @@
-/* linux/drivers/spi/spi_s3c24xx_gpio.c
- *
- * Copyright (c) 2006 Ben Dooks
- * Copyright (c) 2006 Simtec Electronics
- *
- * S3C24XX GPIO based SPI driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-
-#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
-
-#include <mach/regs-gpio.h>
-#include <mach/spi-gpio.h>
-#include <mach/hardware.h>
-
-struct s3c2410_spigpio {
- struct spi_bitbang bitbang;
-
- struct s3c2410_spigpio_info *info;
- struct platform_device *dev;
-};
-
-static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi)
-{
- return spi_master_get_devdata(spi->master);
-}
-
-static inline void setsck(struct spi_device *dev, int on)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
- s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0);
-}
-
-static inline void setmosi(struct spi_device *dev, int on)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
- s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0);
-}
-
-static inline u32 getmiso(struct spi_device *dev)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
- return s3c2410_gpio_getpin(sg->info->pin_miso) ? 1 : 0;
-}
-
-#define spidelay(x) ndelay(x)
-
-#include "spi_bitbang_txrx.h"
-
-
-static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
-}
-
-static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
-}
-
-static u32 s3c2410_spigpio_txrx_mode2(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
-}
-
-static u32 s3c2410_spigpio_txrx_mode3(struct spi_device *spi,
- unsigned nsecs, u32 word, u8 bits)
-{
- return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
-}
-
-
-static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value)
-{
- struct s3c2410_spigpio *sg = spidev_to_sg(dev);
-
- if (sg->info && sg->info->chip_select)
- (sg->info->chip_select)(sg->info, value);
-}
-
-static int s3c2410_spigpio_probe(struct platform_device *dev)
-{
- struct s3c2410_spigpio_info *info;
- struct spi_master *master;
- struct s3c2410_spigpio *sp;
- int ret;
-
- master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio));
- if (master == NULL) {
- dev_err(&dev->dev, "failed to allocate spi master\n");
- ret = -ENOMEM;
- goto err;
- }
-
- sp = spi_master_get_devdata(master);
-
- platform_set_drvdata(dev, sp);
-
- /* copy in the plkatform data */
- info = sp->info = dev->dev.platform_data;
-
- /* setup spi bitbang adaptor */
- sp->bitbang.master = spi_master_get(master);
- sp->bitbang.master->bus_num = info->bus_num;
- sp->bitbang.master->num_chipselect = info->num_chipselect;
- sp->bitbang.chipselect = s3c2410_spigpio_chipselect;
-
- sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0;
- sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1;
- sp->bitbang.txrx_word[SPI_MODE_2] = s3c2410_spigpio_txrx_mode2;
- sp->bitbang.txrx_word[SPI_MODE_3] = s3c2410_spigpio_txrx_mode3;
-
- /* set state of spi pins, always assume that the clock is
- * available, but do check the MOSI and MISO. */
- s3c2410_gpio_setpin(info->pin_clk, 0);
- s3c2410_gpio_cfgpin(info->pin_clk, S3C2410_GPIO_OUTPUT);
-
- if (info->pin_mosi < S3C2410_GPH10) {
- s3c2410_gpio_setpin(info->pin_mosi, 0);
- s3c2410_gpio_cfgpin(info->pin_mosi, S3C2410_GPIO_OUTPUT);
- }
-
- if (info->pin_miso != S3C2410_GPA0 && info->pin_miso < S3C2410_GPH10)
- s3c2410_gpio_cfgpin(info->pin_miso, S3C2410_GPIO_INPUT);
-
- ret = spi_bitbang_start(&sp->bitbang);
- if (ret)
- goto err_no_bitbang;
-
- return 0;
-
- err_no_bitbang:
- spi_master_put(sp->bitbang.master);
- err:
- return ret;
-
-}
-
-static int s3c2410_spigpio_remove(struct platform_device *dev)
-{
- struct s3c2410_spigpio *sp = platform_get_drvdata(dev);
-
- spi_bitbang_stop(&sp->bitbang);
- spi_master_put(sp->bitbang.master);
-
- return 0;
-}
-
-/* all gpio should be held over suspend/resume, so we should
- * not need to deal with this
-*/
-
-#define s3c2410_spigpio_suspend NULL
-#define s3c2410_spigpio_resume NULL
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:spi_s3c24xx_gpio");
-
-static struct platform_driver s3c2410_spigpio_drv = {
- .probe = s3c2410_spigpio_probe,
- .remove = s3c2410_spigpio_remove,
- .suspend = s3c2410_spigpio_suspend,
- .resume = s3c2410_spigpio_resume,
- .driver = {
- .name = "spi_s3c24xx_gpio",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2410_spigpio_init(void)
-{
- return platform_driver_register(&s3c2410_spigpio_drv);
-}
-
-static void __exit s3c2410_spigpio_exit(void)
-{
- platform_driver_unregister(&s3c2410_spigpio_drv);
-}
-
-module_init(s3c2410_spigpio_init);
-module_exit(s3c2410_spigpio_exit);
-
-MODULE_DESCRIPTION("S3C24XX SPI Driver");
-MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d9fd8621136..830adbed1d7 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -1,5 +1,5 @@
/*
- * spidev.c -- simple synchronous userspace interface to SPI devices
+ * Simple synchronous userspace interface to SPI devices
*
* Copyright (C) 2006 SWAPP
* Andrea Paterniani <a.paterniani@swapp-eng.it>
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2d711..abe8ecbcdf0 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@ config LOOPBACK_TARGET
help
Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
- bool "TCM loopback fabric module CDB debug code"
- depends on LOOPBACK_TARGET
- help
- Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 70c2e7fa666..aa2d6799723 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -31,7 +31,6 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
@@ -80,7 +79,7 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
if (!tl_cmd) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+ pr_err("Unable to allocate struct tcm_loop_cmd\n");
set_host_byte(sc, DID_ERROR);
return NULL;
}
@@ -118,17 +117,16 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
* Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
*/
if (scsi_bidi_cmnd(sc))
- T_TASK(se_cmd)->t_tasks_bidi = 1;
+ se_cmd->t_tasks_bidi = 1;
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
- if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
+ if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
set_host_byte(sc, DID_NO_CONNECT);
return NULL;
}
- transport_device_setup_cmd(se_cmd);
return se_cmd;
}
@@ -143,17 +141,17 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- void *mem_ptr, *mem_bidi_ptr = NULL;
- u32 sg_no_bidi = 0;
+ struct scatterlist *sgl_bidi = NULL;
+ u32 sgl_bidi_count = 0;
int ret;
/*
* Allocate the necessary tasks to complete the received CDB+data
*/
- ret = transport_generic_allocate_tasks(se_cmd, tl_cmd->sc->cmnd);
- if (ret == -1) {
+ ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
+ if (ret == -ENOMEM) {
/* Out of Resources */
return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -2) {
+ } else if (ret == -EINVAL) {
/*
* Handle case for SAM_STAT_RESERVATION_CONFLICT
*/
@@ -165,35 +163,21 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
*/
return PYX_TRANSPORT_USE_SENSE_REASON;
}
+
/*
- * Setup the struct scatterlist memory from the received
- * struct scsi_cmnd.
+ * For BIDI commands, pass in the extra READ buffer
+ * to transport_generic_map_mem_to_cmd() below.
*/
- if (scsi_sg_count(sc)) {
- se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM;
- mem_ptr = (void *)scsi_sglist(sc);
- /*
- * For BIDI commands, pass in the extra READ buffer
- * to transport_generic_map_mem_to_cmd() below..
- */
- if (T_TASK(se_cmd)->t_tasks_bidi) {
- struct scsi_data_buffer *sdb = scsi_in(sc);
+ if (se_cmd->t_tasks_bidi) {
+ struct scsi_data_buffer *sdb = scsi_in(sc);
- mem_bidi_ptr = (void *)sdb->table.sgl;
- sg_no_bidi = sdb->table.nents;
- }
- } else {
- /*
- * Used for DMA_NONE
- */
- mem_ptr = NULL;
+ sgl_bidi = sdb->table.sgl;
+ sgl_bidi_count = sdb->table.nents;
}
- /*
- * Map the SG memory into struct se_mem->page linked list using the same
- * physical memory at sg->page_link.
- */
- ret = transport_generic_map_mem_to_cmd(se_cmd, mem_ptr,
- scsi_sg_count(sc), mem_bidi_ptr, sg_no_bidi);
+
+ /* Tell the core about our preallocated memory */
+ ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
if (ret < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -216,13 +200,10 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)
* Release the struct se_cmd, which will make a callback to release
* struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
*/
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
}
-/*
- * Called from struct target_core_fabric_ops->release_cmd_to_pool()
- */
-static void tcm_loop_deallocate_core_cmd(struct se_cmd *se_cmd)
+static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
struct tcm_loop_cmd, tl_se_cmd);
@@ -300,7 +281,7 @@ static int tcm_loop_queuecommand(
struct tcm_loop_hba *tl_hba;
struct tcm_loop_tpg *tl_tpg;
- TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+ pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
" scsi_buf_len: %u\n", sc->device->host->host_no,
sc->device->id, sc->device->channel, sc->device->lun,
sc->cmnd[0], scsi_bufflen(sc));
@@ -350,7 +331,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
*/
tl_nexus = tl_hba->tl_nexus;
if (!tl_nexus) {
- printk(KERN_ERR "Unable to perform device reset without"
+ pr_err("Unable to perform device reset without"
" active I_T Nexus\n");
return FAILED;
}
@@ -363,13 +344,13 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
if (!tl_cmd) {
- printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+ pr_err("Unable to allocate memory for tl_cmd\n");
return FAILED;
}
tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
if (!tl_tmr) {
- printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+ pr_err("Unable to allocate memory for tl_tmr\n");
goto release;
}
init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -384,14 +365,14 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
/*
* Allocate the LUN_RESET TMR
*/
- se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, (void *)tl_tmr,
+ se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr,
TMR_LUN_RESET);
if (IS_ERR(se_cmd->se_tmr_req))
goto release;
/*
* Locate the underlying TCM struct se_lun from sc->device->lun
*/
- if (transport_get_lun_for_tmr(se_cmd, sc->device->lun) < 0)
+ if (transport_lookup_tmr_lun(se_cmd, sc->device->lun) < 0)
goto release;
/*
* Queue the TMR to TCM Core and sleep waiting for tcm_loop_queue_tm_rsp()
@@ -407,7 +388,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
SUCCESS : FAILED;
release:
if (se_cmd)
- transport_generic_free_cmd(se_cmd, 1, 1, 0);
+ transport_generic_free_cmd(se_cmd, 1, 0);
else
kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
kfree(tl_tmr);
@@ -454,7 +435,7 @@ static int tcm_loop_driver_probe(struct device *dev)
sh = scsi_host_alloc(&tcm_loop_driver_template,
sizeof(struct tcm_loop_hba));
if (!sh) {
- printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+ pr_err("Unable to allocate struct scsi_host\n");
return -ENODEV;
}
tl_hba->sh = sh;
@@ -473,7 +454,7 @@ static int tcm_loop_driver_probe(struct device *dev)
error = scsi_add_host(sh, &tl_hba->dev);
if (error) {
- printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+ pr_err("%s: scsi_add_host failed\n", __func__);
scsi_host_put(sh);
return -ENODEV;
}
@@ -514,7 +495,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host
ret = device_register(&tl_hba->dev);
if (ret) {
- printk(KERN_ERR "device_register() failed for"
+ pr_err("device_register() failed for"
" tl_hba->dev: %d\n", ret);
return -ENODEV;
}
@@ -532,24 +513,24 @@ static int tcm_loop_alloc_core_bus(void)
tcm_loop_primary = root_device_register("tcm_loop_0");
if (IS_ERR(tcm_loop_primary)) {
- printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+ pr_err("Unable to allocate tcm_loop_primary\n");
return PTR_ERR(tcm_loop_primary);
}
ret = bus_register(&tcm_loop_lld_bus);
if (ret) {
- printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+ pr_err("bus_register() failed for tcm_loop_lld_bus\n");
goto dev_unreg;
}
ret = driver_register(&tcm_loop_driverfs);
if (ret) {
- printk(KERN_ERR "driver_register() failed for"
+ pr_err("driver_register() failed for"
"tcm_loop_driverfs\n");
goto bus_unreg;
}
- printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+ pr_debug("Initialized TCM Loop Core Bus\n");
return ret;
bus_unreg:
@@ -565,7 +546,7 @@ static void tcm_loop_release_core_bus(void)
bus_unregister(&tcm_loop_lld_bus);
root_device_unregister(tcm_loop_primary);
- printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+ pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
@@ -593,7 +574,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
case SCSI_PROTOCOL_ISCSI:
return iscsi_get_fabric_proto_ident(se_tpg);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -649,7 +630,7 @@ static u32 tcm_loop_get_pr_transport_id(
return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
format_code, buf);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -679,7 +660,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
format_code);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -713,7 +694,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
port_nexus_ptr);
default:
- printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+ pr_err("Unknown tl_proto_id: 0x%02x, using"
" SAS emulation\n", tl_hba->tl_proto_id);
break;
}
@@ -762,7 +743,7 @@ static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
if (!tl_nacl) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+ pr_err("Unable to allocate struct tcm_loop_nacl\n");
return NULL;
}
@@ -784,16 +765,6 @@ static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
return 1;
}
-static void tcm_loop_new_cmd_failure(struct se_cmd *se_cmd)
-{
- /*
- * Since TCM_loop is already passing struct scatterlist data from
- * struct scsi_cmnd, no more Linux/SCSI failure dependent state need
- * to be handled here.
- */
- return;
-}
-
static int tcm_loop_is_state_remove(struct se_cmd *se_cmd)
{
/*
@@ -882,7 +853,7 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+ pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
sc->result = SAM_STAT_GOOD;
@@ -897,14 +868,14 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd)
struct tcm_loop_cmd, tl_se_cmd);
struct scsi_cmnd *sc = tl_cmd->sc;
- TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+ pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
" cdb: 0x%02x\n", sc, sc->cmnd[0]);
if (se_cmd->sense_buffer &&
((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
- memcpy((void *)sc->sense_buffer, (void *)se_cmd->sense_buffer,
+ memcpy(sc->sense_buffer, se_cmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
sc->result = SAM_STAT_CHECK_CONDITION;
set_driver_byte(sc, DRIVER_SENSE);
@@ -972,7 +943,7 @@ static int tcm_loop_port_link(
*/
scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+ pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
return 0;
}
@@ -990,7 +961,7 @@ static void tcm_loop_port_unlink(
sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
se_lun->unpacked_lun);
if (!sd) {
- printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+ pr_err("Unable to locate struct scsi_device for %d:%d:"
"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
return;
}
@@ -1003,7 +974,7 @@ static void tcm_loop_port_unlink(
atomic_dec(&tl_tpg->tl_tpg_port_count);
smp_mb__after_atomic_dec();
- printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+ pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
/* End items for tcm_loop_port_cit */
@@ -1020,14 +991,14 @@ static int tcm_loop_make_nexus(
int ret = -ENOMEM;
if (tl_tpg->tl_hba->tl_nexus) {
- printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+ pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
return -EEXIST;
}
se_tpg = &tl_tpg->tl_se_tpg;
tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
if (!tl_nexus) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+ pr_err("Unable to allocate struct tcm_loop_nexus\n");
return -ENOMEM;
}
/*
@@ -1054,9 +1025,9 @@ static int tcm_loop_make_nexus(
* transport_register_session()
*/
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
- tl_nexus->se_sess, (void *)tl_nexus);
+ tl_nexus->se_sess, tl_nexus);
tl_tpg->tl_hba->tl_nexus = tl_nexus;
- printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+ pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
name);
return 0;
@@ -1082,13 +1053,13 @@ static int tcm_loop_drop_nexus(
return -ENODEV;
if (atomic_read(&tpg->tl_tpg_port_count)) {
- printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+ pr_err("Unable to remove TCM_Loop I_T Nexus with"
" active TPG port count: %d\n",
atomic_read(&tpg->tl_tpg_port_count));
return -EPERM;
}
- printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+ pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
tl_nexus->se_sess->se_node_acl->initiatorname);
/*
@@ -1144,7 +1115,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
* tcm_loop_make_nexus()
*/
if (strlen(page) >= TL_WWN_ADDR_LEN) {
- printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+ pr_err("Emulated NAA Sas Address: %s, exceeds"
" max: %d\n", page, TL_WWN_ADDR_LEN);
return -EINVAL;
}
@@ -1153,7 +1124,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "naa.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
- printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+ pr_err("Passed SAS Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1164,7 +1135,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "fc.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
- printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+ pr_err("Passed FCP Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1175,7 +1146,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
ptr = strstr(i_port, "iqn.");
if (ptr) {
if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
- printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+ pr_err("Passed iSCSI Initiator Port %s does not"
" match target port protoid: %s\n", i_port,
tcm_loop_dump_proto_id(tl_hba));
return -EINVAL;
@@ -1183,7 +1154,7 @@ static ssize_t tcm_loop_tpg_store_nexus(
port_ptr = &i_port[0];
goto check_newline;
}
- printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+ pr_err("Unable to locate prefix for emulated Initiator Port:"
" %s\n", i_port);
return -EINVAL;
/*
@@ -1223,15 +1194,15 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
tpgt_str = strstr(name, "tpgt_");
if (!tpgt_str) {
- printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+ pr_err("Unable to locate \"tpgt_#\" directory"
" group\n");
return ERR_PTR(-EINVAL);
}
tpgt_str += 5; /* Skip ahead of "tpgt_" */
tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
- if (tpgt > TL_TPGS_PER_HBA) {
- printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+ if (tpgt >= TL_TPGS_PER_HBA) {
+ pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
" %u\n", tpgt, TL_TPGS_PER_HBA);
return ERR_PTR(-EINVAL);
}
@@ -1242,12 +1213,12 @@ struct se_portal_group *tcm_loop_make_naa_tpg(
 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
*/
ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
- wwn, &tl_tpg->tl_se_tpg, (void *)tl_tpg,
+ wwn, &tl_tpg->tl_se_tpg, tl_tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return ERR_PTR(-ENOMEM);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+ pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
@@ -1274,7 +1245,7 @@ void tcm_loop_drop_naa_tpg(
*/
core_tpg_deregister(se_tpg);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+ pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
@@ -1295,7 +1266,7 @@ struct se_wwn *tcm_loop_make_scsi_hba(
tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
if (!tl_hba) {
- printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+ pr_err("Unable to allocate struct tcm_loop_hba\n");
return ERR_PTR(-ENOMEM);
}
/*
@@ -1314,22 +1285,21 @@ struct se_wwn *tcm_loop_make_scsi_hba(
goto check_len;
}
ptr = strstr(name, "iqn.");
- if (ptr) {
- tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
- goto check_len;
+ if (!ptr) {
+ pr_err("Unable to locate prefix for emulated Target "
+ "Port: %s\n", name);
+ ret = -EINVAL;
+ goto out;
}
-
- printk(KERN_ERR "Unable to locate prefix for emulated Target Port:"
- " %s\n", name);
- return ERR_PTR(-EINVAL);
+ tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;
check_len:
if (strlen(name) >= TL_WWN_ADDR_LEN) {
- printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+ pr_err("Emulated NAA %s Address: %s, exceeds"
" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
TL_WWN_ADDR_LEN);
- kfree(tl_hba);
- return ERR_PTR(-EINVAL);
+ ret = -EINVAL;
+ goto out;
}
snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);
@@ -1344,7 +1314,7 @@ check_len:
sh = tl_hba->sh;
tcm_loop_hba_no_cnt++;
- printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+ pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
" %s Address: %s at Linux/SCSI Host ID: %d\n",
tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
@@ -1367,7 +1337,7 @@ void tcm_loop_drop_scsi_hba(
*/
device_unregister(&tl_hba->dev);
- printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+ pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
" SAS Address: %s at Linux/SCSI Host ID: %d\n",
config_item_name(&wwn->wwn_group.cg_item), host_no);
}
@@ -1402,9 +1372,9 @@ static int tcm_loop_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
- if (!fabric) {
- printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
- return -1;
+ if (IS_ERR(fabric)) {
+ pr_err("tcm_loop_register_configfs() failed!\n");
+ return PTR_ERR(fabric);
}
/*
* Setup the fabric API of function pointers used by target_core_mod
@@ -1436,19 +1406,11 @@ static int tcm_loop_register_configfs(void)
&tcm_loop_tpg_release_fabric_acl;
fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
/*
- * Since tcm_loop is mapping physical memory from Linux/SCSI
- * struct scatterlist arrays for each struct scsi_cmnd I/O,
- * we do not need TCM to allocate a iovec array for
- * virtual memory address mappings
- */
- fabric->tf_ops.alloc_cmd_iovecs = NULL;
- /*
* Used for setting up remaining TCM resources in process context
*/
fabric->tf_ops.new_cmd_map = &tcm_loop_new_cmd_map;
fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
- fabric->tf_ops.release_cmd_to_pool = &tcm_loop_deallocate_core_cmd;
- fabric->tf_ops.release_cmd_direct = &tcm_loop_deallocate_core_cmd;
+ fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
fabric->tf_ops.close_session = &tcm_loop_close_session;
fabric->tf_ops.stop_session = &tcm_loop_stop_session;
@@ -1465,7 +1427,6 @@ static int tcm_loop_register_configfs(void)
&tcm_loop_set_default_node_attributes;
fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
- fabric->tf_ops.new_cmd_failure = &tcm_loop_new_cmd_failure;
fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
fabric->tf_ops.queue_status = &tcm_loop_queue_status;
fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
@@ -1503,7 +1464,7 @@ static int tcm_loop_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
- printk(KERN_ERR "target_fabric_configfs_register() for"
+ pr_err("target_fabric_configfs_register() for"
" TCM_Loop failed!\n");
target_fabric_configfs_free(fabric);
return -1;
@@ -1512,7 +1473,7 @@ static int tcm_loop_register_configfs(void)
* Setup our local pointer to *fabric.
*/
tcm_loop_fabric_configfs = fabric;
- printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+ pr_debug("TCM_LOOP[0] - Set fabric ->"
" tcm_loop_fabric_configfs\n");
return 0;
}
@@ -1524,7 +1485,7 @@ static void tcm_loop_deregister_configfs(void)
target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
tcm_loop_fabric_configfs = NULL;
- printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+ pr_debug("TCM_LOOP[0] - Cleared"
" tcm_loop_fabric_configfs\n");
}
@@ -1537,7 +1498,7 @@ static int __init tcm_loop_fabric_init(void)
__alignof__(struct tcm_loop_cmd),
0, NULL);
if (!tcm_loop_cmd_cache) {
- printk(KERN_ERR "kmem_cache_create() for"
+ pr_debug("kmem_cache_create() for"
" tcm_loop_cmd_cache failed\n");
return -ENOMEM;
}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab4554..6b76c7a22bb 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
*/
#define TL_SCSI_MAX_CMD_LEN 32
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...) printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
struct tcm_loop_cmd {
/* State of Linux/SCSI CDB+Data descriptor */
u32 sc_cmd_state;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 47abb42d9c3..98c98a3a025 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -46,6 +46,14 @@ static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
+static u16 alua_lu_gps_counter;
+static u32 alua_lu_gps_count;
+
+static DEFINE_SPINLOCK(lu_gps_lock);
+static LIST_HEAD(lu_gps_list);
+
+struct t10_alua_lu_gp *default_lu_gp;
+
/*
* REPORT_TARGET_PORT_GROUPS
*
@@ -53,16 +61,18 @@ static int core_alua_set_tg_pt_secondary_state(
*/
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
- struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
+ struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ buf = transport_kmap_first_data_page(cmd);
+
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
/*
* PREF: Preferred target port bit, determine if this
@@ -124,7 +134,7 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH set in the header of the DataIN Payload
*/
@@ -133,6 +143,8 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
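
/*
 * The response payload is no longer addressed through t_task_buf; it is
 * mapped from the command's first data page with
 * transport_kmap_first_data_page() and unmapped again before returning.
 */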
@@ -143,45 +155,53 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)
*/
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
- struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
- struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
+ struct se_device *dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
+ struct se_port *port, *l_port = cmd->se_lun->lun_sep;
+ struct se_node_acl *nacl = cmd->se_sess->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
- unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
+ unsigned char *buf;
+ unsigned char *ptr;
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!(l_port))
+ if (!l_port)
return PYX_TRANSPORT_LU_COMM_FAILURE;
+
+ buf = transport_kmap_first_data_page(cmd);
+
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
- if (!(l_tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ if (!l_tg_pt_gp_mem) {
+ pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
- if (!(l_tg_pt_gp)) {
+ if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (!(rc)) {
- printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+ if (!rc) {
+ pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ goto out;
}
+ ptr = &buf[4]; /* Skip over RESERVED area in header */
+
while (len < cmd->data_length) {
alua_access_state = (ptr[0] & 0x0f);
/*
@@ -201,7 +221,8 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
}
rc = -1;
/*
@@ -224,11 +245,11 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &T10_ALUA(su_dev)->tg_pt_gps_list,
+ &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -236,24 +257,26 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
rc = core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
alua_access_state, 1);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
break;
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
 * If no matching target port group ID can be located
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
- if (rc != 0)
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (rc != 0) {
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
} else {
/*
 * Extract the RELATIVE TARGET PORT IDENTIFIER to identify
@@ -287,14 +310,19 @@ int core_emulate_set_target_port_groups(struct se_cmd *cmd)
* be located, throw an exception with ASCQ:
* INVALID_PARAMETER_LIST
*/
- if (rc != 0)
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ if (rc != 0) {
+ rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ goto out;
+ }
}
ptr += 4;
len += 4;
}
+out:
+ transport_kunmap_first_data_page(cmd);
+
 return rc;
}
@@ -464,13 +492,13 @@ static int core_alua_state_check(
unsigned char *cdb,
u8 *alua_ascq)
{
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_lun *lun = cmd->se_lun;
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
- if (!(port))
+ if (!port)
return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
@@ -478,7 +506,7 @@ static int core_alua_state_check(
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
- printk(KERN_INFO "ALUA: Got secondary offline status for local"
+ pr_debug("ALUA: Got secondary offline status for local"
" target port\n");
return 1;
@@ -520,9 +548,9 @@ static int core_alua_state_check(
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
- printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+ pr_err("Unknown ALUA access state: 0x%02x\n",
out_alua_state);
- return -1;
+ return -EINVAL;
}
return 0;
@@ -552,8 +580,8 @@ static int core_alua_check_transition(int state, int *primary)
*primary = 0;
break;
default:
- printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
- return -1;
+ pr_err("Unknown ALUA access state: 0x%02x\n", state);
+ return -EINVAL;
}
return 0;
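Two mechanical conversions recur through this file: printk(KERN_*) calls become pr_err()/pr_warn()/pr_debug(), and bare -1 returns become negative errno values. A kernel-style sketch of the pattern follows; the pr_fmt prefix shown is purely illustrative (this commit does not add one).

    /* Sketch only -- not from this patch. pr_fmt(), if defined before
     * <linux/printk.h> is pulled in, prefixes every pr_*() call in the file;
     * pr_debug() additionally compiles to nothing unless DEBUG or
     * CONFIG_DYNAMIC_DEBUG is set, which is why chatty KERN_INFO messages
     * above are demoted to pr_debug(). */
    #define pr_fmt(fmt) "target_alua: " fmt    /* illustrative prefix */

    #include <linux/printk.h>
    #include <linux/errno.h>

    static int check_state_sketch(int state)
    {
        if (state > 0x0f) {
            pr_err("Unknown ALUA access state: 0x%02x\n", state);
            return -EINVAL;    /* errno-style, replacing bare -1 */
        }
        pr_debug("state 0x%02x accepted\n", state);
        return 0;
    }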
@@ -610,7 +638,7 @@ int core_alua_check_nonop_delay(
* The ALUA Active/NonOptimized access state delay can be disabled
* in via configfs with a value of zero
*/
- if (!(cmd->alua_nonop_delay))
+ if (!cmd->alua_nonop_delay)
return 0;
/*
* struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -639,7 +667,7 @@ static int core_alua_write_tpg_metadata(
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+ pr_err("filp_open(%s) for ALUA metadata failed\n",
path);
return -ENODEV;
}
@@ -653,7 +681,7 @@ static int core_alua_write_tpg_metadata(
set_fs(old_fs);
if (ret < 0) {
- printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+ pr_err("Error writing ALUA metadata file: %s\n", path);
filp_close(file, NULL);
return -EIO;
}
@@ -750,7 +778,7 @@ static int core_alua_do_transition_tg_pt(
* se_deve->se_lun_acl pointer may be NULL for an
* entry created without explicit Node+MappedLUN ACLs
*/
- if (!(lacl))
+ if (!lacl)
continue;
if (explict &&
@@ -792,7 +820,7 @@ static int core_alua_do_transition_tg_pt(
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
- printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -823,8 +851,8 @@ int core_alua_do_port_transition(
return -EINVAL;
md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
- if (!(md_buf)) {
- printk("Unable to allocate buf for ALUA metadata\n");
+ if (!md_buf) {
+ pr_err("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}
@@ -839,7 +867,7 @@ int core_alua_do_port_transition(
* we only do transition on the passed *l_tp_pt_gp, and not
* on all of the matching target port groups IDs in default_lu_gp.
*/
- if (!(lu_gp->lu_gp_id)) {
+ if (!lu_gp->lu_gp_id) {
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -866,12 +894,12 @@ int core_alua_do_port_transition(
smp_mb__after_atomic_inc();
spin_unlock(&lu_gp->lu_gp_lock);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
- &T10_ALUA(su_dev)->tg_pt_gps_list,
+ &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
/*
* If the target behavior port asymmetric access state
@@ -893,7 +921,7 @@ int core_alua_do_port_transition(
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
@@ -901,11 +929,11 @@ int core_alua_do_port_transition(
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
@@ -913,7 +941,7 @@ int core_alua_do_port_transition(
}
spin_unlock(&lu_gp->lu_gp_lock);
- printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+ pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@@ -942,11 +970,11 @@ static int core_alua_update_tpg_secondary_metadata(
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
- if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
@@ -954,7 +982,7 @@ static int core_alua_update_tpg_secondary_metadata(
port->sep_tg_pt_secondary_stat);
snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
- TPG_TFO(se_tpg)->get_fabric_name(), wwn,
+ se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
return core_alua_write_tpg_metadata(path, md_buf, len);
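The metadata written above is a small key=value text blob under /var/target/alua/. A runnable user-space sketch of the same formatting; the fabric name, WWN and LUN are made-up values.

    #include <stdio.h>

    int main(void)
    {
        char md_buf[64], path[128];
        int offline = 1, status = 0x01;    /* illustrative state */
        int len;

        /* Same format strings as core_alua_update_tpg_secondary_metadata() */
        len = snprintf(md_buf, sizeof(md_buf),
                       "alua_tg_pt_offline=%d\n"
                       "alua_tg_pt_status=0x%02x\n", offline, status);
        snprintf(path, sizeof(path), "/var/target/alua/%s/%s/lun_%u",
                 "iscsi", "iqn.2003-01.org.example:t1+1", 0u);

        printf("%s:\n%.*s", path, len, md_buf);
        return 0;
    }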
@@ -973,11 +1001,11 @@ static int core_alua_set_tg_pt_secondary_state(
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!(tg_pt_gp)) {
+ if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_ERR "Unable to complete secondary state"
+ pr_err("Unable to complete secondary state"
" transition\n");
- return -1;
+ return -EINVAL;
}
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
@@ -994,7 +1022,7 @@ static int core_alua_set_tg_pt_secondary_state(
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
- printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+ pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@@ -1012,10 +1040,10 @@ static int core_alua_set_tg_pt_secondary_state(
*/
if (port->sep_tg_pt_secondary_write_md) {
md_buf = kzalloc(md_buf_len, GFP_KERNEL);
- if (!(md_buf)) {
- printk(KERN_ERR "Unable to allocate md_buf for"
+ if (!md_buf) {
+ pr_err("Unable to allocate md_buf for"
" secondary ALUA access metadata\n");
- return -1;
+ return -ENOMEM;
}
mutex_lock(&port->sep_tg_pt_md_mutex);
core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
@@ -1034,19 +1062,19 @@ core_alua_allocate_lu_gp(const char *name, int def_group)
struct t10_alua_lu_gp *lu_gp;
lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
- if (!(lu_gp)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+ if (!lu_gp) {
+ pr_err("Unable to allocate struct t10_alua_lu_gp\n");
return ERR_PTR(-ENOMEM);
}
- INIT_LIST_HEAD(&lu_gp->lu_gp_list);
+ INIT_LIST_HEAD(&lu_gp->lu_gp_node);
INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
spin_lock_init(&lu_gp->lu_gp_lock);
atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
if (def_group) {
- lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
+ lu_gp->lu_gp_id = alua_lu_gps_counter++;
lu_gp->lu_gp_valid_id = 1;
- se_global->alua_lu_gps_count++;
+ alua_lu_gps_count++;
}
return lu_gp;
@@ -1060,41 +1088,41 @@ int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
* The lu_gp->lu_gp_id may only be set once.
*/
if (lu_gp->lu_gp_valid_id) {
- printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+ pr_warn("ALUA LU Group already has a valid ID,"
" ignoring request\n");
- return -1;
+ return -EINVAL;
}
- spin_lock(&se_global->lu_gps_lock);
- if (se_global->alua_lu_gps_count == 0x0000ffff) {
- printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
+ spin_lock(&lu_gps_lock);
+ if (alua_lu_gps_count == 0x0000ffff) {
+ pr_err("Maximum ALUA alua_lu_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
- return -1;
+ return -ENOSPC;
}
again:
lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
- se_global->alua_lu_gps_counter++;
+ alua_lu_gps_counter++;
- list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
+ list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
- if (!(lu_gp_id))
+ if (!lu_gp_id)
goto again;
- printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+ pr_warn("ALUA Logical Unit Group ID: %hu"
" already exists, ignoring request\n",
lu_gp_id);
- spin_unlock(&se_global->lu_gps_lock);
- return -1;
+ spin_unlock(&lu_gps_lock);
+ return -EINVAL;
}
}
lu_gp->lu_gp_id = lu_gp_id_tmp;
lu_gp->lu_gp_valid_id = 1;
- list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
- se_global->alua_lu_gps_count++;
- spin_unlock(&se_global->lu_gps_lock);
+ list_add_tail(&lu_gp->lu_gp_node, &lu_gps_list);
+ alua_lu_gps_count++;
+ spin_unlock(&lu_gps_lock);
return 0;
}
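core_alua_set_lu_gp_id() above (and its tg_pt_gp twin later in this diff) implements one allocation policy: an explicit non-zero ID must be unique, while ID 0 requests auto-assignment from a counter that retries past collisions. A standalone, runnable sketch of that policy; the array stands in for the kernel's spinlock-protected list.

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t ids[16];
    static int nids;
    static uint16_t counter = 1;

    static int in_use(uint16_t id)
    {
        for (int i = 0; i < nids; i++)
            if (ids[i] == id)
                return 1;
        return 0;
    }

    static int alloc_id(uint16_t requested, uint16_t *out)
    {
        uint16_t tmp;
    again:
        tmp = requested ? requested : counter++;
        if (in_use(tmp)) {
            if (!requested)
                goto again;    /* auto-assign: try the next counter value */
            return -1;         /* explicit ID collides: reject, as above */
        }
        ids[nids++] = tmp;
        *out = tmp;
        return 0;
    }

    int main(void)
    {
        uint16_t id;

        alloc_id(0, &id);
        printf("auto-assigned: %u\n", id);
        alloc_id(5, &id);
        printf("explicit: %u\n", id);
        printf("explicit collision -> %d\n", alloc_id(5, &id));
        return 0;
    }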
@@ -1105,8 +1133,8 @@ core_alua_allocate_lu_gp_mem(struct se_device *dev)
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
- if (!(lu_gp_mem)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+ if (!lu_gp_mem) {
+ pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@@ -1130,11 +1158,11 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* no associations can be made while we are releasing
* struct t10_alua_lu_gp.
*/
- spin_lock(&se_global->lu_gps_lock);
+ spin_lock(&lu_gps_lock);
atomic_set(&lu_gp->lu_gp_shutdown, 1);
- list_del(&lu_gp->lu_gp_list);
- se_global->alua_lu_gps_count--;
- spin_unlock(&se_global->lu_gps_lock);
+ list_del(&lu_gp->lu_gp_node);
+ alua_lu_gps_count--;
+ spin_unlock(&lu_gps_lock);
/*
* Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
* in target_core_configfs.c:target_core_store_alua_lu_gp() to be
@@ -1165,9 +1193,9 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
* we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
- if (lu_gp != se_global->default_lu_gp)
+ if (lu_gp != default_lu_gp)
__core_alua_attach_lu_gp_mem(lu_gp_mem,
- se_global->default_lu_gp);
+ default_lu_gp);
else
lu_gp_mem->lu_gp = NULL;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
@@ -1182,7 +1210,7 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1190,7 +1218,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
return;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem))
+ if (!lu_gp_mem)
return;
while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@@ -1198,7 +1226,7 @@ void core_alua_free_lu_gp_mem(struct se_device *dev)
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
spin_lock(&lu_gp->lu_gp_lock);
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
@@ -1218,27 +1246,27 @@ struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
struct t10_alua_lu_gp *lu_gp;
struct config_item *ci;
- spin_lock(&se_global->lu_gps_lock);
- list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
- if (!(lu_gp->lu_gp_valid_id))
+ spin_lock(&lu_gps_lock);
+ list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
+ if (!lu_gp->lu_gp_valid_id)
continue;
ci = &lu_gp->lu_gp_group.cg_item;
- if (!(strcmp(config_item_name(ci), name))) {
+ if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&lu_gp->lu_gp_ref_cnt);
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
return lu_gp;
}
}
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
return NULL;
}
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
- spin_lock(&se_global->lu_gps_lock);
+ spin_lock(&lu_gps_lock);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
- spin_unlock(&se_global->lu_gps_lock);
+ spin_unlock(&lu_gps_lock);
}
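For context on the bulk of the churn in these hunks: the series removes accessor macros in favor of direct member access. The retired macros were thin wrappers roughly like the following, reconstructed from memory of the old target_core_base.h, so treat the exact spellings as an assumption.

    /* Reconstruction, not part of this patch:
     *
     *    #define T10_ALUA(su_dev)   (&(su_dev)->t10_alua)
     *    #define SE_DEV(cmd)        ((cmd)->se_lun->lun_se_dev)
     *    #define DEV_ATTRIB(dev)    (&(dev)->se_sub_dev->se_dev_attrib)
     *    #define TPG_TFO(se_tpg)    ((se_tpg)->se_tpg_tfo)
     *
     * which makes every -/+ pair above a pure spelling change, e.g.:
     *
     *    spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);    // before
     *    spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);     // after
     */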
/*
@@ -1279,8 +1307,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp;
tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
- if (!(tg_pt_gp)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+ if (!tg_pt_gp) {
+ pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@@ -1304,14 +1332,14 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
if (def_group) {
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ su_dev->t10_alua.alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
- T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
+ su_dev->t10_alua.alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &T10_ALUA(su_dev)->tg_pt_gps_list);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ &su_dev->t10_alua.tg_pt_gps_list);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}
return tg_pt_gp;
@@ -1328,42 +1356,42 @@ int core_alua_set_tg_pt_gp_id(
* The tg_pt_gp->tg_pt_gp_id may only be set once.
*/
if (tg_pt_gp->tg_pt_gp_valid_id) {
- printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+ pr_warn("ALUA TG PT Group already has a valid ID,"
" ignoring request\n");
- return -1;
+ return -EINVAL;
}
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
- printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
+ pr_err("Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
- return -1;
+ return -ENOSPC;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
+ su_dev->t10_alua.alua_tg_pt_gps_counter++;
- list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
- if (!(tg_pt_gp_id))
+ if (!tg_pt_gp_id)
goto again;
- printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+ pr_err("ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- return -1;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
+ return -EINVAL;
}
}
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
- &T10_ALUA(su_dev)->tg_pt_gps_list);
- T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ &su_dev->t10_alua.tg_pt_gps_list);
+ su_dev->t10_alua.alua_tg_pt_gps_count++;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return 0;
}
@@ -1375,8 +1403,8 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
GFP_KERNEL);
- if (!(tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+ if (!tg_pt_gp_mem) {
+ pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1403,10 +1431,10 @@ void core_alua_free_tg_pt_gp(
* no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
- T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ su_dev->t10_alua.alua_tg_pt_gps_counter--;
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
@@ -1438,9 +1466,9 @@ void core_alua_free_tg_pt_gp(
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
+ if (tg_pt_gp != su_dev->t10_alua.default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -1455,7 +1483,7 @@ void core_alua_free_tg_pt_gp(
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
@@ -1463,7 +1491,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem))
+ if (!tg_pt_gp_mem)
return;
while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@@ -1471,7 +1499,7 @@ void core_alua_free_tg_pt_gp_mem(struct se_port *port)
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
if (tg_pt_gp_mem->tg_pt_gp_assoc) {
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1493,19 +1521,19 @@ static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
- list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
+ list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
tg_pt_gp_list) {
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
- if (!(strcmp(config_item_name(ci), name))) {
+ if (!strcmp(config_item_name(ci), name)) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return tg_pt_gp;
}
}
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
return NULL;
}
@@ -1515,9 +1543,9 @@ static void core_alua_put_tg_pt_gp_from_name(
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
- spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
- spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
+ spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
}
/*
@@ -1555,7 +1583,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
@@ -1564,12 +1592,12 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
return len;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem))
+ if (!tg_pt_gp_mem)
return len;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
" %hu\nTG Port Primary Access State: %s\nTG Port "
@@ -1605,16 +1633,16 @@ ssize_t core_alua_store_tg_pt_gp_info(
tpg = port->sep_tpg;
lun = port->sep_lun;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
- printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
- " %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+ pr_warn("SPC3_ALUA_EMULATED not enabled for"
+ " %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
return -EINVAL;
}
if (count > TG_PT_GROUP_NAME_BUF) {
- printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+ pr_err("ALUA Target Port Group alias too large!\n");
return -EINVAL;
}
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@@ -1631,31 +1659,31 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
strstrip(buf));
- if (!(tg_pt_gp_new))
+ if (!tg_pt_gp_new)
return -ENODEV;
}
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem)) {
+ if (!tg_pt_gp_mem) {
if (tg_pt_gp_new)
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
- printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+ pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
return -EINVAL;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if ((tg_pt_gp)) {
+ if (tg_pt_gp) {
/*
* Clearing an existing tg_pt_gp association, and replacing
* with the default_tg_pt_gp.
*/
- if (!(tg_pt_gp_new)) {
- printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+ if (!tg_pt_gp_new) {
+ pr_debug("Target_Core_ConfigFS: Moving"
" %s/tpgt_%hu/%s from ALUA Target Port Group:"
" alua/%s, ID: %hu back to"
" default_tg_pt_gp\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(
&tg_pt_gp->tg_pt_gp_group.cg_item),
@@ -1663,7 +1691,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
@@ -1679,10 +1707,10 @@ ssize_t core_alua_store_tg_pt_gp_info(
*/
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+ pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
- "Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ "Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
tg_pt_gp_new->tg_pt_gp_id);
@@ -1716,11 +1744,11 @@ ssize_t core_alua_store_access_type(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_access_type\n");
+ pr_err("Unable to extract alua_access_type\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
- printk(KERN_ERR "Illegal value for alua_access_type:"
+ pr_err("Illegal value for alua_access_type:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -1754,11 +1782,11 @@ ssize_t core_alua_store_nonop_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+ pr_err("Unable to extract nonop_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
- printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+ pr_err("Passed nonop_delay_msecs: %lu, exceeds"
" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_NONOP_DELAY_MSECS);
return -EINVAL;
@@ -1785,11 +1813,11 @@ ssize_t core_alua_store_trans_delay_msecs(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+ pr_err("Unable to extract trans_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
- printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+ pr_err("Passed trans_delay_msecs: %lu, exceeds"
" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_TRANS_DELAY_MSECS);
return -EINVAL;
@@ -1816,11 +1844,11 @@ ssize_t core_alua_store_preferred_bit(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+ pr_err("Unable to extract preferred ALUA value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+ pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_pref = (int)tmp;
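Every core_alua_store_*() handler above shares one parse-and-validate shape. A condensed kernel-style sketch of it; the handler name is invented, and strict_strtoul() is this era's parsing helper (later kernels use kstrtoul()).

    #include <linux/kernel.h>
    #include <linux/errno.h>

    /* Sketch only: the common store-attribute skeleton used above. */
    static ssize_t example_store_flag(const char *page, size_t count, int *flag)
    {
        unsigned long tmp;
        int ret;

        ret = strict_strtoul(page, 0, &tmp);
        if (ret < 0) {
            pr_err("Unable to extract flag value\n");
            return -EINVAL;
        }
        if ((tmp != 0) && (tmp != 1)) {
            pr_err("Illegal value for flag: %lu\n", tmp);
            return -EINVAL;
        }
        *flag = (int)tmp;
        return count;    /* configfs convention: consumed byte count */
    }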
@@ -1830,7 +1858,7 @@ ssize_t core_alua_store_preferred_bit(
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
- if (!(lun->lun_sep))
+ if (!lun->lun_sep)
return -ENODEV;
return sprintf(page, "%d\n",
@@ -1846,22 +1874,22 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (!(lun->lun_sep))
+ if (!lun->lun_sep)
return -ENODEV;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+ pr_err("Unable to extract alua_tg_pt_offline value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+ pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
- if (!(tg_pt_gp_mem)) {
- printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+ if (!tg_pt_gp_mem) {
+ pr_err("Unable to locate *tg_pt_gp_mem\n");
return -EINVAL;
}
@@ -1890,13 +1918,13 @@ ssize_t core_alua_store_secondary_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+ pr_err("Unable to extract alua_tg_pt_status\n");
return -EINVAL;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+ pr_err("Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
@@ -1923,11 +1951,11 @@ ssize_t core_alua_store_secondary_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+ pr_err("Unable to extract alua_tg_pt_write_md\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+ pr_err("Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -1939,7 +1967,7 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_alua *alua = T10_ALUA(su_dev);
+ struct t10_alua *alua = &su_dev->t10_alua;
struct t10_alua_lu_gp_member *lu_gp_mem;
/*
* If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
@@ -1947,44 +1975,44 @@ int core_setup_alua(struct se_device *dev, int force_pt)
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate SCSI logic themselves.
*/
- if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
+ if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
alua->alua_type = SPC_ALUA_PASSTHROUGH;
alua->alua_state_check = &core_alua_state_check_nop;
- printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+ " emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated ALUA.
*/
- if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
- printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
- " device\n", TRANSPORT(dev)->name);
+ if (dev->transport->get_device_rev(dev) >= SCSI_3) {
+ pr_debug("%s: Enabling ALUA Emulation for SPC-3"
+ " device\n", dev->transport->name);
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
*/
lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
- if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
- return -1;
+ if (IS_ERR(lu_gp_mem))
+ return PTR_ERR(lu_gp_mem);
alua->alua_type = SPC3_ALUA_EMULATED;
alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
- se_global->default_lu_gp);
+ default_lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+ pr_debug("%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
- TRANSPORT(dev)->name);
+ dev->transport->name);
} else {
alua->alua_type = SPC2_ALUA_DISABLED;
alua->alua_state_check = &core_alua_state_check_nop;
- printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
- " device\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Disabling ALUA Emulation for SPC-2"
+ " device\n", dev->transport->name);
}
return 0;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 7f19c8b7b84..8ae09a1bdf7 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -23,6 +23,7 @@
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/kernel.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -64,20 +65,22 @@ target_fill_alua_data(struct se_port *port, unsigned char *buf)
static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
- struct se_lun *lun = SE_LUN(cmd);
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_lun *lun = cmd->se_lun;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
/*
* Make sure we at least have 6 bytes of INQUIRY response
* payload going back for EVPD=0
*/
if (cmd->data_length < 6) {
- printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length);
- return -1;
+ return -EINVAL;
}
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = dev->transport->get_device_type(dev);
if (buf[0] == TYPE_TAPE)
buf[1] = 0x80;
@@ -86,12 +89,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
- if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
+ if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
target_fill_alua_data(lun->lun_sep, buf);
if (cmd->data_length < 8) {
buf[4] = 1; /* Set additional length to 1 */
- return 0;
+ goto out;
}
buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
@@ -102,40 +105,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
*/
if (cmd->data_length < 36) {
buf[4] = 3; /* Set additional length to 3 */
- return 0;
+ goto out;
}
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
snprintf((unsigned char *)&buf[16], 16, "%s",
- &DEV_T10_WWN(dev)->model[0]);
+ &dev->se_sub_dev->t10_wwn.model[0]);
snprintf((unsigned char *)&buf[32], 4, "%s",
- &DEV_T10_WWN(dev)->revision[0]);
+ &dev->se_sub_dev->t10_wwn.revision[0]);
buf[4] = 31; /* Set additional length to 31 */
- return 0;
-}
-
-/* supported vital product data pages */
-static int
-target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
-{
- buf[1] = 0x00;
- if (cmd->data_length < 8)
- return 0;
-
- buf[4] = 0x0;
- /*
- * Only report the INQUIRY EVPD=1 pages after a valid NAA
- * Registered Extended LUN WWN has been set via ConfigFS
- * during device creation/restart.
- */
- if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
- SDF_EMULATED_VPD_UNIT_SERIAL) {
- buf[3] = 3;
- buf[5] = 0x80;
- buf[6] = 0x83;
- buf[7] = 0x86;
- }
+out:
+ transport_kunmap_first_data_page(cmd);
return 0;
}
@@ -143,16 +124,15 @@ target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
u16 len = 0;
- buf[1] = 0x80;
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
unit_serial_len =
- strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -162,7 +142,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
return 0;
}
len += sprintf((unsigned char *)&buf[4], "%s",
- &DEV_T10_WWN(dev)->unit_serial[0]);
+ &dev->se_sub_dev->t10_wwn.unit_serial[0]);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -176,21 +156,18 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_device *dev = cmd->se_dev;
+ struct se_lun *lun = cmd->se_lun;
struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
- unsigned char binary, binary_new;
- unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
+ unsigned char *prod = &dev->se_sub_dev->t10_wwn.model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
- int i;
u16 len = 0, id_len;
- buf[1] = 0x83;
off = 4;
/*
@@ -210,11 +187,11 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
/* CODE SET == Binary */
buf[off++] = 0x1;
- /* Set ASSOICATION == addressed logical unit: 0)b */
+ /* Set ASSOCIATION == addressed logical unit: 00b */
buf[off] = 0x00;
/* Identifier/Designator type == NAA identifier */
- buf[off++] = 0x3;
+ buf[off++] |= 0x3;
off++;
/* Identifier/Designator length */
@@ -237,16 +214,9 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
* VENDOR_SPECIFIC_IDENTIFIER and
* VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
*/
- binary = transport_asciihex_to_binaryhex(
- &DEV_T10_WWN(dev)->unit_serial[0]);
- buf[off++] |= (binary & 0xf0) >> 4;
- for (i = 0; i < 24; i += 2) {
- binary_new = transport_asciihex_to_binaryhex(
- &DEV_T10_WWN(dev)->unit_serial[i+2]);
- buf[off] = (binary & 0x0f) << 4;
- buf[off++] |= (binary_new & 0xf0) >> 4;
- binary = binary_new;
- }
+ buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12);
+
len = 20;
off = (len + 4);
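The hunk above replaces an open-coded ASCII-hex walk with hex_to_bin()/hex2bin() from <linux/kernel.h> when packing the unit serial into the NAA IEEE Registered Extended designator. Below is a runnable user-space rendition; the structure is simplified (company-ID bytes omitted), hex_digit() stands in for the kernel's hex_to_bin(), and the serial value is illustrative.

    #include <stdio.h>

    static int hex_digit(char c)
    {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return 0;
    }

    int main(void)
    {
        const char *serial = "0123456789abcdef0123456789";    /* illustrative */
        unsigned char buf[16] = { 0 };
        int off = 0;

        buf[off] = 0x6 << 4;                /* NAA = 6h */
        buf[off++] |= hex_digit(serial[0]); /* first serial nibble */
        /* equivalent of hex2bin(&buf[off], &serial[1], 12) */
        for (int i = 0; i < 12; i++)
            buf[off + i] = (unsigned char)((hex_digit(serial[1 + 2 * i]) << 4) |
                                           hex_digit(serial[2 + 2 * i]));

        for (int i = 0; i < 13; i++)
            printf("%02x", buf[i]);
        printf("\n");
        return 0;
    }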
@@ -263,7 +233,7 @@ check_t10_vend_desc:
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len =
- strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
+ strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if ((len + (id_len + 4) +
@@ -274,7 +244,7 @@ check_t10_vend_desc:
}
id_len += sprintf((unsigned char *)&buf[off+12],
"%s:%s", prod,
- &DEV_T10_WWN(dev)->unit_serial[0]);
+ &dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
@@ -312,10 +282,10 @@ check_port:
goto check_tpgi;
}
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Relative target port identifier */
buf[off++] |= 0x4;
@@ -335,7 +305,7 @@ check_port:
* section 7.5.1 Table 362
*/
check_tpgi:
- if (T10_ALUA(dev->se_sub_dev)->alua_type !=
+ if (dev->se_sub_dev->t10_alua.alua_type !=
SPC3_ALUA_EMULATED)
goto check_scsi_name;
@@ -349,7 +319,7 @@ check_tpgi:
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
- if (!(tg_pt_gp)) {
+ if (!tg_pt_gp) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
goto check_lu_gp;
}
@@ -357,10 +327,10 @@ check_tpgi:
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Target port group identifier */
buf[off++] |= 0x5;
@@ -380,12 +350,12 @@ check_lu_gp:
goto check_scsi_name;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem))
+ if (!lu_gp_mem)
goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if (!(lu_gp)) {
+ if (!lu_gp) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name;
}
@@ -409,7 +379,7 @@ check_lu_gp:
* section 7.5.1 Table 362
*/
check_scsi_name:
- scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
+ scsi_name_len = strlen(tpg->se_tpg_tfo->tpg_get_wwn(tpg));
/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
scsi_name_len += 10;
/* Check for 4-byte padding */
@@ -424,10 +394,10 @@ check_scsi_name:
goto set_len;
}
buf[off] =
- (TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
+ (tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
- /* Set ASSOICATION == target port: 01b */
+ /* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == SCSI name string */
buf[off++] |= 0x8;
@@ -438,9 +408,9 @@ check_scsi_name:
* Target Port, this means "<iSCSI name>,t,0x<TPGT> in
* UTF-8 encoding.
*/
- tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
- TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
scsi_name_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
@@ -471,13 +441,12 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
if (cmd->data_length < 60)
return 0;
- buf[1] = 0x86;
buf[2] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
- if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
+ if (cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
@@ -486,7 +455,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
int have_tp = 0;
/*
@@ -494,27 +463,29 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
* emulate_tpu=1 or emulate_tpws=1 we will expect a
* different page length for Thin Provisioning.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
have_tp = 1;
if (cmd->data_length < (0x10 + 4)) {
- printk(KERN_INFO "Received data_length: %u"
+ pr_debug("Received data_length: %u"
" too small for EVPD 0xb0\n",
cmd->data_length);
- return -1;
+ return -EINVAL;
}
if (have_tp && cmd->data_length < (0x3c + 4)) {
- printk(KERN_INFO "Received data_length: %u"
+ pr_debug("Received data_length: %u"
" too small for TPE=1 EVPD 0xb0\n",
cmd->data_length);
have_tp = 0;
}
buf[0] = dev->transport->get_device_type(dev);
- buf[1] = 0xb0;
buf[3] = have_tp ? 0x3c : 0x10;
+ /* Set WSNZ to 1 */
+ buf[4] = 0x01;
+
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
@@ -523,12 +494,12 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP or the initiator sent a too
@@ -540,35 +511,51 @@ target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
- put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
+ put_unaligned_be32(dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment,
&buf[32]);
- if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
+ if (dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
return 0;
}
+/* Block Device Characteristics VPD page */
+static int
+target_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
+{
+ struct se_device *dev = cmd->se_dev;
+
+ buf[0] = dev->transport->get_device_type(dev);
+ buf[3] = 0x3c;
+
+ if (cmd->data_length >= 5 &&
+ dev->se_sub_dev->se_dev_attrib.is_nonrot)
+ buf[5] = 1;
+
+ return 0;
+}
+
/* Thin Provisioning VPD */
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
/*
* From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
@@ -579,7 +566,6 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* defined in table 162.
*/
buf[0] = dev->transport->get_device_type(dev);
- buf[1] = 0xb2;
/*
* Set Hardcoded length mentioned above for DP=0
@@ -602,7 +588,7 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu != 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu != 0)
buf[5] = 0x80;
/*
@@ -611,18 +597,59 @@ target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
- if (DEV_ATTRIB(dev)->emulate_tpws != 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
}
static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);
+
+static struct {
+ uint8_t page;
+ int (*emulate)(struct se_cmd *, unsigned char *);
+} evpd_handlers[] = {
+ { .page = 0x00, .emulate = target_emulate_evpd_00 },
+ { .page = 0x80, .emulate = target_emulate_evpd_80 },
+ { .page = 0x83, .emulate = target_emulate_evpd_83 },
+ { .page = 0x86, .emulate = target_emulate_evpd_86 },
+ { .page = 0xb0, .emulate = target_emulate_evpd_b0 },
+ { .page = 0xb1, .emulate = target_emulate_evpd_b1 },
+ { .page = 0xb2, .emulate = target_emulate_evpd_b2 },
+};
+
+/* supported vital product data pages */
+static int
+target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
+{
+ int p;
+
+ if (cmd->data_length < 8)
+ return 0;
+ /*
+ * Only report the INQUIRY EVPD=1 pages after a valid NAA
+ * Registered Extended LUN WWN has been set via ConfigFS
+ * during device creation/restart.
+ */
+ if (cmd->se_dev->se_sub_dev->su_dev_flags &
+ SDF_EMULATED_VPD_UNIT_SERIAL) {
+ buf[3] = ARRAY_SIZE(evpd_handlers);
+ for (p = 0; p < min_t(int, ARRAY_SIZE(evpd_handlers),
+ cmd->data_length - 4); ++p)
+ buf[p + 4] = evpd_handlers[p].page;
+ }
+
+ return 0;
+}
+
+static int
target_emulate_inquiry(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
- unsigned char *cdb = cmd->t_task->t_task_cdb;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ int p, ret;
if (!(cdb[1] & 0x1))
return target_emulate_inquiry_std(cmd);
@@ -635,38 +662,33 @@ target_emulate_inquiry(struct se_cmd *cmd)
* payload length left for the next outgoing EVPD metadata
*/
if (cmd->data_length < 4) {
- printk(KERN_ERR "SCSI Inquiry payload length: %u"
+ pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
- return -1;
+ return -EINVAL;
}
+
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = dev->transport->get_device_type(dev);
- switch (cdb[2]) {
- case 0x00:
- return target_emulate_evpd_00(cmd, buf);
- case 0x80:
- return target_emulate_evpd_80(cmd, buf);
- case 0x83:
- return target_emulate_evpd_83(cmd, buf);
- case 0x86:
- return target_emulate_evpd_86(cmd, buf);
- case 0xb0:
- return target_emulate_evpd_b0(cmd, buf);
- case 0xb2:
- return target_emulate_evpd_b2(cmd, buf);
- default:
- printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
- return -1;
- }
+ for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
+ if (cdb[2] == evpd_handlers[p].page) {
+ buf[1] = cdb[2];
+ ret = evpd_handlers[p].emulate(cmd, buf);
+ transport_kunmap_first_data_page(cmd);
+ return ret;
+ }
- return 0;
+ transport_kunmap_first_data_page(cmd);
+ pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ return -EINVAL;
}
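The new evpd_handlers[] table above replaces a switch, centralizes the buf[1] = cdb[2] PAGE CODE echo (which is why the per-page buf[1] assignments are deleted), and lets target_emulate_evpd_00() derive the "supported pages" list from the same data -- adding a page, as 0xb1 is added here, becomes a one-line table edit. A self-contained user-space sketch of that table-driven dispatch; the handlers are stubs.

    #include <stdio.h>
    #include <stdint.h>

    struct evpd_handler {
        uint8_t page;
        int (*emulate)(unsigned char *buf);
    };

    static int evpd_80(unsigned char *buf) { buf[4] = 'S';  return 0; } /* stub */
    static int evpd_83(unsigned char *buf) { buf[4] = 0x61; return 0; } /* stub */

    static const struct evpd_handler handlers[] = {
        { 0x80, evpd_80 },
        { 0x83, evpd_83 },
    };

    static int emulate_evpd(uint8_t page, unsigned char *buf)
    {
        for (size_t p = 0; p < sizeof(handlers) / sizeof(handlers[0]); p++) {
            if (handlers[p].page == page) {
                buf[1] = page;    /* PAGE CODE echoed once, centrally */
                return handlers[p].emulate(buf);
            }
        }
        fprintf(stderr, "Unknown VPD Code: 0x%02x\n", page);
        return -1;
    }

    int main(void)
    {
        unsigned char buf[8] = { 0 };
        int rc = emulate_evpd(0x83, buf);

        printf("0x83 -> %d (buf[1]=0x%02x)\n", rc, buf[1]);
        printf("0x99 -> %d\n", emulate_evpd(0x99, buf));
        return 0;
    }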
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
unsigned long long blocks_long = dev->transport->get_blocks(dev);
u32 blocks;
@@ -675,30 +697,36 @@ target_emulate_readcapacity(struct se_cmd *cmd)
else
blocks = (u32)blocks_long;
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
- buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
- buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
- buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
- buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
+ buf[4] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[5] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[6] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[7] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
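Both READ CAPACITY emulations above now kmap the payload and hand-pack big-endian fields. A runnable sketch of the 8-byte READ CAPACITY (10) layout, including the 0xFFFFFFFF sentinel that steers initiators to READ CAPACITY (16); note the hunk above also forces the sentinel whenever thin-provisioning emulation is enabled, a condition this sketch does not model.

    #include <stdio.h>
    #include <stdint.h>

    static void put_be32(uint8_t *p, uint32_t v)
    {
        p[0] = v >> 24;
        p[1] = v >> 16;
        p[2] = v >> 8;
        p[3] = v;
    }

    int main(void)
    {
        uint8_t buf[8];
        unsigned long long blocks_long = 1ULL << 33;    /* illustrative capacity */
        uint32_t blocks = (blocks_long >= 0xFFFFFFFFULL) ?
                          0xFFFFFFFF : (uint32_t)blocks_long;

        put_be32(&buf[0], blocks);    /* 0xFFFFFFFF => use READ CAPACITY (16) */
        put_be32(&buf[4], 512);       /* block size, big-endian */

        for (int i = 0; i < 8; i++)
            printf("%02x ", buf[i]);
        printf("\n");
        return 0;
    }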
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
@@ -707,17 +735,19 @@ target_emulate_readcapacity_16(struct se_cmd *cmd)
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
- buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
- buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
- buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
- buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
+ buf[8] = (dev->se_sub_dev->se_dev_attrib.block_size >> 24) & 0xff;
+ buf[9] = (dev->se_sub_dev->se_dev_attrib.block_size >> 16) & 0xff;
+ buf[10] = (dev->se_sub_dev->se_dev_attrib.block_size >> 8) & 0xff;
+ buf[11] = dev->se_sub_dev->se_dev_attrib.block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
- if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
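A sketch of the helper these conversions depend on, reconstructed for context rather than quoted from this patch:

    /* Context sketch -- reconstruction, not part of this patch. The pairing of
     * transport_kmap_first_data_page()/transport_kunmap_first_data_page() seen
     * throughout these hunks maps the first page of the command's data
     * scatterlist instead of the old contiguous cmd->t_task->t_task_buf,
     * roughly:
     *
     *    unsigned char *transport_kmap_first_data_page(struct se_cmd *cmd)
     *    {
     *        struct scatterlist *sg = cmd->t_data_sg;
     *
     *        return kmap(sg_page(sg)) + sg->offset;
     *    }
     *
     * Two consequences drive the surrounding changes: every exit path must
     * reach the matching kunmap (hence the new goto out/end labels), and a
     * control-CDB payload handled this way must fit within the first
     * scatterlist entry.
     */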
@@ -737,6 +767,35 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
p[1] = 0x0a;
p[2] = 2;
/*
+ * From spc4r23, 7.4.7 Control mode page
+ *
+ * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies
+ * restrictions on the algorithm used for reordering commands
+ * having the SIMPLE task attribute (see SAM-4).
+ *
+ * Table 368 -- QUEUE ALGORITHM MODIFIER field
+ * Code Description
+ * 0h Restricted reordering
+ * 1h Unrestricted reordering allowed
+ * 2h to 7h Reserved
+ * 8h to Fh Vendor specific
+ *
+ * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that
+ * the device server shall order the processing sequence of commands
+ * having the SIMPLE task attribute such that data integrity is maintained
+ * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol
+ * requests is halted at any time, the final value of all data observable
+ * on the medium shall be the same as if all the commands had been processed
+ * with the ORDERED task attribute).
+ *
+ * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the
+ * device server may reorder the processing sequence of commands having the
+ * SIMPLE task attribute in any manner. Any data integrity exposures related to
+ * command sequence order shall be explicitly handled by the application client
+ * through the selection of appropriate commands and task attributes.
+ */
+ p[3] = (dev->se_sub_dev->se_dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10;
+ /*
* From spc4r17, section 7.4.6 Control mode Page
*
* Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
@@ -765,8 +824,8 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
* to the number of commands completed with one of those status codes.
*/
- p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
- (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
+ p[4] = (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 :
+ (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
@@ -779,7 +838,7 @@ target_modesense_control(struct se_device *dev, unsigned char *p)
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
+ p[5] = (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
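The long spc4r23 quotation above backs a one-byte change: byte 3, bits 7..4 of the control mode page carry the QUEUE ALGORITHM MODIFIER, and the new emulate_rest_reord attribute selects between 0h (restricted reordering) and 1h (unrestricted). A tiny runnable sketch; the attribute value is illustrative.

    #include <stdio.h>

    int main(void)
    {
        int emulate_rest_reord = 0;    /* illustrative attribute value */
        unsigned char p[12] = { 0 };

        p[0] = 0x0a;    /* PAGE CODE: control mode page */
        p[1] = 0x0a;    /* PAGE LENGTH */
        p[3] = emulate_rest_reord ? 0x00 : 0x10;

        printf("QUEUE ALGORITHM MODIFIER: %xh\n", p[3] >> 4);
        return 0;
    }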
@@ -792,7 +851,7 @@ target_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
- if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
@@ -830,9 +889,9 @@ target_modesense_dpofua(unsigned char *buf, int type)
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
- struct se_device *dev = SE_DEV(cmd);
- char *cdb = cmd->t_task->t_task_cdb;
- unsigned char *rbuf = cmd->t_task->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ char *cdb = cmd->t_task_cdb;
+ unsigned char *rbuf;
int type = dev->transport->get_device_type(dev);
int offset = (ten) ? 8 : 4;
int length = 0;
@@ -856,7 +915,7 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
length += target_modesense_control(dev, &buf[offset+length]);
break;
default:
- printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+ pr_err("Got Unknown Mode Page: 0x%02x\n",
cdb[2] & 0x3f);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
}
@@ -867,13 +926,13 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
buf[0] = (offset >> 8) & 0xff;
buf[1] = offset & 0xff;
- if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[3], type);
- if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
- (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
@@ -883,19 +942,22 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
offset -= 1;
buf[0] = offset & 0xff;
- if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
+ if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[2], type);
- if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
- (DEV_ATTRIB(dev)->emulate_fua_write > 0))
+ if ((dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0) &&
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0))
target_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
}
+
+ rbuf = transport_kmap_first_data_page(cmd);
memcpy(rbuf, buf, offset);
+ transport_kunmap_first_data_page(cmd);
return 0;
}
@@ -903,16 +965,20 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
- unsigned char *cdb = cmd->t_task->t_task_cdb;
- unsigned char *buf = cmd->t_task->t_task_buf;
+ unsigned char *cdb = cmd->t_task_cdb;
+ unsigned char *buf;
u8 ua_asc = 0, ua_ascq = 0;
+ int err = 0;
if (cdb[1] & 0x01) {
- printk(KERN_ERR "REQUEST_SENSE description emulation not"
+ pr_err("REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+
+ buf = transport_kmap_first_data_page(cmd);
+
+ if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
@@ -924,7 +990,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
- return 0;
+ err = -EINVAL;
+ goto end;
}
/*
* The Additional Sense Code (ASC) from the UNIT ATTENTION
@@ -944,7 +1011,8 @@ target_emulate_request_sense(struct se_cmd *cmd)
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
- return 0;
+ err = -EINVAL;
+ goto end;
}
/*
* NO ADDITIONAL SENSE INFORMATION
@@ -953,6 +1021,9 @@ target_emulate_request_sense(struct se_cmd *cmd)
buf[7] = 0x0A;
}
+end:
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -963,13 +1034,13 @@ target_emulate_request_sense(struct se_cmd *cmd)
static int
target_emulate_unmap(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
- unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
- unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ unsigned char *buf, *ptr = NULL;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
- int ret, offset;
+ int ret = 0, offset;
unsigned short dl, bd_dl;
/* First UNMAP block descriptor starts at 8 byte offset */
@@ -977,21 +1048,24 @@ target_emulate_unmap(struct se_task *task)
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
+
+ buf = transport_kmap_first_data_page(cmd);
+
ptr = &buf[offset];
- printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+ pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
- printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+ pr_debug("UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
- printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+ pr_err("blkdev_issue_discard() failed: %d\n",
ret);
- return -1;
+ goto err;
}
ptr += 16;
@@ -1000,7 +1074,10 @@ target_emulate_unmap(struct se_task *task)
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
- return 0;
+err:
+ transport_kunmap_first_data_page(cmd);
+
+ return ret;
}
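The UNMAP emulation above now kmaps the parameter list and cleans up through err:. A runnable sketch of the descriptor walk itself: past the 8-byte header, each 16-byte block descriptor is a big-endian 64-bit LBA followed by a 32-bit block count. Buffer contents here are illustrative.

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t be64(const uint8_t *p)
    {
        uint64_t v = 0;

        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];
        return v;
    }

    static uint32_t be32(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8) | p[3];
    }

    int main(void)
    {
        uint8_t pl[24] = { 0 };    /* 8-byte header + one descriptor */

        pl[15] = 0x10;             /* LBA = 16 */
        pl[19] = 0x08;             /* range = 8 blocks */

        for (size_t off = 8; off + 16 <= sizeof(pl); off += 16)
            printf("UNMAP lba: %llu range: %u\n",
                   (unsigned long long)be64(&pl[off]),
                   be32(&pl[off + 8]));
        return 0;
    }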
/*
@@ -1008,23 +1085,36 @@ target_emulate_unmap(struct se_task *task)
* Note this is not used for TCM/pSCSI passthrough
*/
static int
-target_emulate_write_same(struct se_task *task)
+target_emulate_write_same(struct se_task *task, int write_same32)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
- sector_t lba = cmd->t_task->t_task_lba;
- unsigned int range;
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
+ sector_t range;
+ sector_t lba = cmd->t_task_lba;
+ unsigned int num_blocks;
int ret;
+ /*
+ * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explicit
+ * range when non-zero is supplied, otherwise calculate the remaining
+ * range based on ->get_blocks() - starting LBA.
+ */
+ if (write_same32)
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
+ else
+ num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
- range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
+ if (num_blocks != 0)
+ range = num_blocks;
+ else
+ range = (dev->transport->get_blocks(dev) - lba);
- printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
- (unsigned long long)lba, range);
+ pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+ (unsigned long long)lba, (unsigned long long)range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
- printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
- return -1;
+ pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
+ return ret;
}
task->task_scsi_status = GOOD;
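
The new write_same32 flag selects where the block count lives in the CDB: bytes 10..13 for WRITE_SAME(16), bytes 28..31 for the WRITE_SAME(32) service action, with 0 meaning "through the last block of the device". A small sketch of that selection, reusing be32() from the UNMAP sketch; the helper name is illustrative and the offsets are those used in the patch:

static unsigned long long write_same_range(const unsigned char *cdb,
					   int write_same32,
					   unsigned long long dev_blocks,
					   unsigned long long lba)
{
	uint32_t num_blocks;

	/* WRITE_SAME(32) carries its count at offset 28, (16) at 10 */
	num_blocks = write_same32 ? be32(&cdb[28]) : be32(&cdb[10]);

	/* zero count means: every block from lba to the end */
	return num_blocks ? num_blocks : dev_blocks - lba;
}
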
@@ -1035,12 +1125,12 @@ target_emulate_write_same(struct se_task *task)
int
transport_emulate_control_cdb(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_cmd *cmd = task->task_se_cmd;
+ struct se_device *dev = cmd->se_dev;
unsigned short service_action;
int ret = 0;
- switch (cmd->t_task->t_task_cdb[0]) {
+ switch (cmd->t_task_cdb[0]) {
case INQUIRY:
ret = target_emulate_inquiry(cmd);
break;
@@ -1054,13 +1144,13 @@ transport_emulate_control_cdb(struct se_task *task)
ret = target_emulate_modesense(cmd, 1);
break;
case SERVICE_ACTION_IN:
- switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
+ switch (cmd->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
ret = target_emulate_readcapacity_16(cmd);
break;
default:
- printk(KERN_ERR "Unsupported SA: 0x%02x\n",
- cmd->t_task->t_task_cdb[1] & 0x1f);
+ pr_err("Unsupported SA: 0x%02x\n",
+ cmd->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
break;
@@ -1069,7 +1159,7 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case UNMAP:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+ pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@@ -1077,27 +1167,27 @@ transport_emulate_control_cdb(struct se_task *task)
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+ pr_err("WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task);
+ ret = target_emulate_write_same(task, 0);
break;
case VARIABLE_LENGTH_CMD:
service_action =
- get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
+ get_unaligned_be16(&cmd->t_task_cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
- printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+ pr_err("WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
- ret = target_emulate_write_same(task);
+ ret = target_emulate_write_same(task, 1);
break;
default:
- printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+ pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
@@ -1105,8 +1195,7 @@ transport_emulate_control_cdb(struct se_task *task)
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
- printk(KERN_ERR
- "SYNCHRONIZE_CACHE emulation not supported"
+ pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
@@ -1123,8 +1212,8 @@ transport_emulate_control_cdb(struct se_task *task)
case WRITE_FILEMARKS:
break;
default:
- printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
- cmd->t_task->t_task_cdb[0], dev->transport->name);
+ pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
+ cmd->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
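
transport_emulate_control_cdb() above is a two-level dispatch: the outer switch keys on cdb[0], and VARIABLE_LENGTH_CMD carries a 16-bit big-endian service action in bytes 8..9 that names the real operation. A compact sketch with the relevant opcode values written out rather than taken from <scsi/scsi.h>, so they should be double-checked against the header:

static int dispatch_cdb(const unsigned char *cdb)
{
	unsigned short service_action;

	switch (cdb[0]) {
	case 0x12:				/* INQUIRY */
	case 0x25:				/* READ_CAPACITY */
		return 0;			/* emulated */
	case 0x7f:				/* VARIABLE_LENGTH_CMD */
		service_action = (cdb[8] << 8) | cdb[9];
		switch (service_action) {
		case 0x000d:			/* WRITE_SAME_32 */
			return 0;
		default:
			return -1;		/* unsupported SA */
		}
	default:
		return -1;			/* unsupported opcode */
	}
}
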
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 25c1f49a7d8..b2575d8568c 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,6 +37,7 @@
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
+#include <linux/spinlock.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
@@ -52,6 +53,8 @@
#include "target_core_rd.h"
#include "target_core_stat.h"
+extern struct t10_alua_lu_gp *default_lu_gp;
+
static struct list_head g_tf_list;
static struct mutex g_tf_lock;
@@ -61,6 +64,13 @@ struct target_core_configfs_attribute {
ssize_t (*store)(void *, const char *, size_t);
};
+static struct config_group target_core_hbagroup;
+static struct config_group alua_group;
+static struct config_group alua_lu_gps_group;
+
+static DEFINE_SPINLOCK(se_device_lock);
+static LIST_HEAD(se_dev_list);
+
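
The new se_device_lock/se_dev_list statics replace fields of the old global se_global structure. A sketch of how they are used, mirroring the list_add_tail()/list_del() hunks later in this patch; the helper names here are invented for illustration:

static void se_dev_track(struct se_subsystem_dev *se_dev)
{
	spin_lock(&se_device_lock);
	list_add_tail(&se_dev->se_dev_node, &se_dev_list);
	spin_unlock(&se_device_lock);
}

static void se_dev_untrack(struct se_subsystem_dev *se_dev)
{
	spin_lock(&se_device_lock);
	list_del(&se_dev->se_dev_node);
	spin_unlock(&se_device_lock);
}
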
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
@@ -94,12 +104,12 @@ static struct target_fabric_configfs *target_core_get_fabric(
{
struct target_fabric_configfs *tf;
- if (!(name))
+ if (!name)
return NULL;
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
- if (!(strcmp(tf->tf_name, name))) {
+ if (!strcmp(tf->tf_name, name)) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
@@ -120,7 +130,7 @@ static struct config_group *target_core_register_fabric(
struct target_fabric_configfs *tf;
int ret;
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
/*
* Ensure that TCM subsystem plugins are loaded at this point for
@@ -140,7 +150,7 @@ static struct config_group *target_core_register_fabric(
* registered, but simply provides auto-loading logic for modules with
* mkdir(2) system calls with known TCM fabric modules.
*/
- if (!(strncmp(name, "iscsi", 5))) {
+ if (!strncmp(name, "iscsi", 5)) {
/*
* Automatically load the LIO Target fabric module when the
* following is called:
@@ -149,11 +159,11 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
- printk(KERN_ERR "request_module() failed for"
+ pr_err("request_module() failed for"
" iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
- } else if (!(strncmp(name, "loopback", 8))) {
+ } else if (!strncmp(name, "loopback", 8)) {
/*
* Automatically load the tcm_loop fabric module when the
* following is called:
@@ -162,25 +172,25 @@ static struct config_group *target_core_register_fabric(
*/
ret = request_module("tcm_loop");
if (ret < 0) {
- printk(KERN_ERR "request_module() failed for"
+ pr_err("request_module() failed for"
" tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
tf = target_core_get_fabric(name);
- if (!(tf)) {
- printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+ if (!tf) {
+ pr_err("target_core_get_fabric() failed for %s\n",
name);
return ERR_PTR(-EINVAL);
}
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
" %s\n", tf->tf_name);
/*
* On a successful target_core_get_fabric() look, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+ pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
@@ -192,14 +202,14 @@ static struct config_group *target_core_register_fabric(
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&TF_CIT_TMPL(tf)->tfc_discovery_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
/*
* Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
*/
tf->tf_ops.tf_subsys = tf->tf_subsys;
tf->tf_fabric = &tf->tf_group.cg_item;
- printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+ pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
" for %s\n", name);
return &tf->tf_group;
@@ -218,18 +228,18 @@ static void target_core_deregister_fabric(
struct config_item *df_item;
int i;
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
" %s\n", tf->tf_name);
atomic_dec(&tf->tf_access_cnt);
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
" tf->tf_fabric for %s\n", tf->tf_name);
tf->tf_fabric = NULL;
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
tf_group = &tf->tf_group;
@@ -296,23 +306,19 @@ struct target_fabric_configfs *target_fabric_configfs_init(
{
struct target_fabric_configfs *tf;
- if (!(fabric_mod)) {
- printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
- return NULL;
- }
if (!(name)) {
- printk(KERN_ERR "Unable to locate passed fabric name\n");
- return NULL;
+ pr_err("Unable to locate passed fabric name\n");
+ return ERR_PTR(-EINVAL);
}
if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
- printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+ pr_err("Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
- if (!(tf))
- return NULL;
+ if (!tf)
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
@@ -330,9 +336,9 @@ struct target_fabric_configfs *target_fabric_configfs_init(
list_add_tail(&tf->tf_list, &g_tf_list);
mutex_unlock(&g_tf_lock);
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
">>>>>>>>>>>>>>\n");
- printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+ pr_debug("Initialized struct target_fabric_configfs: %p for"
" %s\n", tf, tf->tf_name);
return tf;
}
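
target_fabric_configfs_init() now returns ERR_PTR() codes instead of bare NULL, so callers can distinguish -EINVAL from -ENOMEM. A minimal sketch of the idiom, whose helpers come from <linux/err.h>; the function and flow here are illustrative, not the patch's code:

static struct target_fabric_configfs *fabric_alloc(const char *name)
{
	struct target_fabric_configfs *tf;

	if (!name)
		return ERR_PTR(-EINVAL);	/* bad argument */
	tf = kzalloc(sizeof(*tf), GFP_KERNEL);
	if (!tf)
		return ERR_PTR(-ENOMEM);	/* distinct failure code */
	return tf;
}

static int fabric_user(const char *name)
{
	struct target_fabric_configfs *tf = fabric_alloc(name);

	if (IS_ERR(tf))
		return PTR_ERR(tf);	/* propagate -EINVAL or -ENOMEM */
	/* ... use tf ... */
	return 0;
}
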
@@ -361,140 +367,132 @@ static int target_fabric_tf_ops_check(
{
struct target_core_fabric_ops *tfo = &tf->tf_ops;
- if (!(tfo->get_fabric_name)) {
- printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
- return -EINVAL;
- }
- if (!(tfo->get_fabric_proto_ident)) {
- printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
- return -EINVAL;
- }
- if (!(tfo->tpg_get_wwn)) {
- printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+ if (!tfo->get_fabric_name) {
+ pr_err("Missing tfo->get_fabric_name()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_tag)) {
- printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+ if (!tfo->get_fabric_proto_ident) {
+ pr_err("Missing tfo->get_fabric_proto_ident()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_default_depth)) {
- printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+ if (!tfo->tpg_get_wwn) {
+ pr_err("Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_pr_transport_id)) {
- printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+ if (!tfo->tpg_get_tag) {
+ pr_err("Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_pr_transport_id_len)) {
- printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+ if (!tfo->tpg_get_default_depth) {
+ pr_err("Missing tfo->tpg_get_default_depth()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+ if (!tfo->tpg_get_pr_transport_id) {
+ pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode_cache)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+ if (!tfo->tpg_get_pr_transport_id_len) {
+ pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_demo_mode_write_protect)) {
- printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+ if (!tfo->tpg_check_demo_mode) {
+ pr_err("Missing tfo->tpg_check_demo_mode()\n");
return -EINVAL;
}
- if (!(tfo->tpg_check_prod_mode_write_protect)) {
- printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+ if (!tfo->tpg_check_demo_mode_cache) {
+ pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
return -EINVAL;
}
- if (!(tfo->tpg_alloc_fabric_acl)) {
- printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+ if (!tfo->tpg_check_demo_mode_write_protect) {
+ pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
return -EINVAL;
}
- if (!(tfo->tpg_release_fabric_acl)) {
- printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+ if (!tfo->tpg_check_prod_mode_write_protect) {
+ pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
return -EINVAL;
}
- if (!(tfo->tpg_get_inst_index)) {
- printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+ if (!tfo->tpg_alloc_fabric_acl) {
+ pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
return -EINVAL;
}
- if (!(tfo->release_cmd_to_pool)) {
- printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
+ if (!tfo->tpg_release_fabric_acl) {
+ pr_err("Missing tfo->tpg_release_fabric_acl()\n");
return -EINVAL;
}
- if (!(tfo->release_cmd_direct)) {
- printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
+ if (!tfo->tpg_get_inst_index) {
+ pr_err("Missing tfo->tpg_get_inst_index()\n");
return -EINVAL;
}
- if (!(tfo->shutdown_session)) {
- printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+ if (!tfo->release_cmd) {
+ pr_err("Missing tfo->release_cmd()\n");
return -EINVAL;
}
- if (!(tfo->close_session)) {
- printk(KERN_ERR "Missing tfo->close_session()\n");
+ if (!tfo->shutdown_session) {
+ pr_err("Missing tfo->shutdown_session()\n");
return -EINVAL;
}
- if (!(tfo->stop_session)) {
- printk(KERN_ERR "Missing tfo->stop_session()\n");
+ if (!tfo->close_session) {
+ pr_err("Missing tfo->close_session()\n");
return -EINVAL;
}
- if (!(tfo->fall_back_to_erl0)) {
- printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+ if (!tfo->stop_session) {
+ pr_err("Missing tfo->stop_session()\n");
return -EINVAL;
}
- if (!(tfo->sess_logged_in)) {
- printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+ if (!tfo->fall_back_to_erl0) {
+ pr_err("Missing tfo->fall_back_to_erl0()\n");
return -EINVAL;
}
- if (!(tfo->sess_get_index)) {
- printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+ if (!tfo->sess_logged_in) {
+ pr_err("Missing tfo->sess_logged_in()\n");
return -EINVAL;
}
- if (!(tfo->write_pending)) {
- printk(KERN_ERR "Missing tfo->write_pending()\n");
+ if (!tfo->sess_get_index) {
+ pr_err("Missing tfo->sess_get_index()\n");
return -EINVAL;
}
- if (!(tfo->write_pending_status)) {
- printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+ if (!tfo->write_pending) {
+ pr_err("Missing tfo->write_pending()\n");
return -EINVAL;
}
- if (!(tfo->set_default_node_attributes)) {
- printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+ if (!tfo->write_pending_status) {
+ pr_err("Missing tfo->write_pending_status()\n");
return -EINVAL;
}
- if (!(tfo->get_task_tag)) {
- printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+ if (!tfo->set_default_node_attributes) {
+ pr_err("Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
}
- if (!(tfo->get_cmd_state)) {
- printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+ if (!tfo->get_task_tag) {
+ pr_err("Missing tfo->get_task_tag()\n");
return -EINVAL;
}
- if (!(tfo->new_cmd_failure)) {
- printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
+ if (!tfo->get_cmd_state) {
+ pr_err("Missing tfo->get_cmd_state()\n");
return -EINVAL;
}
- if (!(tfo->queue_data_in)) {
- printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+ if (!tfo->queue_data_in) {
+ pr_err("Missing tfo->queue_data_in()\n");
return -EINVAL;
}
- if (!(tfo->queue_status)) {
- printk(KERN_ERR "Missing tfo->queue_status()\n");
+ if (!tfo->queue_status) {
+ pr_err("Missing tfo->queue_status()\n");
return -EINVAL;
}
- if (!(tfo->queue_tm_rsp)) {
- printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+ if (!tfo->queue_tm_rsp) {
+ pr_err("Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
- if (!(tfo->set_fabric_sense_len)) {
- printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+ if (!tfo->set_fabric_sense_len) {
+ pr_err("Missing tfo->set_fabric_sense_len()\n");
return -EINVAL;
}
- if (!(tfo->get_fabric_sense_len)) {
- printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+ if (!tfo->get_fabric_sense_len) {
+ pr_err("Missing tfo->get_fabric_sense_len()\n");
return -EINVAL;
}
- if (!(tfo->is_state_remove)) {
- printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+ if (!tfo->is_state_remove) {
+ pr_err("Missing tfo->is_state_remove()\n");
return -EINVAL;
}
/*
@@ -502,20 +500,20 @@ static int target_fabric_tf_ops_check(
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
* target_core_fabric_configfs.c WWN+TPG group context code.
*/
- if (!(tfo->fabric_make_wwn)) {
- printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+ if (!tfo->fabric_make_wwn) {
+ pr_err("Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
- if (!(tfo->fabric_drop_wwn)) {
- printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+ if (!tfo->fabric_drop_wwn) {
+ pr_err("Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
- if (!(tfo->fabric_make_tpg)) {
- printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+ if (!tfo->fabric_make_tpg) {
+ pr_err("Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
- if (!(tfo->fabric_drop_tpg)) {
- printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+ if (!tfo->fabric_drop_tpg) {
+ pr_err("Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
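
The long if-chain above simply verifies that the fabric module filled in every mandatory callback. The same check could be expressed as a table of offsetof() entries walked in a loop, as sketched below; this is illustrative only, and the kernel keeps the open-coded chain so each pr_err() names the missing op directly at its call site:

#include <stddef.h>

static const struct {
	size_t offset;
	const char *name;
} required_tfo_ops[] = {
	{ offsetof(struct target_core_fabric_ops, get_fabric_name),
	  "get_fabric_name" },
	{ offsetof(struct target_core_fabric_ops, queue_data_in),
	  "queue_data_in" },
	{ offsetof(struct target_core_fabric_ops, queue_status),
	  "queue_status" },
	/* ... one entry per mandatory callback ... */
};

static int check_required_ops(const struct target_core_fabric_ops *tfo)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(required_tfo_ops); i++) {
		const char *base = (const char *)tfo;
		void *fn = *(void * const *)(base + required_tfo_ops[i].offset);

		if (!fn) {
			pr_err("Missing tfo->%s()\n", required_tfo_ops[i].name);
			return -EINVAL;
		}
	}
	return 0;
}
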
@@ -533,22 +531,15 @@ static int target_fabric_tf_ops_check(
int target_fabric_configfs_register(
struct target_fabric_configfs *tf)
{
- struct config_group *su_group;
int ret;
- if (!(tf)) {
- printk(KERN_ERR "Unable to locate target_fabric_configfs"
+ if (!tf) {
+ pr_err("Unable to locate target_fabric_configfs"
" pointer\n");
return -EINVAL;
}
- if (!(tf->tf_subsys)) {
- printk(KERN_ERR "Unable to target struct config_subsystem"
- " pointer\n");
- return -EINVAL;
- }
- su_group = &tf->tf_subsys->su_group;
- if (!(su_group)) {
- printk(KERN_ERR "Unable to locate target struct config_group"
+ if (!tf->tf_subsys) {
+ pr_err("Unable to target struct config_subsystem"
" pointer\n");
return -EINVAL;
}
@@ -556,7 +547,7 @@ int target_fabric_configfs_register(
if (ret < 0)
return ret;
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
">>>>>>>>>>\n");
return 0;
}
@@ -565,48 +556,39 @@ EXPORT_SYMBOL(target_fabric_configfs_register);
void target_fabric_configfs_deregister(
struct target_fabric_configfs *tf)
{
- struct config_group *su_group;
struct configfs_subsystem *su;
- if (!(tf)) {
- printk(KERN_ERR "Unable to locate passed target_fabric_"
+ if (!tf) {
+ pr_err("Unable to locate passed target_fabric_"
"configfs\n");
return;
}
su = tf->tf_subsys;
- if (!(su)) {
- printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+ if (!su) {
+ pr_err("Unable to locate passed tf->tf_subsys"
" pointer\n");
return;
}
- su_group = &tf->tf_subsys->su_group;
- if (!(su_group)) {
- printk(KERN_ERR "Unable to locate target struct config_group"
- " pointer\n");
- return;
- }
-
- printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
">>>>>>>>>>>>\n");
mutex_lock(&g_tf_lock);
if (atomic_read(&tf->tf_access_cnt)) {
mutex_unlock(&g_tf_lock);
- printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+ pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
tf->tf_name);
BUG();
}
list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+ pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
" %s\n", tf->tf_name);
tf->tf_module = NULL;
tf->tf_subsys = NULL;
kfree(tf);
- printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+ pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
">>>>>\n");
- return;
}
EXPORT_SYMBOL(target_fabric_configfs_deregister);
@@ -627,11 +609,12 @@ static ssize_t target_core_dev_show_attr_##_name( \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
- if (!(dev)) { \
+ if (!dev) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
- rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
+ rb = snprintf(page, PAGE_SIZE, "%u\n", \
+ (u32)dev->se_sub_dev->se_dev_attrib._name); \
spin_unlock(&se_dev->se_dev_lock); \
\
return rb; \
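
The fragment above comes from a macro that stamps out one show handler per device attribute. Consolidating the pieces visible in this hunk, the generated function looks roughly like the sketch below; the macro and function names are illustrative, while the locking and -ENODEV check match the diff:

#define DEF_SHOW_ATTR(_name)						\
static ssize_t show_##_name(struct se_subsystem_dev *se_dev,		\
			    char *page)					\
{									\
	struct se_device *dev;						\
	ssize_t rb;							\
									\
	spin_lock(&se_dev->se_dev_lock);				\
	dev = se_dev->se_dev_ptr;					\
	if (!dev) {							\
		spin_unlock(&se_dev->se_dev_lock);			\
		return -ENODEV;		/* device not enabled yet */	\
	}								\
	rb = snprintf(page, PAGE_SIZE, "%u\n",				\
		      (u32)dev->se_sub_dev->se_dev_attrib._name);	\
	spin_unlock(&se_dev->se_dev_lock);				\
	return rb;							\
}

DEF_SHOW_ATTR(block_size);	/* expands to show_block_size() */
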
@@ -650,14 +633,14 @@ static ssize_t target_core_dev_store_attr_##_name( \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
- if (!(dev)) { \
+ if (!dev) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
ret = strict_strtoul(page, 0, &val); \
if (ret < 0) { \
spin_unlock(&se_dev->se_dev_lock); \
- printk(KERN_ERR "strict_strtoul() failed with" \
+ pr_err("strict_strtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
@@ -715,6 +698,12 @@ SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
+DEF_DEV_ATTRIB(is_nonrot);
+SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
+
+DEF_DEV_ATTRIB(emulate_rest_reord);
+SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
+
DEF_DEV_ATTRIB_RO(hw_block_size);
SE_DEV_ATTR_RO(hw_block_size);
@@ -763,6 +752,8 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
+ &target_core_dev_attrib_is_nonrot.attr,
+ &target_core_dev_attrib_emulate_rest_reord.attr,
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
@@ -819,7 +810,7 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct se_device *dev;
dev = se_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
@@ -846,13 +837,13 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
- printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+ pr_err("Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
}
if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
- printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+ pr_err("Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
}
@@ -863,9 +854,9 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
* could cause negative effects.
*/
dev = su_dev->se_dev_ptr;
- if ((dev)) {
+ if (dev) {
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "Unable to set VPD Unit Serial while"
+ pr_err("Unable to set VPD Unit Serial while"
" active %d $FABRIC_MOD exports exist\n",
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
@@ -883,7 +874,7 @@ static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
"%s", strstrip(buf));
su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+ pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
" %s\n", su_dev->t10_wwn.unit_serial);
return count;
@@ -905,19 +896,19 @@ static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
ssize_t len = 0;
dev = se_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
- if (!(vpd->protocol_identifier_set))
+ if (!vpd->protocol_identifier_set)
continue;
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
- if ((len + strlen(buf) >= PAGE_SIZE))
+ if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
@@ -952,7 +943,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
ssize_t len = 0; \
\
dev = se_dev->se_dev_ptr; \
- if (!(dev)) \
+ if (!dev) \
return -ENODEV; \
\
spin_lock(&t10_wwn->t10_vpd_lock); \
@@ -962,19 +953,19 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) >= PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) >= PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
- if ((len + strlen(buf) >= PAGE_SIZE)) \
+ if (len + strlen(buf) >= PAGE_SIZE) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
@@ -984,7 +975,7 @@ static ssize_t target_core_dev_wwn_show_attr_##_name( \
}
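
The repeated "len + strlen(buf) >= PAGE_SIZE" tests above implement an append-until-full pattern: each chunk is formatted into a bounded scratch buffer and only copied into the output page while it still fits. A stand-alone userspace sketch, with arbitrary buffer sizes:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define TMP_SIZE  128

static size_t append_items(char *page, const char *const *items, int n)
{
	char buf[TMP_SIZE];
	size_t len = 0;

	for (int i = 0; i < n; i++) {
		snprintf(buf, sizeof(buf), "item: %s\n", items[i]);
		if (len + strlen(buf) >= PAGE_SIZE)	/* would overflow */
			break;
		len += sprintf(page + len, "%s", buf);
	}
	return len;
}
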
/*
- * VPD page 0x83 Assoication: Logical Unit
+ * VPD page 0x83 Association: Logical Unit
*/
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
@@ -1083,7 +1074,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
@@ -1093,7 +1084,7 @@ static ssize_t target_core_dev_pr_show_spc3_res(
PR_REG_ISID_ID_LEN);
*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
- TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
@@ -1109,13 +1100,13 @@ static ssize_t target_core_dev_pr_show_spc2_res(
spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
- if (!(se_nacl)) {
+ if (!se_nacl) {
*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
- TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
+ se_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
se_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -1128,10 +1119,10 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(
{
ssize_t len = 0;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- switch (T10_RES(su_dev)->res_type) {
+ switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
page, &len);
@@ -1165,15 +1156,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1202,13 +1193,13 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
- return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
+ return sprintf(page, "0x%08x\n", su_dev->t10_pr.pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
@@ -1229,15 +1220,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1245,7 +1236,7 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
- tfo = TPG_TFO(se_tpg);
+ tfo = se_tpg->se_tpg_tfo;
len += sprintf(page+len, "SPC-3 Reservation: %s"
" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
@@ -1276,16 +1267,16 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
ssize_t len = 0;
int reg_count = 0, prf_isid;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
memset(buf, 0, 384);
@@ -1299,15 +1290,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if ((len + strlen(buf) >= PAGE_SIZE))
+ if (len + strlen(buf) >= PAGE_SIZE)
break;
len += sprintf(page+len, "%s", buf);
reg_count++;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
- if (!(reg_count))
+ if (!reg_count)
len += sprintf(page+len, "None\n");
return len;
@@ -1327,15 +1318,15 @@ static ssize_t target_core_dev_pr_show_attr_res_pr_type(
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
- if (!(pr_reg)) {
+ if (!pr_reg) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
@@ -1358,10 +1349,10 @@ static ssize_t target_core_dev_pr_show_attr_res_type(
{
ssize_t len = 0;
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- switch (T10_RES(su_dev)->res_type) {
+ switch (su_dev->t10_pr.res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
break;
@@ -1389,14 +1380,14 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
- (T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
+ (su_dev->t10_pr.pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
@@ -1408,10 +1399,10 @@ static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
struct se_subsystem_dev *su_dev,
char *page)
{
- if (!(su_dev->se_dev_ptr))
+ if (!su_dev->se_dev_ptr)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1460,14 +1451,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
u8 type = 0, scope;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_INFO "Unable to process APTPL metadata while"
+ pr_debug("Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
}
@@ -1497,7 +1488,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
- printk(KERN_ERR "APTPL metadata initiator_node="
+ pr_err("APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
ret = -EINVAL;
@@ -1511,7 +1502,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
if (strlen(isid) >= PR_REG_ISID_LEN) {
- printk(KERN_ERR "APTPL metadata initiator_isid"
+ pr_err("APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
ret = -EINVAL;
@@ -1526,7 +1517,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
ret = strict_strtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoull() failed for"
+ pr_err("strict_strtoull() failed for"
" sa_res_key=\n");
goto out;
}
@@ -1572,7 +1563,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
goto out;
}
if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
- printk(KERN_ERR "APTPL metadata target_node="
+ pr_err("APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
ret = -EINVAL;
@@ -1596,20 +1587,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
}
}
- if (!(i_port) || !(t_port) || !(sa_res_key)) {
- printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+ if (!i_port || !t_port || !sa_res_key) {
+ pr_err("Illegal parameters for APTPL registration\n");
ret = -EINVAL;
goto out;
}
if (res_holder && !(type)) {
- printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+ pr_err("Illegal PR type: 0x%02x for reservation"
" holder\n", type);
ret = -EINVAL;
goto out;
}
- ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
+ ret = core_scsi3_alloc_aptpl_registration(&su_dev->t10_pr, sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
@@ -1662,7 +1653,7 @@ static ssize_t target_core_show_dev_info(void *p, char *page)
int bl = 0;
ssize_t read_bytes = 0;
- if (!(se_dev->se_dev_ptr))
+ if (!se_dev->se_dev_ptr)
return -ENODEV;
transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
@@ -1688,8 +1679,8 @@ static ssize_t target_core_store_dev_control(
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate struct se_subsystem_dev>se"
"_dev_su_ptr\n");
return -EINVAL;
}
@@ -1725,7 +1716,7 @@ static ssize_t target_core_store_dev_alias(
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
- printk(KERN_ERR "alias count: %d exceeds"
+ pr_err("alias count: %d exceeds"
" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
SE_DEV_ALIAS_LEN-1);
return -EINVAL;
@@ -1735,7 +1726,7 @@ static ssize_t target_core_store_dev_alias(
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
- printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+ pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_alias);
@@ -1771,7 +1762,7 @@ static ssize_t target_core_store_dev_udev_path(
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
- printk(KERN_ERR "udev_path count: %d exceeds"
+ pr_err("udev_path count: %d exceeds"
" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
SE_UDEV_PATH_LEN-1);
return -EINVAL;
@@ -1781,7 +1772,7 @@ static ssize_t target_core_store_dev_udev_path(
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
- printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+ pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_udev_path);
@@ -1809,13 +1800,13 @@ static ssize_t target_core_store_dev_enable(
char *ptr;
ptr = strstr(page, "1");
- if (!(ptr)) {
- printk(KERN_ERR "For dev_enable ops, only valid value"
+ if (!ptr) {
+ pr_err("For dev_enable ops, only valid value"
" is \"1\"\n");
return -EINVAL;
}
- if ((se_dev->se_dev_ptr)) {
- printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+ if (se_dev->se_dev_ptr) {
+ pr_err("se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
@@ -1830,7 +1821,7 @@ static ssize_t target_core_store_dev_enable(
return -EINVAL;
se_dev->se_dev_ptr = dev;
- printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+ pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
" %p\n", se_dev->se_dev_ptr);
return count;
@@ -1854,22 +1845,22 @@ static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
return len;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem)) {
- printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ if (!lu_gp_mem) {
+ pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
lu_ci = &lu_gp->lu_gp_group.cg_item;
len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
config_item_name(lu_ci), lu_gp->lu_gp_id);
@@ -1893,17 +1884,17 @@ static ssize_t target_core_store_alua_lu_gp(
int move = 0;
dev = su_dev->se_dev_ptr;
- if (!(dev))
+ if (!dev)
return -ENODEV;
- if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
- printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+ if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
+ pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item));
return -EINVAL;
}
if (count > LU_GROUP_NAME_BUF) {
- printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+ pr_err("ALUA LU Group Alias too large!\n");
return -EINVAL;
}
memset(buf, 0, LU_GROUP_NAME_BUF);
@@ -1919,27 +1910,27 @@ static ssize_t target_core_store_alua_lu_gp(
* core_alua_get_lu_gp_by_name() below.
*/
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
- if (!(lu_gp_new))
+ if (!lu_gp_new)
return -ENODEV;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
- if (!(lu_gp_mem)) {
+ if (!lu_gp_mem) {
if (lu_gp_new)
core_alua_put_lu_gp_from_name(lu_gp_new);
- printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+ pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
- if ((lu_gp)) {
+ if (lu_gp) {
/*
* Clearing an existing lu_gp association, and replacing
* with NULL
*/
- if (!(lu_gp_new)) {
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+ if (!lu_gp_new) {
+ pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
@@ -1964,7 +1955,7 @@ static ssize_t target_core_store_alua_lu_gp(
__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
- printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+ pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
@@ -2008,7 +1999,7 @@ static void target_core_dev_release(struct config_item *item)
*`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
*/
if (se_dev->se_dev_ptr) {
- printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+ pr_debug("Target_Core_ConfigFS: Calling se_free_"
"virtual_device() for se_dev_ptr: %p\n",
se_dev->se_dev_ptr);
@@ -2017,14 +2008,14 @@ static void target_core_dev_release(struct config_item *item)
/*
* Release struct se_subsystem_dev->se_dev_su_ptr..
*/
- printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+ pr_debug("Target_Core_ConfigFS: Calling t->free_"
"device() for se_dev_su_ptr: %p\n",
se_dev->se_dev_su_ptr);
t->free_device(se_dev->se_dev_su_ptr);
}
- printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+ pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
"_dev_t: %p\n", se_dev);
kfree(se_dev);
}
@@ -2039,10 +2030,10 @@ static ssize_t target_core_dev_show(struct config_item *item,
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
- if (!(tc_attr->show))
+ if (!tc_attr->show)
return -EINVAL;
- return tc_attr->show((void *)se_dev, page);
+ return tc_attr->show(se_dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
@@ -2055,10 +2046,10 @@ static ssize_t target_core_dev_store(struct config_item *item,
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
- if (!(tc_attr->store))
+ if (!tc_attr->store)
return -EINVAL;
- return tc_attr->store((void *)se_dev, page, count);
+ return tc_attr->store(se_dev, page, count);
}
static struct configfs_item_operations target_core_dev_item_ops = {
@@ -2098,7 +2089,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
struct t10_alua_lu_gp *lu_gp,
char *page)
{
- if (!(lu_gp->lu_gp_valid_id))
+ if (!lu_gp->lu_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
@@ -2115,12 +2106,12 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
ret = strict_strtoul(page, 0, &lu_gp_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" lu_gp_id\n", ret);
return -EINVAL;
}
if (lu_gp_id > 0x0000ffff) {
- printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+ pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", lu_gp_id);
return -EINVAL;
}
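
The store handler above follows a parse-then-bound pattern: convert the page with strict_strtoul(), then reject anything that does not fit the 16-bit group ID. A userspace sketch with strtoul() standing in (its end-of-string checking is looser than the kernel helper's):

#include <errno.h>
#include <stdlib.h>

static int parse_gp_id(const char *page, unsigned short *out)
{
	char *end;
	unsigned long id;

	errno = 0;
	id = strtoul(page, &end, 0);
	if (errno || end == page)
		return -EINVAL;		/* not a number */
	if (id > 0x0000ffff)
		return -EINVAL;		/* exceeds the 16-bit ID space */
	*out = (unsigned short)id;
	return 0;
}
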
@@ -2129,7 +2120,7 @@ static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
if (ret < 0)
return -EINVAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
" Group: core/alua/lu_gps/%s to ID: %hu\n",
config_item_name(&alua_lu_gp_cg->cg_item),
lu_gp->lu_gp_id);
@@ -2167,7 +2158,7 @@ static ssize_t target_core_alua_lu_gp_show_attr_members(
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
- printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
@@ -2231,7 +2222,7 @@ static struct config_group *target_core_alua_create_lu_gp(
config_group_init_type_name(alua_lu_gp_cg, name,
&target_core_alua_lu_gp_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
" Group: core/alua/lu_gps/%s\n",
config_item_name(alua_lu_gp_ci));
@@ -2246,7 +2237,7 @@ static void target_core_alua_drop_lu_gp(
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+ pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
/*
@@ -2305,22 +2296,22 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
unsigned long tmp;
int new_state, ret;
- if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
- printk(KERN_ERR "Unable to do implict ALUA on non valid"
+ if (!tg_pt_gp->tg_pt_gp_valid_id) {
+ pr_err("Unable to do implict ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk("Unable to extract new ALUA access state from"
+ pr_err("Unable to extract new ALUA access state from"
" %s\n", page);
return -EINVAL;
}
new_state = (int)tmp;
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
- printk(KERN_ERR "Unable to process implict configfs ALUA"
+ pr_err("Unable to process implict configfs ALUA"
" transition while TPGS_IMPLICT_ALUA is diabled\n");
return -EINVAL;
}
@@ -2351,8 +2342,8 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
unsigned long tmp;
int new_status, ret;
- if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
- printk(KERN_ERR "Unable to do set ALUA access status on non"
+ if (!tg_pt_gp->tg_pt_gp_valid_id) {
+ pr_err("Unable to do set ALUA access status on non"
" valid tg_pt_gp ID: %hu\n",
tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
@@ -2360,7 +2351,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract new ALUA access status"
+ pr_err("Unable to extract new ALUA access status"
" from %s\n", page);
return -EINVAL;
}
@@ -2369,7 +2360,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
if ((new_status != ALUA_STATUS_NONE) &&
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
- printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+ pr_err("Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
}
@@ -2420,12 +2411,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+ pr_err("Unable to extract alua_write_metadata\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
- printk(KERN_ERR "Illegal value for alua_write_metadata:"
+ pr_err("Illegal value for alua_write_metadata:"
" %lu\n", tmp);
return -EINVAL;
}
@@ -2507,7 +2498,7 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
- if (!(tg_pt_gp->tg_pt_gp_valid_id))
+ if (!tg_pt_gp->tg_pt_gp_valid_id)
return 0;
return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
@@ -2524,12 +2515,12 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
ret = strict_strtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" tg_pt_gp_id\n", ret);
return -EINVAL;
}
if (tg_pt_gp_id > 0x0000ffff) {
- printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+ pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", tg_pt_gp_id);
return -EINVAL;
}
@@ -2538,7 +2529,7 @@ static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
if (ret < 0)
return -EINVAL;
- printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+ pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
"core/alua/tg_pt_gps/%s to ID: %hu\n",
config_item_name(&alua_tg_pt_gp_cg->cg_item),
tg_pt_gp->tg_pt_gp_id);
@@ -2572,14 +2563,14 @@ static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
lun = port->sep_lun;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
- "/%s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ "/%s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
- printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+ pr_warn("Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
@@ -2645,7 +2636,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_item *alua_tg_pt_gp_ci = NULL;
tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
- if (!(tg_pt_gp))
+ if (!tg_pt_gp)
return NULL;
alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
@@ -2654,7 +2645,7 @@ static struct config_group *target_core_alua_create_tg_pt_gp(
config_group_init_type_name(alua_tg_pt_gp_cg, name,
&target_core_alua_tg_pt_gp_cit);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+ pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
" Group: alua/tg_pt_gps/%s\n",
config_item_name(alua_tg_pt_gp_ci));
@@ -2668,7 +2659,7 @@ static void target_core_alua_drop_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
struct t10_alua_tg_pt_gp, tg_pt_gp_group);
- printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+ pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
/*
@@ -2759,21 +2750,21 @@ static struct config_group *target_core_make_subdev(
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
if (!se_dev) {
- printk(KERN_ERR "Unable to allocate memory for"
+ pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_reservation.registration_lock);
- spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_pr.registration_lock);
+ spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
@@ -2783,7 +2774,7 @@ static struct config_group *target_core_make_subdev(
dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
GFP_KERNEL);
- if (!(dev_cg->default_groups))
+ if (!dev_cg->default_groups)
goto out;
/*
* Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
@@ -2794,14 +2785,14 @@ static struct config_group *target_core_make_subdev(
* configfs tree for device object's struct config_group.
*/
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_global->g_device_lock);
- list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
- spin_unlock(&se_global->g_device_lock);
+ spin_lock(&se_device_lock);
+ list_add_tail(&se_dev->se_dev_node, &se_dev_list);
+ spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
@@ -2826,14 +2817,14 @@ static struct config_group *target_core_make_subdev(
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
- if (!(tg_pt_gp))
+ if (!tg_pt_gp)
goto out;
- tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(tg_pt_gp_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+ if (!tg_pt_gp_cg->default_groups) {
+ pr_err("Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
goto out;
}
@@ -2842,28 +2833,28 @@ static struct config_group *target_core_make_subdev(
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
- T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
+ se_dev->t10_alua.default_tg_pt_gp = tg_pt_gp;
/*
* Add core/$HBA/$DEV/statistics/ default groups
*/
- dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
GFP_KERNEL);
if (!dev_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
+ pr_err("Unable to allocate dev_stat_grp->default_groups\n");
goto out;
}
target_stat_setup_dev_default_groups(se_dev);
- printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+ pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
mutex_unlock(&hba->hba_access_mutex);
return &se_dev->se_dev_group;
out:
- if (T10_ALUA(se_dev)->default_tg_pt_gp) {
- core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
- T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ if (se_dev->t10_alua.default_tg_pt_gp) {
+ core_alua_free_tg_pt_gp(se_dev->t10_alua.default_tg_pt_gp);
+ se_dev->t10_alua.default_tg_pt_gp = NULL;
}
if (dev_stat_grp)
kfree(dev_stat_grp->default_groups);
@@ -2896,11 +2887,11 @@ static void target_core_drop_subdev(
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_global->g_device_lock);
- list_del(&se_dev->g_se_dev_list);
- spin_unlock(&se_global->g_device_lock);
+ spin_lock(&se_device_lock);
+ list_del(&se_dev->se_dev_node);
+ spin_unlock(&se_device_lock);
- dev_stat_grp = &DEV_STAT_GRP(se_dev)->stat_group;
+ dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
dev_stat_grp->default_groups[i] = NULL;
@@ -2908,7 +2899,7 @@ static void target_core_drop_subdev(
}
kfree(dev_stat_grp->default_groups);
- tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
+ tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
@@ -2919,7 +2910,7 @@ static void target_core_drop_subdev(
* core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp
* directly from target_core_alua_tg_pt_gp_release().
*/
- T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
+ se_dev->t10_alua.default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
@@ -2988,13 +2979,13 @@ static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
ret = strict_strtoul(page, 0, &mode_flag);
if (ret < 0) {
- printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+ pr_err("Unable to extract hba mode flag: %d\n", ret);
return -EINVAL;
}
spin_lock(&hba->device_lock);
- if (!(list_empty(&hba->hba_dev_list))) {
- printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+ if (!list_empty(&hba->hba_dev_list)) {
+ pr_err("Unable to set hba_mode with active devices\n");
spin_unlock(&hba->device_lock);
return -EINVAL;
}
@@ -3053,7 +3044,7 @@ static struct config_group *target_core_call_addhbatotarget(
memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
- printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+ pr_err("Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
return ERR_PTR(-ENAMETOOLONG);
@@ -3061,8 +3052,8 @@ static struct config_group *target_core_call_addhbatotarget(
snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
str = strstr(buf, "_");
- if (!(str)) {
- printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+ if (!str) {
+ pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
return ERR_PTR(-EINVAL);
}
se_plugin_str = buf;
@@ -3071,7 +3062,7 @@ static struct config_group *target_core_call_addhbatotarget(
* Namely rd_direct and rd_mcp..
*/
str2 = strstr(str+1, "_");
- if ((str2)) {
+ if (str2) {
*str2 = '\0'; /* Terminate for *se_plugin_str */
str2++; /* Skip to start of plugin dependent ID */
str = str2;
@@ -3082,7 +3073,7 @@ static struct config_group *target_core_call_addhbatotarget(
ret = strict_strtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoul() returned %d for"
+ pr_err("strict_strtoul() returned %d for"
" plugin_dep_id\n", ret);
return ERR_PTR(-EINVAL);
}
@@ -3135,7 +3126,7 @@ static int __init target_core_init_configfs(void)
struct t10_alua_lu_gp *lu_gp;
int ret;
- printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+ pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
" Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
@@ -3145,10 +3136,9 @@ static int __init target_core_init_configfs(void)
INIT_LIST_HEAD(&g_tf_list);
mutex_init(&g_tf_lock);
- init_scsi_index_table();
- ret = init_se_global();
+ ret = init_se_kmem_caches();
if (ret < 0)
- return -1;
+ return ret;
/*
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
@@ -3156,44 +3146,44 @@ static int __init target_core_init_configfs(void)
target_cg = &subsys->su_group;
target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(target_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+ if (!target_cg->default_groups) {
+ pr_err("Unable to allocate target_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->target_core_hbagroup,
+ config_group_init_type_name(&target_core_hbagroup,
"core", &target_core_cit);
- target_cg->default_groups[0] = &se_global->target_core_hbagroup;
+ target_cg->default_groups[0] = &target_core_hbagroup;
target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
- hba_cg = &se_global->target_core_hbagroup;
+ hba_cg = &target_core_hbagroup;
hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(hba_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+ if (!hba_cg->default_groups) {
+ pr_err("Unable to allocate hba_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->alua_group,
+ config_group_init_type_name(&alua_group,
"alua", &target_core_alua_cit);
- hba_cg->default_groups[0] = &se_global->alua_group;
+ hba_cg->default_groups[0] = &alua_group;
hba_cg->default_groups[1] = NULL;
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
- alua_cg = &se_global->alua_group;
+ alua_cg = &alua_group;
alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(alua_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+ if (!alua_cg->default_groups) {
+ pr_err("Unable to allocate alua_cg->default_groups\n");
goto out_global;
}
- config_group_init_type_name(&se_global->alua_lu_gps_group,
+ config_group_init_type_name(&alua_lu_gps_group,
"lu_gps", &target_core_alua_lu_gps_cit);
- alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
+ alua_cg->default_groups[0] = &alua_lu_gps_group;
alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
@@ -3202,11 +3192,11 @@ static int __init target_core_init_configfs(void)
if (IS_ERR(lu_gp))
goto out_global;
- lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg = &alua_lu_gps_group;
lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
- if (!(lu_gp_cg->default_groups)) {
- printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+ if (!lu_gp_cg->default_groups) {
+ pr_err("Unable to allocate lu_gp_cg->default_groups\n");
goto out_global;
}
@@ -3214,17 +3204,17 @@ static int __init target_core_init_configfs(void)
&target_core_alua_lu_gp_cit);
lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
lu_gp_cg->default_groups[1] = NULL;
- se_global->default_lu_gp = lu_gp;
+ default_lu_gp = lu_gp;
/*
* Register the target_core_mod subsystem with configfs.
*/
ret = configfs_register_subsystem(subsys);
if (ret < 0) {
- printk(KERN_ERR "Error %d while registering subsystem %s\n",
+ pr_err("Error %d while registering subsystem %s\n",
ret, subsys->su_group.cg_item.ci_namebuf);
goto out_global;
}
- printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+ pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
@@ -3244,9 +3234,9 @@ out:
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
- if (se_global->default_lu_gp) {
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ if (default_lu_gp) {
+ core_alua_free_lu_gp(default_lu_gp);
+ default_lu_gp = NULL;
}
if (lu_gp_cg)
kfree(lu_gp_cg->default_groups);
@@ -3255,8 +3245,8 @@ out_global:
if (hba_cg)
kfree(hba_cg->default_groups);
kfree(target_cg->default_groups);
- release_se_global();
- return -1;
+ release_se_kmem_caches();
+ return ret;
}
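
The init path above builds the configfs tree by hand: allocate a NULL-terminated default_groups pointer array, name each child with config_group_init_type_name(), and register the subsystem only once the tree is complete. (Note the kzalloc() calls size the arrays with sizeof(struct config_group) where sizeof(struct config_group *) would do; that over-allocates, but harmlessly.) A minimal sketch of the same pattern; my_subsys, my_child and my_child_cit are hypothetical names, not part of this patch:

#include <linux/configfs.h>
#include <linux/slab.h>

static struct configfs_subsystem my_subsys;
static struct config_item_type my_child_cit;
static struct config_group my_child;

static int __init my_module_init(void)
{
	struct config_group *root = &my_subsys.su_group;
	int ret;

	config_group_init(root);
	mutex_init(&my_subsys.su_mutex);

	/* one child plus the NULL terminator */
	root->default_groups = kzalloc(sizeof(struct config_group *) * 2,
				GFP_KERNEL);
	if (!root->default_groups)
		return -ENOMEM;

	config_group_init_type_name(&my_child, "child", &my_child_cit);
	root->default_groups[0] = &my_child;
	root->default_groups[1] = NULL;

	ret = configfs_register_subsystem(&my_subsys);
	if (ret)
		kfree(root->default_groups);
	return ret;
}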
static void __exit target_core_exit_configfs(void)
@@ -3266,10 +3256,9 @@ static void __exit target_core_exit_configfs(void)
struct config_item *item;
int i;
- se_global->in_shutdown = 1;
subsys = target_core_subsystem[0];
- lu_gp_cg = &se_global->alua_lu_gps_group;
+ lu_gp_cg = &alua_lu_gps_group;
for (i = 0; lu_gp_cg->default_groups[i]; i++) {
item = &lu_gp_cg->default_groups[i]->cg_item;
lu_gp_cg->default_groups[i] = NULL;
@@ -3278,7 +3267,7 @@ static void __exit target_core_exit_configfs(void)
kfree(lu_gp_cg->default_groups);
lu_gp_cg->default_groups = NULL;
- alua_cg = &se_global->alua_group;
+ alua_cg = &alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
item = &alua_cg->default_groups[i]->cg_item;
alua_cg->default_groups[i] = NULL;
@@ -3287,7 +3276,7 @@ static void __exit target_core_exit_configfs(void)
kfree(alua_cg->default_groups);
alua_cg->default_groups = NULL;
- hba_cg = &se_global->target_core_hbagroup;
+ hba_cg = &target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
item = &hba_cg->default_groups[i]->cg_item;
hba_cg->default_groups[i] = NULL;
@@ -3302,17 +3291,15 @@ static void __exit target_core_exit_configfs(void)
configfs_unregister_subsystem(subsys);
kfree(subsys->su_group.default_groups);
- core_alua_free_lu_gp(se_global->default_lu_gp);
- se_global->default_lu_gp = NULL;
+ core_alua_free_lu_gp(default_lu_gp);
+ default_lu_gp = NULL;
- printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+ pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
core_dev_release_virtual_lun0();
rd_module_exit();
- release_se_global();
-
- return;
+ release_se_kmem_caches();
}
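
Teardown mirrors that construction in reverse: each default_groups array is emptied and freed child-first, the subsystem is unregistered, and the kmem caches introduced by this patch (release_se_kmem_caches() replacing the old release_se_global()) go last. Compressed sketch, same hypothetical names as above:

static void __exit my_module_exit(void)
{
	/* drop the children before the subsystem that owns them */
	configfs_unregister_subsystem(&my_subsys);
	kfree(my_subsys.su_group.default_groups);
}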
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index ba698ea62bb..b38b6c993e6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_device.c (based on iscsi_target_device.c)
*
- * This file contains the iSCSI Virtual Device and Disk Transport
+ * This file contains the TCM Virtual Device and Disk Transport
* agnostic related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
@@ -54,177 +54,183 @@
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
-int transport_get_lun_for_cmd(
- struct se_cmd *se_cmd,
- unsigned char *cdb,
- u32 unpacked_lun)
+static struct se_hba *lun0_hba;
+static struct se_subsystem_dev *lun0_su_dev;
+/* not static, needed by tpg.c */
+struct se_device *g_lun0_dev;
+
+int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
- struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
+ struct se_device *dev;
unsigned long flags;
- int read_only = 0;
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
- deve = se_cmd->se_deve =
- &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
- if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- if (se_cmd) {
- deve->total_cmds++;
- deve->total_bytes += se_cmd->data_length;
-
- if (se_cmd->data_direction == DMA_TO_DEVICE) {
- if (deve->lun_flags &
- TRANSPORT_LUNFLAGS_READ_ONLY) {
- read_only = 1;
- goto out;
- }
- deve->write_bytes += se_cmd->data_length;
- } else if (se_cmd->data_direction ==
- DMA_FROM_DEVICE) {
- deve->read_bytes += se_cmd->data_length;
- }
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+ se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
+ struct se_dev_entry *deve = se_cmd->se_deve;
+
+ deve->total_cmds++;
+ deve->total_bytes += se_cmd->data_length;
+
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08x\n",
+ se_cmd->se_tfo->get_fabric_name(),
+ unpacked_lun);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
+ return -EACCES;
}
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE)
+ deve->write_bytes += se_cmd->data_length;
+ else if (se_cmd->data_direction == DMA_FROM_DEVICE)
+ deve->read_bytes += se_cmd->data_length;
+
deve->deve_cmds++;
- se_lun = se_cmd->se_lun = deve->se_lun;
+ se_lun = deve->se_lun;
+ se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
+ se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
-out:
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
- if (read_only) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ /*
+ * Use the se_portal_group->tpg_virt_lun0 to allow for
+ * REPORT_LUNS, et al to be returned when no active
+ * MappedLUN=0 exists for this Initiator Port.
+ */
+ if (unpacked_lun != 0) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
+ se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
- return -1;
- } else {
- /*
- * Use the se_portal_group->tpg_virt_lun0 to allow for
- * REPORT_LUNS, et al to be returned when no active
- * MappedLUN=0 exists for this Initiator Port.
- */
- if (unpacked_lun != 0) {
- se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
- " Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
- unpacked_lun);
- return -1;
- }
- /*
- * Force WRITE PROTECT for virtual LUN 0
- */
- if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE)) {
- se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
- }
-#if 0
- printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
- CMD_TFO(se_cmd)->get_fabric_name());
-#endif
- se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
- se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
- se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+ return -ENODEV;
}
+ /*
+ * Force WRITE PROTECT for virtual LUN 0
+ */
+ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+ (se_cmd->data_direction != DMA_NONE)) {
+ se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -EACCES;
+ }
+
+ se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
+ se_cmd->orig_fe_lun = 0;
+ se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
* Determine if the struct se_lun is online.
+ * FIXME: Check for LUN_RESET + UNIT Attention
*/
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
- {
- struct se_device *dev = se_lun->lun_se_dev;
- spin_lock_irq(&dev->stats_lock);
+ /* Directly associate cmd with se_dev */
+ se_cmd->se_dev = se_lun->lun_se_dev;
+
+ /* TODO: get rid of this and use atomics for stats */
+ dev = se_lun->lun_se_dev;
+ spin_lock_irqsave(&dev->stats_lock, flags);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
- spin_unlock_irq(&dev->stats_lock);
- }
+ spin_unlock_irqrestore(&dev->stats_lock, flags);
/*
* Add the struct se_cmd to the struct se_lun's cmd list. This list is used
* for tracking state of struct se_cmds during LUN shutdown events.
*/
spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
- list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
- atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
-#if 0
- printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
- CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
-#endif
+ list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
+ atomic_set(&se_cmd->transport_lun_active, 1);
spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_cmd);
+EXPORT_SYMBOL(transport_lookup_cmd_lun);
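
transport_lookup_cmd_lun() now reports failures with distinct errnos (-ENODEV for a nonexistent or offline LUN, -EACCES for a write to a read-only LUN) instead of a bare -1, and it fills in se_cmd->scsi_sense_reason before returning. A sketch of a fabric-side caller; the my_fabric_* helpers are illustrative names, not part of any fabric module:

static int my_fabric_handle_scsi_cmd(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	int ret;

	ret = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
	if (ret < 0) {
		/* scsi_sense_reason was already set by the lookup
		 * (TCM_NON_EXISTENT_LUN or TCM_WRITE_PROTECTED), so the
		 * fabric only has to emit the CHECK CONDITION. */
		my_fabric_send_check_condition(se_cmd);
		return ret;
	}
	return my_fabric_queue_cmd(se_cmd);
}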
-int transport_get_lun_for_tmr(
- struct se_cmd *se_cmd,
- u32 unpacked_lun)
+int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
- struct se_device *dev = NULL;
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
+ unsigned long flags;
+
+ if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
+ se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
+ se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ return -ENODEV;
+ }
+
+ spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
+ se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+ deve = se_cmd->se_deve;
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
- deve = se_cmd->se_deve =
- &SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
- se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
- dev = se_lun->lun_se_dev;
+ se_tmr->tmr_lun = deve->se_lun;
+ se_cmd->se_lun = deve->se_lun;
+ se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
-/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
+ se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
if (!se_lun) {
- printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+ pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
- CMD_TFO(se_cmd)->get_fabric_name(),
+ se_cmd->se_tfo->get_fabric_name(),
unpacked_lun);
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
/*
* Determine if the struct se_lun is online.
+ * FIXME: Check for LUN_RESET + UNIT Attention
*/
-/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- return -1;
+ return -ENODEV;
}
- se_tmr->tmr_dev = dev;
- spin_lock(&dev->se_tmr_lock);
- list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
- spin_unlock(&dev->se_tmr_lock);
+ /* Directly associate cmd with se_dev */
+ se_cmd->se_dev = se_lun->lun_se_dev;
+ se_tmr->tmr_dev = se_lun->lun_se_dev;
+
+ spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
+ list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
+ spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);
return 0;
}
-EXPORT_SYMBOL(transport_get_lun_for_tmr);
+EXPORT_SYMBOL(transport_lookup_tmr_lun);
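
transport_lookup_tmr_lun() follows the same convention for task management requests, and additionally links the se_tmr_req onto the device's dev_tmr_list under se_tmr_lock. A sketch of the fabric side; my_fabric_handle_tmr is a hypothetical name, and TMR_LUN_DOES_NOT_EXIST is assumed from target_core_base.h:

static int my_fabric_handle_tmr(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0) {
		/* no such LUN, or device offline */
		se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
		return -ENODEV;
	}
	/* se_tmr_req is now on the device's dev_tmr_list */
	return 0;
}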
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
@@ -249,17 +255,17 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
continue;
lun = deve->se_lun;
- if (!(lun)) {
- printk(KERN_ERR "%s device entries device pointer is"
+ if (!lun) {
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
port = lun->lun_sep;
- if (!(port)) {
- printk(KERN_ERR "%s device entries device pointer is"
+ if (!port) {
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
if (port->sep_rtpi != rtpi)
@@ -295,9 +301,9 @@ int core_free_device_list_for_node(
continue;
if (!deve->se_lun) {
- printk(KERN_ERR "%s device entries device pointer is"
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
lun = deve->se_lun;
@@ -323,8 +329,6 @@ void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
deve->deve_cmds--;
spin_unlock_irq(&se_nacl->device_list_lock);
-
- return;
}
void core_update_device_list_access(
@@ -344,8 +348,6 @@ void core_update_device_list_access(
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
spin_unlock_irq(&nacl->device_list_lock);
-
- return;
}
/* core_update_device_list_for_node():
@@ -370,7 +372,7 @@ int core_update_device_list_for_node(
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*/
- if (!(enable)) {
+ if (!enable) {
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
@@ -393,18 +395,18 @@ int core_update_device_list_for_node(
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
- printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+ pr_err("struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EINVAL;
}
if (deve->se_lun != lun) {
- printk(KERN_ERR "struct se_dev_entry->se_lun does"
+ pr_err("struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EINVAL;
}
deve->se_lun_acl = lun_acl;
trans = 1;
@@ -492,8 +494,6 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
spin_lock_bh(&tpg->acl_node_lock);
}
spin_unlock_bh(&tpg->acl_node_lock);
-
- return;
}
static struct se_port *core_alloc_port(struct se_device *dev)
@@ -501,9 +501,9 @@ static struct se_port *core_alloc_port(struct se_device *dev)
struct se_port *port, *port_tmp;
port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
- if (!(port)) {
- printk(KERN_ERR "Unable to allocate struct se_port\n");
- return NULL;
+ if (!port) {
+ pr_err("Unable to allocate struct se_port\n");
+ return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&port->sep_alua_list);
INIT_LIST_HEAD(&port->sep_list);
@@ -513,10 +513,10 @@ static struct se_port *core_alloc_port(struct se_device *dev)
spin_lock(&dev->se_port_lock);
if (dev->dev_port_count == 0x0000ffff) {
- printk(KERN_WARNING "Reached dev->dev_port_count =="
+ pr_warn("Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
again:
/*
@@ -532,7 +532,7 @@ again:
* 3h to FFFFh Relative port 3 through 65 535
*/
port->sep_rtpi = dev->dev_rpti_counter++;
- if (!(port->sep_rtpi))
+ if (!port->sep_rtpi)
goto again;
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
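
core_alloc_port() now encodes the failure reason in the returned pointer with ERR_PTR(-ENOMEM)/ERR_PTR(-ENOSPC) rather than returning NULL, and core_dev_export() below recovers it with PTR_ERR(). The idiom in isolation, as a minimal sketch (struct foo, my_alloc and my_caller are illustrative):

#include <linux/err.h>
#include <linux/slab.h>

struct foo { int id; };

static struct foo *my_alloc(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return p;
}

static int my_caller(void)
{
	struct foo *p = my_alloc();

	if (IS_ERR(p))
		return PTR_ERR(p);	/* errno recovered; no NULL check needed */
	kfree(p);
	return 0;
}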
@@ -554,7 +554,7 @@ static void core_export_port(
struct se_port *port,
struct se_lun *lun)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
@@ -567,20 +567,20 @@ static void core_export_port(
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
+ if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
- printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+ pr_err("Unable to allocate t10_alua_tg_pt"
"_gp_member_t\n");
return;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
- T10_ALUA(su_dev)->default_tg_pt_gp);
+ su_dev->t10_alua.default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
- printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+ pr_debug("%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
- TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
+ dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
}
dev->dev_port_count++;
@@ -607,8 +607,6 @@ static void core_release_port(struct se_device *dev, struct se_port *port)
list_del(&port->sep_list);
dev->dev_port_count--;
kfree(port);
-
- return;
}
int core_dev_export(
@@ -619,8 +617,8 @@ int core_dev_export(
struct se_port *port;
port = core_alloc_port(dev);
- if (!(port))
- return -1;
+ if (IS_ERR(port))
+ return PTR_ERR(port);
lun->lun_se_dev = dev;
se_dev_start(dev);
@@ -657,33 +655,35 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
struct se_lun *se_lun;
- struct se_session *se_sess = SE_SESS(se_cmd);
+ struct se_session *se_sess = se_cmd->se_sess;
struct se_task *se_task;
- unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
+ unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
- list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
+ list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
break;
- if (!(se_task)) {
- printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+ if (!se_task) {
+ pr_err("Unable to locate struct se_task for struct se_cmd\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
+ buf = transport_kmap_first_data_page(se_cmd);
+
/*
* If no struct se_session pointer is present, this struct se_cmd is
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!(se_sess)) {
+ if (!se_sess) {
int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
lun_count = 1;
goto done;
}
- spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
- deve = &SE_NODE_ACL(se_sess)->device_list[i];
+ deve = &se_sess->se_node_acl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
se_lun = deve->se_lun;
@@ -700,12 +700,13 @@ int transport_core_report_lun_response(struct se_cmd *se_cmd)
offset += 8;
cdb_offset += 8;
}
- spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
+ spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);
/*
* See SPC3 r07, page 159.
*/
done:
+ transport_kunmap_first_data_page(se_cmd);
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
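
The byte stores into buf[0..3] above assemble the REPORT LUNS "LUN list length" field as a big-endian 32-bit value by hand; put_unaligned_be32() from <asm/unaligned.h> expresses the same four stores in one call. A sketch, with report_luns_set_length as an illustrative helper name:

#include <asm/unaligned.h>

static inline void report_luns_set_length(unsigned char *buf, u32 lun_count)
{
	/* same bytes as buf[0]..buf[3] above: length = lun_count * 8 */
	put_unaligned_be32(lun_count * 8, &buf[0]);
}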
@@ -744,26 +745,20 @@ void se_release_device_for_hba(struct se_device *dev)
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
- kfree(dev->dev_status_queue_obj);
- kfree(dev->dev_queue_obj);
kfree(dev);
-
- return;
}
void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
- spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
+ spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
- &DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
+ &dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
- spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
-
- return;
+ spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/* se_free_virtual_device():
@@ -822,12 +817,13 @@ static void se_dev_stop(struct se_device *dev)
int se_dev_check_online(struct se_device *dev)
{
+ unsigned long flags;
int ret;
- spin_lock_irq(&dev->dev_status_lock);
+ spin_lock_irqsave(&dev->dev_status_lock, flags);
ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
- spin_unlock_irq(&dev->dev_status_lock);
+ spin_unlock_irqrestore(&dev->dev_status_lock, flags);
return ret;
}
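
The switch from spin_lock_irq() to spin_lock_irqsave() here matters because se_dev_check_online() can be reached from contexts where interrupts are already disabled: the irq variant would unconditionally re-enable interrupts on unlock, whereas irqsave/irqrestore preserves whatever state the caller had. The idiom reduced to a sketch (the status test is abbreviated):

static int my_check(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);  /* save caller's IRQ state */
	ret = (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);  /* and put it back */
	return ret;
}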
@@ -849,59 +845,61 @@ void se_dev_set_default_attribs(
{
struct queue_limits *limits = &dev_limits->limits;
- DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
- DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
- DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
- DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
- DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
- DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
- DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
- DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
- DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
- DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
- DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
+ dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
+ dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
+ dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
+ dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
+ dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
+ dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
+ dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
+ dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
/*
* The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
* iblock_create_virtdevice() from struct queue_limits values
* if blk_queue_discard()==1
*/
- DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
- DEV_ATTRIB(dev)->max_unmap_block_desc_count =
- DA_MAX_UNMAP_BLOCK_DESC_COUNT;
- DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
- DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ DA_MAX_UNMAP_BLOCK_DESC_COUNT;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
/*
* block_size is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
- DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
+ dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
+ dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
/*
* max_sectors is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
- DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+ dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
/*
* Set optimal_sectors from max_sectors, which can be lowered via
* configfs.
*/
- DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = limits->max_sectors;
/*
* queue_depth is based on subsystem plugin dependent requirements.
*/
- DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
- DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
+ dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
if (task_timeout > DA_TASK_TIMEOUT_MAX) {
- printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
+ pr_err("dev[%p]: Passed task_timeout: %u larger then"
" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
- return -1;
+ return -EINVAL;
} else {
- DEV_ATTRIB(dev)->task_timeout = task_timeout;
- printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+ dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
+ pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
dev, task_timeout);
}
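
The long run of se_dev_set_*() helpers that follows shares one shape: validate the input, fail with a specific errno (-EINVAL, or -ENOSYS for unimplemented features) where the old code returned -1, store into dev->se_sub_dev->se_dev_attrib, and log the new value with pr_debug(). The shape in a sketch, with my_attrib as a hypothetical attribute:

int se_dev_set_my_attrib(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.my_attrib = flag;
	pr_debug("dev[%p]: Set my_attrib: %d\n", dev, flag);
	return 0;
}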
@@ -912,9 +910,9 @@ int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
- DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
- printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
- dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
+ pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
return 0;
}
@@ -922,9 +920,10 @@ int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
- DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
- printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
- dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
+ max_unmap_block_desc_count;
+ pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
return 0;
}
@@ -932,9 +931,9 @@ int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
- DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
- printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
- dev, DEV_ATTRIB(dev)->unmap_granularity);
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
+ pr_debug("dev[%p]: Set unmap_granularity: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
return 0;
}
@@ -942,109 +941,109 @@ int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
- DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
- printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
- dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
+ pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
+ dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->dpo_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
- return -1;
+ if (dev->transport->dpo_emulated == NULL) {
+ pr_err("dev->transport->dpo_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
- return -1;
+ if (dev->transport->dpo_emulated(dev) == 0) {
+ pr_err("dev->transport->dpo_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_dpo = flag;
- printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
- " bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
+ dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
+ pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
+ " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_write_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
- return -1;
+ if (dev->transport->fua_write_emulated == NULL) {
+ pr_err("dev->transport->fua_write_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
- return -1;
+ if (dev->transport->fua_write_emulated(dev) == 0) {
+ pr_err("dev->transport->fua_write_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_fua_write = flag;
- printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_fua_write);
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
+ pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_read_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
- return -1;
+ if (dev->transport->fua_read_emulated == NULL) {
+ pr_err("dev->transport->fua_read_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
- return -1;
+ if (dev->transport->fua_read_emulated(dev) == 0) {
+ pr_err("dev->transport->fua_read_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_fua_read = flag;
- printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_fua_read);
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
+ pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
- if (TRANSPORT(dev)->write_cache_emulated == NULL) {
- printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
- return -1;
+ if (dev->transport->write_cache_emulated == NULL) {
+ pr_err("dev->transport->write_cache_emulated is NULL\n");
+ return -EINVAL;
}
- if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
- printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
- return -1;
+ if (dev->transport->write_cache_emulated(dev) == 0) {
+ pr_err("dev->transport->write_cache_emulated not supported\n");
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_write_cache = flag;
- printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_write_cache);
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
+ pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1) && (flag != 2)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while dev_export_obj: %d count"
" exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
- printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
- dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
+ dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
+ pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+ dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
return 0;
}
@@ -1052,19 +1051,19 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+ pr_err("dev[%p]: Unable to change SE Device TAS while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->emulate_tas = flag;
- printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
- dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
+ dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
+ pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+ dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
@@ -1072,20 +1071,20 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
- printk(KERN_ERR "Generic Block Discard not supported\n");
+ if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- DEV_ATTRIB(dev)->emulate_tpu = flag;
- printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+ dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
}
@@ -1093,20 +1092,20 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
- printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
- if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
- printk(KERN_ERR "Generic Block Discard not supported\n");
+ if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
- DEV_ATTRIB(dev)->emulate_tpws = flag;
- printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+ dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
+ pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
@@ -1114,12 +1113,36 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+ dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
+ pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+ (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
+ return 0;
+}
+
+int se_dev_set_is_nonrot(struct se_device *dev, int flag)
+{
+ if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
- return -1;
+ return -EINVAL;
+ }
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
+ pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
+ dev, flag);
+ return 0;
+}
+
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
+{
+ if (flag != 0) {
+ printk(KERN_ERR "dev[%p]: SE Device emulatation of restricted"
+ " reordering not implemented\n", dev);
+ return -ENOSYS;
}
- DEV_ATTRIB(dev)->enforce_pr_isids = flag;
- printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
- (DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
+ dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
+ pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
return 0;
}
@@ -1131,44 +1154,44 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
u32 orig_queue_depth = dev->queue_depth;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+ pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- if (!(queue_depth)) {
- printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+ if (!queue_depth) {
+ pr_err("dev[%p]: Illegal ZERO value for queue"
"_depth\n", dev);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
- printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
- DEV_ATTRIB(dev)->hw_queue_depth);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ return -EINVAL;
}
} else {
- if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
- if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
- printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
+ if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
+ pr_err("dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
- DEV_ATTRIB(dev)->hw_queue_depth);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
+ return -EINVAL;
}
}
}
- DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
+ dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
if (queue_depth > orig_queue_depth)
atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
else if (queue_depth < orig_queue_depth)
atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
- printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+ pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
@@ -1178,50 +1201,50 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int force = 0; /* Force setting for VDEVS */
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" max_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
- if (!(max_sectors)) {
- printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+ if (!max_sectors) {
+ pr_err("dev[%p]: Illegal ZERO value for"
" max_sectors\n", dev);
- return -1;
+ return -EINVAL;
}
if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+ pr_err("dev[%p]: Passed max_sectors: %u less than"
" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, max_sectors,
- DEV_ATTRIB(dev)->hw_max_sectors);
- return -1;
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
}
} else {
- if (!(force) && (max_sectors >
- DEV_ATTRIB(dev)->hw_max_sectors)) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ if (!force && (max_sectors >
+ dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors"
": %u, use force=1 to override.\n", dev,
- max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
- return -1;
+ max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
+ return -EINVAL;
}
if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+ pr_err("dev[%p]: Passed max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
- return -1;
+ return -EINVAL;
}
}
- DEV_ATTRIB(dev)->max_sectors = max_sectors;
- printk("dev[%p]: SE Device max_sectors changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
+ pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
dev, max_sectors);
return 0;
}
@@ -1229,25 +1252,25 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+ pr_err("dev[%p]: Unable to change SE Device"
" optimal_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("dev[%p]: Passed optimal_sectors cannot be"
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
- if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
- printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+ if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
+ pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
" greater than max_sectors: %u\n", dev,
- optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
+ optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
return -EINVAL;
}
- DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
- printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
+ pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
}
@@ -1255,31 +1278,31 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
- printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+ pr_err("dev[%p]: Unable to change SE Device block_size"
" while dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
- return -1;
+ return -EINVAL;
}
if ((block_size != 512) &&
(block_size != 1024) &&
(block_size != 2048) &&
(block_size != 4096)) {
- printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
+ pr_err("dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
dev, block_size);
- return -1;
+ return -EINVAL;
}
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
- printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ pr_err("dev[%p]: Not allowed to change block_size for"
" Physical Device, use for Linux/SCSI to change"
" block_size for underlying hardware\n", dev);
- return -1;
+ return -EINVAL;
}
- DEV_ATTRIB(dev)->block_size = block_size;
- printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+ dev->se_sub_dev->se_dev_attrib.block_size = block_size;
+ pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
}
@@ -1294,13 +1317,13 @@ struct se_lun *core_dev_add_lun(
u32 lun_access = 0;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
- printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+ pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
atomic_read(&dev->dev_access_obj.obj_access_count));
return NULL;
}
lun_p = core_tpg_pre_addlun(tpg, lun);
- if ((IS_ERR(lun_p)) || !(lun_p))
+ if (IS_ERR(lun_p) || !lun_p)
return NULL;
if (dev->dev_flags & DF_READ_ONLY)
@@ -1311,15 +1334,15 @@ struct se_lun *core_dev_add_lun(
if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
return NULL;
- printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
- " CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
- TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
+ pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+ " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
- if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
+ if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
@@ -1347,15 +1370,15 @@ int core_dev_del_lun(
int ret = 0;
lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
- if (!(lun))
+ if (!lun)
return ret;
core_tpg_post_dellun(tpg, lun);
- printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
- " device object\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
- TPG_TFO(tpg)->get_fabric_name());
+ pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+ " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name());
return 0;
}
@@ -1366,21 +1389,21 @@ struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_l
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
"_PER_TPG-1: %u for Target Portal Group: %hu\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+ pr_err("%s Logical Unit Number: %u is not free on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
@@ -1399,21 +1422,21 @@ static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
"_TPG-1: %u for Target Portal Group: %hu\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
@@ -1432,19 +1455,19 @@ struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_node_acl *nacl;
if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
- printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ pr_err("%s InitiatorName exceeds maximum size.\n",
+ tpg->se_tpg_tfo->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!(nacl)) {
+ if (!nacl) {
*ret = -EINVAL;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
- if (!(lacl)) {
- printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+ if (!lacl) {
+ pr_err("Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
@@ -1467,16 +1490,16 @@ int core_dev_add_initiator_node_lun_acl(
struct se_node_acl *nacl;
lun = core_dev_get_lun(tpg, unpacked_lun);
- if (!(lun)) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ if (!lun) {
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return -EINVAL;
}
nacl = lacl->se_lun_nacl;
- if (!(nacl))
+ if (!nacl)
return -EINVAL;
if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@@ -1495,9 +1518,9 @@ int core_dev_add_initiator_node_lun_acl(
smp_mb__after_atomic_inc();
spin_unlock(&lun->lun_acl_lock);
- printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
- " InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
+ pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+ " InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
lacl->initiatorname);
/*
@@ -1520,7 +1543,7 @@ int core_dev_del_initiator_node_lun_acl(
struct se_node_acl *nacl;
nacl = lacl->se_lun_nacl;
- if (!(nacl))
+ if (!nacl)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
@@ -1534,10 +1557,10 @@ int core_dev_del_initiator_node_lun_acl(
lacl->se_lun = NULL;
- printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+ pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %u\n",
- TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lacl->initiatorname, lacl->mapped_lun);
return 0;
@@ -1547,10 +1570,10 @@ void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
- printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
- " Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
- TPG_TFO(tpg)->get_fabric_name(),
+ pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+ " Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
+ tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
@@ -1565,64 +1588,64 @@ int core_dev_setup_virtual_lun0(void)
char buf[16];
int ret;
- hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+ hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
- se_global->g_lun0_hba = hba;
+ lun0_hba = hba;
t = hba->transport;
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
- if (!(se_dev)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!se_dev) {
+ pr_err("Unable to allocate memory for"
" struct se_subsystem_dev\n");
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->g_se_dev_list);
+ INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
- INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
- INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
- spin_lock_init(&se_dev->t10_reservation.registration_lock);
- spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
+ INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
+ INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
+ spin_lock_init(&se_dev->t10_pr.registration_lock);
+ spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
- se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
+ se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
se_dev->se_dev_hba = hba;
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
- if (!(se_dev->se_dev_su_ptr)) {
- printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+ if (!se_dev->se_dev_su_ptr) {
+ pr_err("Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
ret = -ENOMEM;
goto out;
}
- se_global->g_lun0_su_dev = se_dev;
+ lun0_su_dev = se_dev;
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
- if (!(dev) || IS_ERR(dev)) {
- ret = -ENOMEM;
+ if (IS_ERR(dev)) {
+ ret = PTR_ERR(dev);
goto out;
}
se_dev->se_dev_ptr = dev;
- se_global->g_lun0_dev = dev;
+ g_lun0_dev = dev;
return 0;
out:
- se_global->g_lun0_su_dev = NULL;
+ lun0_su_dev = NULL;
kfree(se_dev);
- if (se_global->g_lun0_hba) {
- core_delete_hba(se_global->g_lun0_hba);
- se_global->g_lun0_hba = NULL;
+ if (lun0_hba) {
+ core_delete_hba(lun0_hba);
+ lun0_hba = NULL;
}
return ret;
}
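
The rewritten error path above stops flattening every create_virtdevice() failure into -ENOMEM and instead propagates the encoded errno via PTR_ERR(). A minimal userspace sketch of the ERR_PTR convention the hunk relies on; the helpers mirror include/linux/err.h, and create_virtdevice() here is a hypothetical stand-in, not the TCM function:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* The top MAX_ERRNO pointer values are reserved for errnos. */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_virtdevice(int fail)
{
        /* On failure the cause travels inside the pointer itself. */
        return fail ? ERR_PTR(-ENODEV) : (void *)0x1000;
}

int main(void)
{
        void *dev = create_virtdevice(1);

        if (IS_ERR(dev)) {
                /* Propagate the real errno instead of guessing -ENOMEM,
                 * as the patched lun0 setup path now does. */
                printf("create failed: %ld\n", PTR_ERR(dev));
                return 1;
        }
        return 0;
}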
@@ -1630,14 +1653,14 @@ out:
void core_dev_release_virtual_lun0(void)
{
- struct se_hba *hba = se_global->g_lun0_hba;
- struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
+ struct se_hba *hba = lun0_hba;
+ struct se_subsystem_dev *su_dev = lun0_su_dev;
- if (!(hba))
+ if (!hba)
return;
- if (se_global->g_lun0_dev)
- se_free_virtual_device(se_global->g_lun0_dev, hba);
+ if (g_lun0_dev)
+ se_free_virtual_device(g_lun0_dev, hba);
kfree(su_dev);
core_delete_hba(hba);
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 07ab5a3bb8e..f1654694f4e 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -60,7 +60,7 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf)
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_module; \
- printk("Setup generic %s\n", __stringify(_name)); \
+ pr_debug("Setup generic %s\n", __stringify(_name)); \
}
/* Start of tfc_tpg_mappedlun_cit */
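
Every hunk in this series swaps raw printk(KERN_*) calls for the pr_err()/pr_debug() helpers. A simplified sketch of how those helpers are layered in include/linux/printk.h (condensed, not the exact kernel definitions): pr_debug() compiles to nothing unless DEBUG or CONFIG_DYNAMIC_DEBUG is in play, which is why the chatty KERN_INFO messages are demoted to it.

/* Simplified from include/linux/printk.h; drivers may redefine
 * pr_fmt() to prefix every message, e.g. with KBUILD_MODNAME. */
#define pr_fmt(fmt) fmt

#define pr_err(fmt, ...) \
        printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

#if defined(CONFIG_DYNAMIC_DEBUG)
#define pr_debug(fmt, ...) \
        dynamic_pr_debug(pr_fmt(fmt), ##__VA_ARGS__)
#elif defined(DEBUG)
#define pr_debug(fmt, ...) \
        printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#else
#define pr_debug(fmt, ...) \
        no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
#endif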
@@ -80,8 +80,8 @@ static int target_fabric_mappedlun_link(
/*
* Ensure that the source port exists
*/
- if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
- printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+ if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
+ pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
"_tpg does not exist\n");
return -EINVAL;
}
@@ -96,12 +96,12 @@ static int target_fabric_mappedlun_link(
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
- printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+ pr_err("Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
- printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+ pr_err("Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
@@ -118,7 +118,7 @@ static int target_fabric_mappedlun_link(
lun_access = deve->lun_flags;
else
lun_access =
- (TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
+ (se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
@@ -147,7 +147,7 @@ static int target_fabric_mappedlun_unlink(
/*
* Determine if the underlying MappedLUN has already been released..
*/
- if (!(deve->se_lun))
+ if (!deve->se_lun)
return 0;
lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@@ -202,9 +202,9 @@ static ssize_t target_fabric_mappedlun_store_write_protect(
TRANSPORT_LUNFLAGS_READ_WRITE,
lacl->se_lun_nacl);
- printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+ pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
- TPG_TFO(se_tpg)->get_fabric_name(),
+ se_tpg->se_tpg_tfo->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
return count;
@@ -327,14 +327,14 @@ static struct config_group *target_fabric_make_mappedlun(
int ret = 0;
acl_ci = &group->cg_item;
- if (!(acl_ci)) {
- printk(KERN_ERR "Unable to locatel acl_ci\n");
+ if (!acl_ci) {
+ pr_err("Unable to locatel acl_ci\n");
return NULL;
}
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate memory for name buf\n");
+ if (!buf) {
+ pr_err("Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
@@ -342,7 +342,7 @@ static struct config_group *target_fabric_make_mappedlun(
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
- printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+ pr_err("Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
@@ -358,7 +358,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
- if (!(lacl)) {
+ if (!lacl) {
ret = -EINVAL;
goto out;
}
@@ -367,7 +367,7 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lacl_cg->default_groups) {
- printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
+ pr_err("Unable to allocate lacl_cg->default_groups\n");
ret = -ENOMEM;
goto out;
}
@@ -379,11 +379,11 @@ static struct config_group *target_fabric_make_mappedlun(
lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group;
lacl_cg->default_groups[1] = NULL;
- ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp = &lacl->ml_stat_grps.stat_group;
ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!ml_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
+ pr_err("Unable to allocate ml_stat_grp->default_groups\n");
ret = -ENOMEM;
goto out;
}
@@ -408,7 +408,7 @@ static void target_fabric_drop_mappedlun(
struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
int i;
- ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ ml_stat_grp = &lacl->ml_stat_grps.stat_group;
for (i = 0; ml_stat_grp->default_groups[i]; i++) {
df_item = &ml_stat_grp->default_groups[i]->cg_item;
ml_stat_grp->default_groups[i] = NULL;
@@ -474,8 +474,8 @@ static struct config_group *target_fabric_make_nodeacl(
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;
- if (!(tf->tf_ops.fabric_make_nodeacl)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+ if (!tf->tf_ops.fabric_make_nodeacl) {
+ pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
return ERR_PTR(-ENOSYS);
}
@@ -572,13 +572,13 @@ static struct config_group *target_fabric_make_np(
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
- if (!(tf->tf_ops.fabric_make_np)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+ if (!tf->tf_ops.fabric_make_np) {
+ pr_err("tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
- if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+ if (!se_tpg_np || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
se_tpg_np->tpg_np_parent = se_tpg;
@@ -627,10 +627,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@@ -641,10 +638,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@@ -659,10 +653,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
@@ -673,10 +664,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
@@ -691,10 +679,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
@@ -705,10 +690,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
@@ -723,10 +705,7 @@ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
@@ -737,10 +716,7 @@ static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
const char *page,
size_t count)
{
- if (!(lun))
- return -ENODEV;
-
- if (!(lun->lun_sep))
+ if (!lun || !lun->lun_sep)
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -781,13 +757,13 @@ static int target_fabric_port_link(
tf = se_tpg->se_tpg_wwn->wwn_tf;
if (lun->lun_se_dev != NULL) {
- printk(KERN_ERR "Port Symlink already exists\n");
+ pr_err("Port Symlink already exists\n");
return -EEXIST;
}
dev = se_dev->se_dev_ptr;
- if (!(dev)) {
- printk(KERN_ERR "Unable to locate struct se_device pointer from"
+ if (!dev) {
+ pr_err("Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
@@ -795,8 +771,8 @@ static int target_fabric_port_link(
lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
- if ((IS_ERR(lun_p)) || !(lun_p)) {
- printk(KERN_ERR "core_dev_add_lun() failed\n");
+ if (IS_ERR(lun_p) || !lun_p) {
+ pr_err("core_dev_add_lun() failed\n");
ret = -EINVAL;
goto out;
}
@@ -888,7 +864,7 @@ static struct config_group *target_fabric_make_lun(
int errno;
if (strstr(name, "lun_") != name) {
- printk(KERN_ERR "Unable to locate \'_\" in"
+ pr_err("Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
@@ -896,14 +872,14 @@ static struct config_group *target_fabric_make_lun(
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
- if (!(lun))
+ if (!lun)
return ERR_PTR(-EINVAL);
lun_cg = &lun->lun_group;
lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!lun_cg->default_groups) {
- printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
+ pr_err("Unable to allocate lun_cg->default_groups\n");
return ERR_PTR(-ENOMEM);
}
@@ -914,11 +890,11 @@ static struct config_group *target_fabric_make_lun(
lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group;
lun_cg->default_groups[1] = NULL;
- port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp = &lun->port_stat_grps.stat_group;
port_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
GFP_KERNEL);
if (!port_stat_grp->default_groups) {
- printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
+ pr_err("Unable to allocate port_stat_grp->default_groups\n");
errno = -ENOMEM;
goto out;
}
@@ -941,7 +917,7 @@ static void target_fabric_drop_lun(
struct config_group *lun_cg, *port_stat_grp;
int i;
- port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ port_stat_grp = &lun->port_stat_grps.stat_group;
for (i = 0; port_stat_grp->default_groups[i]; i++) {
df_item = &port_stat_grp->default_groups[i]->cg_item;
port_stat_grp->default_groups[i] = NULL;
@@ -1031,13 +1007,13 @@ static struct config_group *target_fabric_make_tpg(
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
- if (!(tf->tf_ops.fabric_make_tpg)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+ if (!tf->tf_ops.fabric_make_tpg) {
+ pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
- if (!(se_tpg) || IS_ERR(se_tpg))
+ if (!se_tpg || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
* Setup default groups from pre-allocated se_tpg->tpg_default_groups
@@ -1130,13 +1106,13 @@ static struct config_group *target_fabric_make_wwn(
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
- if (!(tf->tf_ops.fabric_make_wwn)) {
- printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+ if (!tf->tf_ops.fabric_make_wwn) {
+ pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
- if (!(wwn) || IS_ERR(wwn))
+ if (!wwn || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 1e193f32489..c4ea3a9a555 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -25,6 +25,7 @@
*
******************************************************************************/
+#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
@@ -61,9 +62,8 @@ u32 sas_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- unsigned char binary, *ptr;
- int i;
- u32 off = 4;
+ unsigned char *ptr;
+
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
*/
@@ -74,10 +74,8 @@ u32 sas_get_pr_transport_id(
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
- for (i = 0; i < 16; i += 2) {
- binary = transport_asciihex_to_binaryhex(&ptr[i]);
- buf[off++] = binary;
- }
+ hex2bin(&buf[4], ptr, 8);
+
/*
* The SAS Transport ID is a hardcoded 24-byte length
*/
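
The hunk above drops the driver-private transport_asciihex_to_binaryhex() loop in favour of the generic hex2bin() from lib/hexdump.c (hence the new <linux/kernel.h> include). A self-contained userspace sketch of the same conversion; my_hex2bin() is a hypothetical stand-in, not the kernel helper:

#include <ctype.h>
#include <stdio.h>

static int hex_to_nibble(char c)
{
        if (c >= '0' && c <= '9')
                return c - '0';
        c = tolower((unsigned char)c);
        if (c >= 'a' && c <= 'f')
                return c - 'a' + 10;
        return -1;
}

/* Convert 2*count ASCII hex digits into count binary bytes. */
static int my_hex2bin(unsigned char *dst, const char *src, size_t count)
{
        while (count--) {
                int hi = hex_to_nibble(*src++);
                int lo = hex_to_nibble(*src++);

                if (hi < 0 || lo < 0)
                        return -1;
                *dst++ = (hi << 4) | lo;
        }
        return 0;
}

int main(void)
{
        /* Mirrors hex2bin(&buf[4], ptr, 8) on a SAS NAA identifier:
         * 16 hex characters collapse into 8 binary bytes. */
        unsigned char buf[8];
        int i;

        if (my_hex2bin(buf, "5001405e4d3f2a10", 8) == 0)
                for (i = 0; i < 8; i++)
                        printf("%02x", buf[i]);
        printf("\n");
        return 0;
}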
@@ -157,7 +155,7 @@ u32 fc_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- unsigned char binary, *ptr;
+ unsigned char *ptr;
int i;
u32 off = 8;
/*
@@ -172,12 +170,11 @@ u32 fc_get_pr_transport_id(
ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 24; ) {
- if (!(strncmp(&ptr[i], ":", 1))) {
+ if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
}
- binary = transport_asciihex_to_binaryhex(&ptr[i]);
- buf[off++] = binary;
+ hex2bin(&buf[off++], &ptr[i], 1);
i += 2;
}
/*
@@ -386,7 +383,7 @@ char *iscsi_parse_pr_out_transport_id(
* Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
- printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+ pr_err("Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
@@ -406,7 +403,7 @@ char *iscsi_parse_pr_out_transport_id(
tid_len += padding;
if ((add_len + 4) != tid_len) {
- printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+ pr_debug("LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
@@ -420,8 +417,8 @@ char *iscsi_parse_pr_out_transport_id(
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
- if (!(p)) {
- printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+ if (!p) {
+ pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
return NULL;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 150c4305f38..bc1b33639b8 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -42,18 +42,6 @@
#include "target_core_file.h"
-#if 1
-#define DEBUG_FD_CACHE(x...) printk(x)
-#else
-#define DEBUG_FD_CACHE(x...)
-#endif
-
-#if 1
-#define DEBUG_FD_FUA(x...) printk(x)
-#else
-#define DEBUG_FD_FUA(x...)
-#endif
-
static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
@@ -65,24 +53,21 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
- if (!(fd_host)) {
- printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
- return -1;
+ if (!fd_host) {
+ pr_err("Unable to allocate memory for struct fd_host\n");
+ return -ENOMEM;
}
fd_host->fd_host_id = host_id;
- atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) fd_host;
+ hba->hba_ptr = fd_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+ pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " Target Core with TCQ Depth: %d MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id,
- atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+ " MaxSectors: %u\n",
+ hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
return 0;
}
@@ -91,7 +76,7 @@ static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+ pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host);
@@ -104,14 +89,14 @@ static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
- if (!(fd_dev)) {
- printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+ if (!fd_dev) {
+ pr_err("Unable to allocate memory for struct fd_dev\n");
return NULL;
}
fd_dev->fd_host = fd_host;
- printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+ pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev;
}
@@ -144,7 +129,7 @@ static struct se_device *fd_create_virtdevice(
set_fs(old_fs);
if (IS_ERR(dev_p)) {
- printk(KERN_ERR "getname(%s) failed: %lu\n",
+ pr_err("getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
ret = PTR_ERR(dev_p);
goto fail;
@@ -167,12 +152,12 @@ static struct se_device *fd_create_virtdevice(
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file)) {
- printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", dev_p);
ret = PTR_ERR(file);
goto fail;
}
if (!file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+ pr_err("filp_open(%s) failed\n", dev_p);
goto fail;
}
fd_dev->fd_file = file;
@@ -202,14 +187,14 @@ static struct se_device *fd_create_virtdevice(
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
- printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+ pr_debug("FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
- printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+ pr_err("FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
@@ -226,15 +211,15 @@ static struct se_device *fd_create_virtdevice(
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba, &fileio_template,
- se_dev, dev_flags, (void *)fd_dev,
+ se_dev, dev_flags, fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
- if (!(dev))
+ if (!dev)
goto fail;
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
- printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+ pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
@@ -272,45 +257,45 @@ static inline struct fd_request *FILE_REQ(struct se_task *task)
static struct se_task *
-fd_alloc_task(struct se_cmd *cmd)
+fd_alloc_task(unsigned char *cdb)
{
struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
- if (!(fd_req)) {
- printk(KERN_ERR "Unable to allocate struct fd_request\n");
+ if (!fd_req) {
+ pr_err("Unable to allocate struct fd_request\n");
return NULL;
}
- fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
-
return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct file *fd = req->fd_dev->fd_file;
+ struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ loff_t pos = (task->task_lba *
+ task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret = 0, i;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
- if (!(iov)) {
- printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
- return -1;
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ if (!iov) {
+ pr_err("Unable to allocate fd_do_readv iov[]\n");
+ return -ENOMEM;
}
- for (i = 0; i < task->task_sg_num; i++) {
+ for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+ ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
@@ -321,16 +306,16 @@ static int fd_do_readv(struct se_task *task)
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
- printk(KERN_ERR "vfs_readv() returned %d,"
+ pr_err("vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
- return -1;
+ return (ret < 0 ? ret : -EINVAL);
}
} else {
if (ret < 0) {
- printk(KERN_ERR "vfs_readv() returned %d for non"
+ pr_err("vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
- return -1;
+ return ret;
}
}
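
fd_do_readv() gathers the scatterlist into a kernel iovec array and issues one vectored, positioned read; the set_fs(get_ds()) dance is what lets vfs_readv() accept kernel-space iovecs. A userspace analogue with preadv(2) standing in for vfs_readv() and a hypothetical struct seg playing the role of the scatterlist:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

struct seg { void *buf; size_t len; };  /* scatterlist stand-in */

static ssize_t gather_read(int fd, struct seg *sg, int nents, off_t pos)
{
        struct iovec *iov = calloc(nents, sizeof(*iov));
        ssize_t ret;
        int i;

        if (!iov)
                return -1;
        for (i = 0; i < nents; i++) {
                iov[i].iov_base = sg[i].buf;
                iov[i].iov_len = sg[i].len;
        }
        /* One syscall fills every segment, like vfs_readv() above. */
        ret = preadv(fd, iov, nents, pos);
        free(iov);
        return ret;
}

int main(void)
{
        char a[4], b[4];
        struct seg sg[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        int fd = open("/dev/zero", O_RDONLY);

        if (fd < 0)
                return 1;
        printf("read %zd bytes\n", gather_read(fd, sg, 2, 0));
        close(fd);
        return 0;
}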
@@ -340,34 +325,36 @@ static int fd_do_readv(struct se_task *task)
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
- struct file *fd = req->fd_dev->fd_file;
+ struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+ struct file *fd = dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
- loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
+ loff_t pos = (task->task_lba *
+ task->se_dev->se_sub_dev->se_dev_attrib.block_size);
int ret, i = 0;
- iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
- if (!(iov)) {
- printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
- return -1;
+ iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+ if (!iov) {
+ pr_err("Unable to allocate fd_do_writev iov[]\n");
+ return -ENOMEM;
}
- for (i = 0; i < task->task_sg_num; i++) {
+ for (i = 0; i < task->task_sg_nents; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
- ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+ ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != task->task_size) {
- printk(KERN_ERR "vfs_writev() returned %d\n", ret);
- return -1;
+ pr_err("vfs_writev() returned %d\n", ret);
+ return (ret < 0 ? ret : -EINVAL);
}
return 1;
@@ -375,10 +362,10 @@ static int fd_do_writev(struct se_task *task)
static void fd_emulate_sync_cache(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
- int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
+ int immed = (cmd->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
@@ -392,11 +379,11 @@ static void fd_emulate_sync_cache(struct se_task *task)
/*
* Determine if we will be flushing the entire device.
*/
- if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
+ if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
- start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
+ start = cmd->t_task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
@@ -405,7 +392,7 @@ static void fd_emulate_sync_cache(struct se_task *task)
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
- printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
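
The SYNCHRONIZE_CACHE handling above reduces to simple range math: LBA 0 with zero data length means flush the whole backing file, anything else flushes exactly the named byte span. An illustrative helper only; the real flush goes through vfs_fsync_range(), for which userspace has no exact equivalent:

#include <limits.h>
#include <stdio.h>

static void sync_cache_range(unsigned long long lba, unsigned int length,
                             unsigned int block_size,
                             long long *start, long long *end)
{
        if (lba == 0 && length == 0) {
                *start = 0;
                *end = LLONG_MAX;       /* flush the entire device */
        } else {
                *start = (long long)lba * block_size;
                *end = length ? *start + length : LLONG_MAX;
        }
}

int main(void)
{
        long long start, end;

        sync_cache_range(2048, 4096, 512, &start, &end);
        printf("flush [%lld, %lld)\n", start, end); /* [1048576, 1052672) */
        return 0;
}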
@@ -446,16 +433,16 @@ static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
- loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
+ loff_t start = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
loff_t end = start + task->task_size;
int ret;
- DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+ pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
- printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+ pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
static int fd_do_task(struct se_task *task)
@@ -474,9 +461,9 @@ static int fd_do_task(struct se_task *task)
ret = fd_do_writev(task);
if (ret > 0 &&
- DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
- DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
- T_TASK(cmd)->t_tasks_fua) {
+ dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
+ dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ cmd->t_tasks_fua) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
@@ -549,7 +536,7 @@ static ssize_t fd_set_configfs_dev_params(
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", arg_p);
kfree(arg_p);
- printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+ pr_debug("FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
@@ -562,23 +549,23 @@ static ssize_t fd_set_configfs_dev_params(
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
kfree(arg_p);
if (ret < 0) {
- printk(KERN_ERR "strict_strtoull() failed for"
+ pr_err("strict_strtoull() failed for"
" fd_dev_size=\n");
goto out;
}
- printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+ pr_debug("FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
match_int(args, &arg);
if (arg != 1) {
- printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+ pr_err("bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
- printk(KERN_INFO "FILEIO: Using buffered I/O"
+ pr_debug("FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@@ -598,8 +585,8 @@ static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
- printk(KERN_ERR "Missing fd_dev_name=\n");
- return -1;
+ pr_err("Missing fd_dev_name=\n");
+ return -EINVAL;
}
return 0;
@@ -654,7 +641,7 @@ static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
- DEV_ATTRIB(dev)->block_size);
+ dev->se_sub_dev->se_dev_attrib.block_size);
return blocks_long;
}
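
fd_get_blocks() divides with div_u64() because a plain 64-by-32 division is not universally available to 32-bit kernels (no libgcc). Userspace illustration of the size-to-blocks computation; div_u64_sketch() is a stand-in for the kernel helper, which hides the portable implementation:

#include <stdio.h>
#include <stdint.h>

static inline uint64_t div_u64_sketch(uint64_t dividend, uint32_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        uint64_t dev_size = 8ull * 1024 * 1024 * 1024;  /* 8 GiB file */

        printf("blocks: %llu\n",
               (unsigned long long)div_u64_sketch(dev_size, 512));
        return 0;
}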
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index ef4de2b4bd4..daebd710b89 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -4,8 +4,6 @@
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
-/* Maximum queuedepth for the FILEIO HBA */
-#define FD_HBA_QUEUE_DEPTH 256
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
@@ -18,8 +16,6 @@ struct fd_request {
struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
- /* FILEIO device */
- struct fd_dev *fd_dev;
} ____cacheline_aligned;
#define FBDF_HAS_PATH 0x01
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 0b8f8da8901..0639b975d6f 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -1,7 +1,7 @@
/*******************************************************************************
* Filename: target_core_hba.c
*
- * This file copntains the iSCSI HBA Transport related functions.
+ * This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
@@ -45,6 +45,11 @@
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
+static u32 hba_id_counter;
+
+static DEFINE_SPINLOCK(hba_lock);
+static LIST_HEAD(hba_list);
+
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
@@ -53,8 +58,8 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
- if (!(strcmp(s->name, sub_api->name))) {
- printk(KERN_ERR "%p is already registered with"
+ if (!strcmp(s->name, sub_api->name)) {
+ pr_err("%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
@@ -64,7 +69,7 @@ int transport_subsystem_register(struct se_subsystem_api *sub_api)
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
- printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+ pr_debug("TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
@@ -104,21 +109,17 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
- printk(KERN_ERR "Unable to allocate struct se_hba\n");
+ pr_err("Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
- spin_lock_init(&hba->hba_queue_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
- atomic_set(&hba->max_queue_depth, 0);
- atomic_set(&hba->left_queue_depth, 0);
-
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
@@ -129,12 +130,12 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
if (ret < 0)
goto out_module_put;
- spin_lock(&se_global->hba_lock);
- hba->hba_id = se_global->g_hba_id_counter++;
- list_add_tail(&hba->hba_list, &se_global->g_hba_list);
- spin_unlock(&se_global->hba_lock);
+ spin_lock(&hba_lock);
+ hba->hba_id = hba_id_counter++;
+ list_add_tail(&hba->hba_node, &hba_list);
+ spin_unlock(&hba_lock);
- printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+ pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
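
With se_global gone, target_core_hba.c keeps its bookkeeping in file-scoped statics, as the new hba_lock/hba_list/hba_id_counter declarations earlier in this file show. A condensed kernel-style sketch of the pattern, not the literal code:

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);
static u32 hba_id_counter;

static void hba_register(struct se_hba *hba)
{
        spin_lock(&hba_lock);
        /* ID assignment and list insertion share one critical section,
         * so IDs stay unique and list order matches allocation order. */
        hba->hba_id = hba_id_counter++;
        list_add_tail(&hba->hba_node, &hba_list);
        spin_unlock(&hba_lock);
}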
@@ -156,11 +157,11 @@ core_delete_hba(struct se_hba *hba)
hba->transport->detach_hba(hba);
- spin_lock(&se_global->hba_lock);
- list_del(&hba->hba_list);
- spin_unlock(&se_global->hba_lock);
+ spin_lock(&hba_lock);
+ list_del(&hba->hba_node);
+ spin_unlock(&hba_lock);
- printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+ pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 86639004af9..7e123410544 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -47,12 +47,6 @@
#include "target_core_iblock.h"
-#if 0
-#define DEBUG_IBLOCK(x...) printk(x)
-#else
-#define DEBUG_IBLOCK(x...)
-#endif
-
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
@@ -66,25 +60,22 @@ static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
struct iblock_hba *ib_host;
ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
- if (!(ib_host)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!ib_host) {
+ pr_err("Unable to allocate memory for"
" struct iblock_hba\n");
return -ENOMEM;
}
ib_host->iblock_host_id = host_id;
- atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) ib_host;
+ hba->hba_ptr = ib_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
- " Target Core TCQ Depth: %d\n", hba->hba_id,
- ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
+ pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
+ hba->hba_id, ib_host->iblock_host_id);
return 0;
}
@@ -93,7 +84,7 @@ static void iblock_detach_hba(struct se_hba *hba)
{
struct iblock_hba *ib_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+ pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
kfree(ib_host);
@@ -106,13 +97,13 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
struct iblock_hba *ib_host = hba->hba_ptr;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
- if (!(ib_dev)) {
- printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+ if (!ib_dev) {
+ pr_err("Unable to allocate struct iblock_dev\n");
return NULL;
}
ib_dev->ibd_host = ib_host;
- printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
+ pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
return ib_dev;
}
@@ -131,8 +122,8 @@ static struct se_device *iblock_create_virtdevice(
u32 dev_flags = 0;
int ret = -EINVAL;
- if (!(ib_dev)) {
- printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+ if (!ib_dev) {
+ pr_err("Unable to locate struct iblock_dev parameter\n");
return ERR_PTR(ret);
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -140,16 +131,16 @@ static struct se_device *iblock_create_virtdevice(
* These settings need to be made tunable..
*/
ib_dev->ibd_bio_set = bioset_create(32, 64);
- if (!(ib_dev->ibd_bio_set)) {
- printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+ if (!ib_dev->ibd_bio_set) {
+ pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
}
- printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+ pr_debug("IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
*/
- printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
+ pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -167,42 +158,41 @@ static struct se_device *iblock_create_virtdevice(
limits->logical_block_size = bdev_logical_block_size(bd);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
- dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
- dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
+ dev_limits.hw_queue_depth = q->nr_requests;
+ dev_limits.queue_depth = q->nr_requests;
- ib_dev->ibd_major = MAJOR(bd->bd_dev);
- ib_dev->ibd_minor = MINOR(bd->bd_dev);
ib_dev->ibd_bd = bd;
dev = transport_add_device_to_core_hba(hba,
- &iblock_template, se_dev, dev_flags, (void *)ib_dev,
+ &iblock_template, se_dev, dev_flags, ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
- if (!(dev))
+ if (!dev)
goto failed;
- ib_dev->ibd_depth = dev->queue_depth;
-
/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
* in ATA and we need to set TPE=1
*/
if (blk_queue_discard(q)) {
- DEV_ATTRIB(dev)->max_unmap_lba_count =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code..
*/
- DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
- DEV_ATTRIB(dev)->unmap_granularity =
+ dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity =
q->limits.discard_granularity;
- DEV_ATTRIB(dev)->unmap_granularity_alignment =
+ dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
- printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+ pr_debug("IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
+ if (blk_queue_nonrot(q))
+ dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
+
return dev;
failed:
@@ -211,8 +201,6 @@ failed:
ib_dev->ibd_bio_set = NULL;
}
ib_dev->ibd_bd = NULL;
- ib_dev->ibd_major = 0;
- ib_dev->ibd_minor = 0;
return ERR_PTR(ret);
}
@@ -233,17 +221,16 @@ static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
}
static struct se_task *
-iblock_alloc_task(struct se_cmd *cmd)
+iblock_alloc_task(unsigned char *cdb)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
- if (!(ib_req)) {
- printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+ if (!ib_req) {
+ pr_err("Unable to allocate memory for struct iblock_req\n");
return NULL;
}
- ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
@@ -257,12 +244,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
- if (block_size == DEV_ATTRIB(dev)->block_size)
+ if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
return blocks_long;
switch (block_size) {
case 4096:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 2048:
blocks_long <<= 1;
break;
@@ -276,7 +263,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 2048:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 1;
break;
@@ -291,7 +278,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 1024:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 2;
break;
@@ -306,7 +293,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
}
break;
case 512:
- switch (DEV_ATTRIB(dev)->block_size) {
+ switch (dev->se_sub_dev->se_dev_attrib.block_size) {
case 4096:
blocks_long >>= 3;
break;
@@ -332,9 +319,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
- int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
+ int immed = (cmd->t_task_cdb[1] & 0x2);
sector_t error_sector;
int ret;
@@ -351,7 +338,7 @@ static void iblock_emulate_sync_cache(struct se_task *task)
*/
ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
if (ret != 0) {
- printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+ pr_err("IBLOCK: block_issue_flush() failed: %d "
" error_sector: %llu\n", ret,
(unsigned long long)error_sector);
}
@@ -401,9 +388,9 @@ static int iblock_do_task(struct se_task *task)
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
- if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
- (DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
- T_TASK(task->task_se_cmd)->t_tasks_fua))
+ if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
+ (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+ task->task_se_cmd->t_tasks_fua))
rw = WRITE_FUA;
else
rw = WRITE;
@@ -415,8 +402,9 @@ static int iblock_do_task(struct se_task *task)
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
- DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
- " bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+ pr_debug("Calling submit_bio() task: %p bio: %p"
+ " bio->bi_sector: %llu\n", task, bio,
+ (unsigned long long)bio->bi_sector);
submit_bio(rw, bio);
bio = nbio;
@@ -470,7 +458,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
- int ret = 0, arg, token;
+ int ret = 0, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -486,7 +474,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
- printk(KERN_ERR "Unable to set udev_path= while"
+ pr_err("Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
@@ -499,15 +487,11 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", arg_p);
kfree(arg_p);
- printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+ pr_debug("IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_force:
- match_int(args, &arg);
- ib_dev->ibd_force = arg;
- printk(KERN_INFO "IBLOCK: Set force=%d\n",
- ib_dev->ibd_force);
break;
default:
break;
@@ -526,8 +510,8 @@ static ssize_t iblock_check_configfs_dev_params(
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
- printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
- return -1;
+ pr_err("Missing udev_path= parameters for IBLOCK\n");
+ return -EINVAL;
}
return 0;
@@ -555,12 +539,11 @@ static ssize_t iblock_show_configfs_dev_params(
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
- ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
+ MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
- bl += sprintf(b + bl, "Major: %d Minor: %d\n",
- ibd->ibd_major, ibd->ibd_minor);
+ bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
}
return bl;
@@ -585,103 +568,103 @@ static struct bio *iblock_get_bio(
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
- if (!(bio)) {
- printk(KERN_ERR "Unable to allocate memory for bio\n");
+ if (!bio) {
+ pr_err("Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
- DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
- " %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
- DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+ pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
+ " %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
+ pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
- bio->bi_private = (void *) task;
+ bio->bi_private = task;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->ib_bio_cnt);
- DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
- DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+ pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
+ pr_debug("Set ib_req->ib_bio_cnt: %d\n",
atomic_read(&ib_req->ib_bio_cnt));
return bio;
}
-static int iblock_map_task_SG(struct se_task *task)
+static int iblock_map_data_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct scatterlist *sg;
int ret = 0;
- u32 i, sg_num = task->task_sg_num;
+ u32 i, sg_num = task->task_sg_nents;
sector_t block_lba;
/*
* Do starting conversion up from non 512-byte blocksize with
* struct se_task SCSI blocksize into Linux/Block 512 units for BIO.
*/
- if (DEV_ATTRIB(dev)->block_size == 4096)
+ if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
block_lba = (task->task_lba << 3);
- else if (DEV_ATTRIB(dev)->block_size == 2048)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
block_lba = (task->task_lba << 2);
- else if (DEV_ATTRIB(dev)->block_size == 1024)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
block_lba = (task->task_lba << 1);
- else if (DEV_ATTRIB(dev)->block_size == 512)
+ else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
block_lba = task->task_lba;
else {
- printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", DEV_ATTRIB(dev)->block_size);
+ pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
+ " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
- if (!(bio))
+ if (!bio)
return ret;
ib_req->ib_bio = bio;
hbio = tbio = bio;
/*
* Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
- * from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
+ * from task->task_sg -> struct scatterlist memory.
*/
- for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
- DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
" %p len: %u offset: %u\n", task, bio, sg_page(sg),
sg->length, sg->offset);
again:
ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
if (ret != sg->length) {
- DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
- bio->bi_sector);
- DEBUG_IBLOCK("** task->task_size: %u\n",
+ pr_debug("*** Set bio->bi_sector: %llu\n",
+ (unsigned long long)bio->bi_sector);
+ pr_debug("** task->task_size: %u\n",
task->task_size);
- DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+ pr_debug("*** bio->bi_max_vecs: %u\n",
bio->bi_max_vecs);
- DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+ pr_debug("*** bio->bi_vcnt: %u\n",
bio->bi_vcnt);
bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
block_lba, sg_num);
- if (!(bio))
+ if (!bio)
goto fail;
tbio = tbio->bi_next = bio;
- DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+ pr_debug("-----------------> Added +1 bio: %p to"
" list, Going to again\n", bio);
goto again;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
- DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+ pr_debug("task: %p bio-add_page() passed!, decremented"
" sg_num to %u\n", task, sg_num);
- DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
- " to %llu\n", task, block_lba);
- DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+ pr_debug("task: %p bio_add_page() passed!, increased lba"
+ " to %llu\n", task, (unsigned long long)block_lba);
+ pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
" %u\n", task, bio->bi_vcnt);
}
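
The block-size ladder at the top of iblock_map_data_SG() is a power-of-two unit conversion: BIOs always address 512-byte sectors, so a logical block of size bs spans bs/512 sectors and the LBA shifts left by log2(bs/512). A hypothetical helper expressing the same table, not code from the patch:

#include <stdio.h>

/* 512 -> shift 0, 1024 -> 1, 2048 -> 2, 4096 -> 3 */
static unsigned long long lba_to_512_sector(unsigned long long lba,
                                            unsigned int block_size)
{
        unsigned int shift = 0;

        while ((512u << shift) < block_size)
                shift++;
        return lba << shift;
}

int main(void)
{
        printf("%llu\n", lba_to_512_sector(100, 4096)); /* prints 800 */
        return 0;
}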
@@ -727,11 +710,11 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Set -EIO if !BIO_UPTODATE and the passed is still err=0
*/
- if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+ if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
err = -EIO;
if (err != 0) {
- printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+ pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
@@ -742,15 +725,15 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio as completed.
*/
- if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
ibr->ib_bio = NULL;
transport_complete_task(task, 0);
return;
}
- DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
- task, bio, task->task_lba, bio->bi_sector, err);
+ pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+ task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
/*
* bio_put() will call iblock_bio_destructor() to release the bio back
* to ibr->ib_bio_set.
@@ -759,7 +742,7 @@ static void iblock_bio_done(struct bio *bio, int err)
/*
* Wait to complete the task until the last bio as completed.
*/
- if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+ if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
return;
/*
* Return GOOD status for task if zero ib_bio_err_cnt exists.
@@ -772,7 +755,7 @@ static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
- .map_task_SG = iblock_map_task_SG,
+ .map_data_SG = iblock_map_data_SG,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 64c1f4d69f7..a121cd1b657 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -3,9 +3,6 @@
#define IBLOCK_VERSION "4.0"
-#define IBLOCK_HBA_QUEUE_DEPTH 512
-#define IBLOCK_DEVICE_QUEUE_DEPTH 32
-#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
#define IBLOCK_LBA_SHIFT 9
@@ -15,18 +12,12 @@ struct iblock_req {
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
struct bio *ib_bio;
- struct iblock_dev *ib_dev;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
-#define IBDF_HAS_FORCE 0x02
struct iblock_dev {
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
- int ibd_force;
- int ibd_major;
- int ibd_minor;
- u32 ibd_depth;
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
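
Dropping the cached ibd_major/ibd_minor fields works because the numbers are always recoverable from bd->bd_dev via MAJOR()/MINOR(), as the iblock_show_configfs_dev_params() change earlier in this file does. A userspace sketch of the same derivation using the stat(2) encoding; "/dev/sda" is just an example node:

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
        struct stat st;

        if (stat("/dev/sda", &st) != 0)
                return 1;
        /* major()/minor() mirror the kernel's MAJOR()/MINOR() macros. */
        printf("Major: %u Minor: %u\n",
               major(st.st_rdev), minor(st.st_rdev));
        return 0;
}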
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index b662db3a320..1c1b849cd4f 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -62,7 +62,7 @@ int core_pr_dump_initiator_port(
char *buf,
u32 size)
{
- if (!(pr_reg->isid_present_at_reg))
+ if (!pr_reg->isid_present_at_reg)
return 0;
snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
@@ -95,7 +95,7 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
struct se_session *sess = cmd->se_sess;
int ret;
- if (!(sess))
+ if (!sess)
return 0;
spin_lock(&dev->dev_reservation_lock);
@@ -105,13 +105,13 @@ static int core_scsi2_reservation_check(struct se_cmd *cmd, u32 *pr_reg_type)
}
if (dev->dev_reserved_node_acl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
- return -1;
+ return -EINVAL;
}
if (!(dev->dev_flags & DF_SPC2_RESERVATIONS_WITH_ISID)) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
- ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -1;
+ ret = (dev->dev_res_bin_isid == sess->sess_bin_isid) ? 0 : -EINVAL;
spin_unlock(&dev->dev_reservation_lock);
return ret;
@@ -123,7 +123,7 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- if (!(sess) || !(tpg))
+ if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
@@ -142,9 +142,9 @@ static int core_scsi2_reservation_release(struct se_cmd *cmd)
dev->dev_res_bin_isid = 0;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
}
- printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
- " MAPPED LUN: %u for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
+ " MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -157,9 +157,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- if ((T_TASK(cmd)->t_task_cdb[1] & 0x01) &&
- (T_TASK(cmd)->t_task_cdb[1] & 0x02)) {
- printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+ if ((cmd->t_task_cdb[1] & 0x01) &&
+ (cmd->t_task_cdb[1] & 0x02)) {
+ pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
return PYX_TRANSPORT_ILLEGAL_REQUEST;
}
@@ -167,19 +167,19 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
* This is currently the case for target_core_mod passthrough struct se_cmd
* ops
*/
- if (!(sess) || !(tpg))
+ if (!sess || !tpg)
return 0;
spin_lock(&dev->dev_reservation_lock);
if (dev->dev_reserved_node_acl &&
(dev->dev_reserved_node_acl != sess->se_node_acl)) {
- printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
- TPG_TFO(tpg)->get_fabric_name());
- printk(KERN_ERR "Original reserver LUN: %u %s\n",
- SE_LUN(cmd)->unpacked_lun,
+ pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ tpg->se_tpg_tfo->get_fabric_name());
+ pr_err("Original reserver LUN: %u %s\n",
+ cmd->se_lun->unpacked_lun,
dev->dev_reserved_node_acl->initiatorname);
- printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
- " from %s \n", SE_LUN(cmd)->unpacked_lun,
+ pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
+ " from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -192,9 +192,9 @@ static int core_scsi2_reservation_reserve(struct se_cmd *cmd)
dev->dev_res_bin_isid = sess->sess_bin_isid;
dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
}
- printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
- " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- SE_LUN(cmd)->unpacked_lun, cmd->se_deve->mapped_lun,
+ pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -215,15 +215,15 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
struct se_session *se_sess = cmd->se_sess;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- struct t10_reservation_template *pr_tmpl = &su_dev->t10_reservation;
- unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
- int crh = (T10_RES(su_dev)->res_type == SPC3_PERSISTENT_RESERVATIONS);
+ struct t10_reservation *pr_tmpl = &su_dev->t10_pr;
+ unsigned char *cdb = &cmd->t_task_cdb[0];
+ int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
int conflict = 0;
- if (!(se_sess))
+ if (!se_sess)
return 0;
- if (!(crh))
+ if (!crh)
goto after_crh;
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -280,7 +280,7 @@ int core_scsi2_emulate_crh(struct se_cmd *cmd)
}
if (conflict) {
- printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+ pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -307,7 +307,7 @@ static int core_scsi3_pr_seq_non_holder(
u32 pr_reg_type)
{
struct se_dev_entry *se_deve;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
int other_cdb = 0, ignore_reg;
int registered_nexus = 0, ret = 1; /* Conflict by default */
int all_reg = 0, reg_only = 0; /* ALL_REG, REG_ONLY */
@@ -362,7 +362,7 @@ static int core_scsi3_pr_seq_non_holder(
registered_nexus = 1;
break;
default:
- return -1;
+ return -EINVAL;
}
/*
* Referenced from spc4r17 table 45 for *NON* PR holder access
@@ -412,9 +412,9 @@ static int core_scsi3_pr_seq_non_holder(
ret = (registered_nexus) ? 0 : 1;
break;
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- return -1;
+ return -EINVAL;
}
break;
case RELEASE:
@@ -459,9 +459,9 @@ static int core_scsi3_pr_seq_non_holder(
ret = 0; /* Allowed */
break;
default:
- printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+ pr_err("Unknown MI Service Action: 0x%02x\n",
(cdb[1] & 0x1f));
- return -1;
+ return -EINVAL;
}
break;
case ACCESS_CONTROL_IN:
@@ -481,9 +481,9 @@ static int core_scsi3_pr_seq_non_holder(
* Case where the CDB is explicitly allowed in the above switch
* statement.
*/
- if (!(ret) && !(other_cdb)) {
+ if (!ret && !other_cdb) {
#if 0
- printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+ pr_debug("Allowing explict CDB: 0x%02x for %s"
" reservation holder\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
@@ -498,7 +498,7 @@ static int core_scsi3_pr_seq_non_holder(
/*
* Conflict for write exclusive
*/
- printk(KERN_INFO "%s Conflict for unregistered nexus"
+ pr_debug("%s Conflict for unregistered nexus"
" %s CDB: 0x%02x to %s reservation\n",
transport_dump_cmd_direction(cmd),
se_sess->se_node_acl->initiatorname, cdb[0],
@@ -515,8 +515,8 @@ static int core_scsi3_pr_seq_non_holder(
* nexuses to issue CDBs.
*/
#if 0
- if (!(registered_nexus)) {
- printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+ if (!registered_nexus) {
+ pr_debug("Allowing implict CDB: 0x%02x"
" for %s reservation on unregistered"
" nexus\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
@@ -531,14 +531,14 @@ static int core_scsi3_pr_seq_non_holder(
* allow commands from registered nexuses.
*/
#if 0
- printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+ pr_debug("Allowing implict CDB: 0x%02x for %s"
" reservation\n", cdb[0],
core_scsi3_pr_dump_type(pr_reg_type));
#endif
return 0;
}
}
- printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+ pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
(registered_nexus) ? "" : "un",
se_sess->se_node_acl->initiatorname, cdb[0],
@@ -549,7 +549,7 @@ static int core_scsi3_pr_seq_non_holder(
static u32 core_scsi3_pr_generation(struct se_device *dev)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
u32 prg;
/*
* PRGeneration field shall contain the value of a 32-bit wrapping
@@ -561,7 +561,7 @@ static u32 core_scsi3_pr_generation(struct se_device *dev)
* See spc4r17 section 6.3.12 READ_KEYS service action
*/
spin_lock(&dev->dev_reservation_lock);
- prg = T10_RES(su_dev)->pr_generation++;
+ prg = su_dev->t10_pr.pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
return prg;
@@ -575,7 +575,7 @@ static int core_scsi3_pr_reservation_check(
struct se_session *sess = cmd->se_sess;
int ret;
- if (!(sess))
+ if (!sess)
return 0;
/*
* A legacy SPC-2 reservation is being held.
@@ -584,7 +584,7 @@ static int core_scsi3_pr_reservation_check(
return core_scsi2_reservation_check(cmd, pr_reg_type);
spin_lock(&dev->dev_reservation_lock);
- if (!(dev->dev_pr_res_holder)) {
+ if (!dev->dev_pr_res_holder) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
@@ -592,14 +592,14 @@ static int core_scsi3_pr_reservation_check(
cmd->pr_res_key = dev->dev_pr_res_holder->pr_res_key;
if (dev->dev_pr_res_holder->pr_reg_nacl != sess->se_node_acl) {
spin_unlock(&dev->dev_reservation_lock);
- return -1;
+ return -EINVAL;
}
- if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+ if (!dev->dev_pr_res_holder->isid_present_at_reg) {
spin_unlock(&dev->dev_reservation_lock);
return 0;
}
ret = (dev->dev_pr_res_holder->pr_reg_bin_isid ==
- sess->sess_bin_isid) ? 0 : -1;
+ sess->sess_bin_isid) ? 0 : -EINVAL;
/*
* Use bit in *pr_reg_type to notify ISID mismatch in
* core_scsi3_pr_seq_non_holder().
@@ -620,19 +620,19 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
int all_tg_pt,
int aptpl)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
- if (!(pr_reg)) {
- printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+ if (!pr_reg) {
+ pr_err("Unable to allocate struct t10_pr_registration\n");
return NULL;
}
- pr_reg->pr_aptpl_buf = kzalloc(T10_RES(su_dev)->pr_aptpl_buf_len,
+ pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
GFP_ATOMIC);
- if (!(pr_reg->pr_aptpl_buf)) {
- printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+ if (!pr_reg->pr_aptpl_buf) {
+ pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
kmem_cache_free(t10_pr_reg_cache, pr_reg);
return NULL;
}
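Worth noting in the hunk above: the allocator acquires two resources in sequence (a slab object, then a kzalloc'd APTPL buffer) and releases the first when the second fails, so the error path leaks nothing. The unwind shape, sketched with invented names:

    #include <linux/slab.h>

    struct reg { void *aptpl_buf; };

    static struct reg *alloc_reg(struct kmem_cache *cache, size_t buf_len)
    {
            struct reg *r = kmem_cache_zalloc(cache, GFP_ATOMIC);

            if (!r)
                    return NULL;
            r->aptpl_buf = kzalloc(buf_len, GFP_ATOMIC);
            if (!r->aptpl_buf) {
                    kmem_cache_free(cache, r);  /* undo allocation #1 */
                    return NULL;
            }
            return r;
    }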
@@ -692,12 +692,12 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/
pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg))
+ if (!pr_reg)
return NULL;
/*
* Return pointer to pr_reg for ALL_TG_PT=0
*/
- if (!(all_tg_pt))
+ if (!all_tg_pt)
return pr_reg;
/*
* Create list of matching SCSI Initiator Port registrations
@@ -717,7 +717,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
* that have not been make explict via a ConfigFS
* MappedLUN group for the SCSI Initiator Node ACL.
*/
- if (!(deve_tmp->se_lun_acl))
+ if (!deve_tmp->se_lun_acl)
continue;
nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
@@ -751,7 +751,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
*/
ret = core_scsi3_lunacl_depend_item(deve_tmp);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend"
+ pr_err("core_scsi3_lunacl_depend"
"_item() failed\n");
atomic_dec(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic_dec();
@@ -769,7 +769,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
nacl_tmp, deve_tmp, NULL,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg_atp)) {
+ if (!pr_reg_atp) {
atomic_dec(&port->sep_tg_pt_ref_cnt);
smp_mb__after_atomic_dec();
atomic_dec(&deve_tmp->pr_ref_count);
@@ -803,7 +803,7 @@ out:
}
int core_scsi3_alloc_aptpl_registration(
- struct t10_reservation_template *pr_tmpl,
+ struct t10_reservation *pr_tmpl,
u64 sa_res_key,
unsigned char *i_port,
unsigned char *isid,
@@ -817,15 +817,15 @@ int core_scsi3_alloc_aptpl_registration(
{
struct t10_pr_registration *pr_reg;
- if (!(i_port) || !(t_port) || !(sa_res_key)) {
- printk(KERN_ERR "Illegal parameters for APTPL registration\n");
- return -1;
+ if (!i_port || !t_port || !sa_res_key) {
+ pr_err("Illegal parameters for APTPL registration\n");
+ return -EINVAL;
}
pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
- if (!(pr_reg)) {
- printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
- return -1;
+ if (!pr_reg) {
+ pr_err("Unable to allocate struct t10_pr_registration\n");
+ return -ENOMEM;
}
pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
@@ -869,7 +869,7 @@ int core_scsi3_alloc_aptpl_registration(
pr_reg->pr_res_holder = res_holder;
list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
- printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+ pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
" metadata\n", (res_holder) ? "+reservation" : "");
return 0;
}
@@ -891,13 +891,13 @@ static void core_scsi3_aptpl_reserve(
dev->dev_pr_res_holder = pr_reg;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+ pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
- TPG_TFO(tpg)->get_fabric_name(), node_acl->initiatorname,
+ pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
}
@@ -913,7 +913,7 @@ static int __core_scsi3_check_aptpl_registration(
struct se_dev_entry *deve)
{
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
unsigned char i_port[PR_APTPL_MAX_IPORT_LEN];
unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
u16 tpgt;
@@ -925,8 +925,8 @@ static int __core_scsi3_check_aptpl_registration(
*/
snprintf(i_port, PR_APTPL_MAX_IPORT_LEN, "%s", nacl->initiatorname);
snprintf(t_port, PR_APTPL_MAX_TPORT_LEN, "%s",
- TPG_TFO(tpg)->tpg_get_wwn(tpg));
- tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg));
+ tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
/*
* Look for the matching registrations+reservation from those
* created from APTPL metadata. Note that multiple registrations
@@ -936,7 +936,7 @@ static int __core_scsi3_check_aptpl_registration(
spin_lock(&pr_tmpl->aptpl_reg_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
pr_reg_aptpl_list) {
- if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+ if (!strcmp(pr_reg->pr_iport, i_port) &&
(pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
!(strcmp(pr_reg->pr_tport, t_port)) &&
(pr_reg->pr_reg_tpgt == tpgt) &&
@@ -980,11 +980,11 @@ int core_scsi3_check_aptpl_registration(
struct se_lun *lun,
struct se_lun_acl *lun_acl)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct se_node_acl *nacl = lun_acl->se_lun_nacl;
struct se_dev_entry *deve = &nacl->device_list[lun_acl->mapped_lun];
- if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
+ if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return __core_scsi3_check_aptpl_registration(dev, tpg, lun,
@@ -1006,19 +1006,19 @@ static void __core_scsi3_dump_registration(
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
"_AND_MOVE" : (register_type == 1) ?
"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
(prf_isid) ? i_buf : "");
- printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+ pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
tfo->tpg_get_tag(se_tpg));
- printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
- TRANSPORT(dev)->name);
- printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ dev->transport->name);
+ pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
" 0x%08x APTPL: %d\n", tfo->get_fabric_name(),
pr_reg->pr_res_key, pr_reg->pr_res_generation,
pr_reg->pr_reg_aptpl);
@@ -1035,10 +1035,10 @@ static void __core_scsi3_add_registration(
int register_type,
int register_move)
{
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct target_core_fabric_ops *tfo = nacl->se_tpg->se_tpg_tfo;
struct t10_pr_registration *pr_reg_tmp, *pr_reg_tmp_safe;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
/*
* Increment PRgeneration counter for struct se_device upon a successful
@@ -1050,7 +1050,7 @@ static void __core_scsi3_add_registration(
* for the REGISTER.
*/
pr_reg->pr_res_generation = (register_move) ?
- T10_RES(su_dev)->pr_generation++ :
+ su_dev->t10_pr.pr_generation++ :
core_scsi3_pr_generation(dev);
spin_lock(&pr_tmpl->registration_lock);
@@ -1062,7 +1062,7 @@ static void __core_scsi3_add_registration(
/*
* Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
*/
- if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+ if (!pr_reg->pr_reg_all_tg_pt || register_move)
return;
/*
* Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
@@ -1106,8 +1106,8 @@ static int core_scsi3_alloc_registration(
pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(pr_reg))
- return -1;
+ if (!pr_reg)
+ return -EPERM;
__core_scsi3_add_registration(dev, nacl, pr_reg,
register_type, register_move);
@@ -1119,7 +1119,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
struct se_node_acl *nacl,
unsigned char *isid)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
struct se_portal_group *tpg;
@@ -1137,14 +1137,14 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* If this registration does NOT contain a fabric provided
* ISID, then we have found a match.
*/
- if (!(pr_reg->isid_present_at_reg)) {
+ if (!pr_reg->isid_present_at_reg) {
/*
* Determine if this SCSI device server requires that
* SCSI Intiatior TransportID w/ ISIDs is enforced
* for fabric modules (iSCSI) requiring them.
*/
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
- if (DEV_ATTRIB(dev)->enforce_pr_isids)
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
+ if (dev->se_sub_dev->se_dev_attrib.enforce_pr_isids)
continue;
}
atomic_inc(&pr_reg->pr_res_holders);
@@ -1157,7 +1157,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
* SCSI Initiator Port TransportIDs, then we expect a valid
* matching ISID to be provided by the local SCSI Initiator Port.
*/
- if (!(isid))
+ if (!isid)
continue;
if (strcmp(isid, pr_reg->pr_reg_isid))
continue;
@@ -1180,9 +1180,9 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
struct se_portal_group *tpg = nacl->se_tpg;
unsigned char buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL) {
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(tpg)->sess_get_initiator_sid(sess, &buf[0],
+ tpg->se_tpg_tfo->sess_get_initiator_sid(sess, &buf[0],
PR_REG_ISID_LEN);
isid_ptr = &buf[0];
}
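sess_get_initiator_sid() is an optional member of the fabric ops table — only fabrics that carry initiator session IDs (iSCSI, notably) implement it — which is why every call site tests the function pointer before calling. The pattern in isolation, with hypothetical type names:

    #include <linux/types.h>

    struct session;                 /* opaque for this sketch */

    struct fabric_ops {
            /* optional; fabrics without ISIDs leave it NULL */
            u32 (*sess_get_initiator_sid)(struct session *, unsigned char *, u32);
    };

    static void fill_isid(const struct fabric_ops *tfo, struct session *sess,
                          unsigned char *buf, u32 len)
    {
            buf[0] = '\0';
            if (tfo->sess_get_initiator_sid)
                    tfo->sess_get_initiator_sid(sess, buf, len);
    }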
@@ -1206,7 +1206,7 @@ static int core_scsi3_check_implict_release(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
+ if (!pr_res_holder) {
spin_unlock(&dev->dev_reservation_lock);
return ret;
}
@@ -1236,11 +1236,11 @@ static int core_scsi3_check_implict_release(
(!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
pr_reg->pr_reg_nacl->initiatorname)) &&
(pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
- printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+ pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
" UNREGISTER while existing reservation with matching"
" key 0x%016Lx is present from another SCSI Initiator"
" Port\n", pr_reg->pr_res_key);
- ret = -1;
+ ret = -EPERM;
}
spin_unlock(&dev->dev_reservation_lock);
@@ -1248,7 +1248,7 @@ static int core_scsi3_check_implict_release(
}
/*
- * Called with struct t10_reservation_template->registration_lock held.
+ * Called with struct t10_reservation->registration_lock held.
*/
static void __core_scsi3_free_registration(
struct se_device *dev,
@@ -1258,7 +1258,7 @@ static void __core_scsi3_free_registration(
{
struct target_core_fabric_ops *tfo =
pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
@@ -1283,25 +1283,25 @@ static void __core_scsi3_free_registration(
*/
while (atomic_read(&pr_reg->pr_res_holders) != 0) {
spin_unlock(&pr_tmpl->registration_lock);
- printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+ pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
tfo->get_fabric_name());
cpu_relax();
spin_lock(&pr_tmpl->registration_lock);
}
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+ pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
" Node: %s%s\n", tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
- printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+ pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
" Port(s)\n", tfo->get_fabric_name(),
(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
- TRANSPORT(dev)->name);
- printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+ dev->transport->name);
+ pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
pr_reg->pr_res_generation);
- if (!(preempt_and_abort_list)) {
+ if (!preempt_and_abort_list) {
pr_reg->pr_reg_deve = NULL;
pr_reg->pr_reg_nacl = NULL;
kfree(pr_reg->pr_aptpl_buf);
@@ -1319,7 +1319,7 @@ void core_scsi3_free_pr_reg_from_nacl(
struct se_device *dev,
struct se_node_acl *nacl)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
/*
* If the passed se_node_acl matches the reservation holder,
@@ -1349,7 +1349,7 @@ void core_scsi3_free_pr_reg_from_nacl(
void core_scsi3_free_all_registrations(
struct se_device *dev)
{
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
spin_lock(&dev->dev_reservation_lock);
@@ -1381,13 +1381,13 @@ void core_scsi3_free_all_registrations(
static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
{
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item);
}
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
{
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&tpg->tpg_group.cg_item);
atomic_dec(&tpg->tpg_pr_ref_count);
@@ -1401,7 +1401,7 @@ static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
if (nacl->dynamic_node_acl)
return 0;
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
}
@@ -1415,7 +1415,7 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
return;
}
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&nacl->acl_group.cg_item);
atomic_dec(&nacl->acl_pr_ref_count);
@@ -1430,13 +1430,13 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
/*
* For nacl->dynamic_node_acl=1
*/
- if (!(lun_acl))
+ if (!lun_acl)
return 0;
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
- return configfs_depend_item(TPG_TFO(tpg)->tf_subsys,
+ return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item);
}
@@ -1448,7 +1448,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
/*
* For nacl->dynamic_node_acl=1
*/
- if (!(lun_acl)) {
+ if (!lun_acl) {
atomic_dec(&se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
return;
@@ -1456,7 +1456,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
nacl = lun_acl->se_lun_nacl;
tpg = nacl->se_tpg;
- configfs_undepend_item(TPG_TFO(tpg)->tf_subsys,
+ configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
&lun_acl->se_lun_group.cg_item);
atomic_dec(&se_deve->pr_ref_count);
@@ -1471,10 +1471,10 @@ static int core_scsi3_decode_spec_i_port(
int all_tg_pt,
int aptpl)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_port *tmp_port;
struct se_portal_group *dest_tpg = NULL, *tmp_tpg;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct se_node_acl *dest_node_acl = NULL;
struct se_dev_entry *dest_se_deve = NULL, *local_se_deve;
struct t10_pr_registration *dest_pr_reg, *local_pr_reg, *pr_reg_e;
@@ -1482,7 +1482,7 @@ static int core_scsi3_decode_spec_i_port(
struct list_head tid_dest_list;
struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
struct target_core_fabric_ops *tmp_tf_ops;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tpdl, tid_len = 0;
@@ -1500,8 +1500,8 @@ static int core_scsi3_decode_spec_i_port(
* processing in the loop of tid_dest_list below.
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
- if (!(tidh_new)) {
- printk(KERN_ERR "Unable to allocate tidh_new\n");
+ if (!tidh_new) {
+ pr_err("Unable to allocate tidh_new\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1509,10 +1509,10 @@ static int core_scsi3_decode_spec_i_port(
tidh_new->dest_node_acl = se_sess->se_node_acl;
tidh_new->dest_se_deve = local_se_deve;
- local_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, local_se_deve, l_isid,
sa_res_key, all_tg_pt, aptpl);
- if (!(local_pr_reg)) {
+ if (!local_pr_reg) {
kfree(tidh_new);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -1524,6 +1524,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
+
+ buf = transport_kmap_first_data_page(cmd);
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
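This is one of the substantive changes in the patch: the parameter list is no longer reached through the old t_task_buf pointer but mapped on demand with transport_kmap_first_data_page() and unmapped once parsing is done. A sketch of the access pattern these hunks imply (a simplification, with the validation folded in):

    static int parse_prout_tpdl(struct se_cmd *cmd)
    {
            unsigned char *buf = transport_kmap_first_data_page(cmd);
            u32 tpdl;

            tpdl = ((u32)buf[24] << 24) | ((u32)buf[25] << 16) |
                   ((u32)buf[26] << 8)  |   buf[27];
            transport_kunmap_first_data_page(cmd); /* every exit path must unmap */

            return (tpdl + 28 == cmd->data_length) ? 0 : -EINVAL;
    }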
@@ -1535,7 +1537,7 @@ static int core_scsi3_decode_spec_i_port(
tpdl |= buf[27] & 0xff;
if ((tpdl + 28) != cmd->data_length) {
- printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+ pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
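As an aside, the shift/or chain that rebuilds tpdl is the open-coded form of a big-endian 32-bit load; the kernel's unaligned helpers express the same thing in one call (a stylistic alternative, not something this patch adopts):

    #include <asm/unaligned.h>

    static u32 read_tpdl(const unsigned char *buf)
    {
            return get_unaligned_be32(&buf[24]); /* equals the four-shift version */
    }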
@@ -1555,13 +1557,13 @@ static int core_scsi3_decode_spec_i_port(
spin_lock(&dev->se_port_lock);
list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
tmp_tpg = tmp_port->sep_tpg;
- if (!(tmp_tpg))
+ if (!tmp_tpg)
continue;
- tmp_tf_ops = TPG_TFO(tmp_tpg);
- if (!(tmp_tf_ops))
+ tmp_tf_ops = tmp_tpg->se_tpg_tfo;
+ if (!tmp_tf_ops)
continue;
- if (!(tmp_tf_ops->get_fabric_proto_ident) ||
- !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+ if (!tmp_tf_ops->get_fabric_proto_ident ||
+ !tmp_tf_ops->tpg_parse_pr_out_transport_id)
continue;
/*
* Look for the matching proto_ident provided by
@@ -1575,7 +1577,7 @@ static int core_scsi3_decode_spec_i_port(
i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
tmp_tpg, (const char *)ptr, &tid_len,
&iport_ptr);
- if (!(i_str))
+ if (!i_str)
continue;
atomic_inc(&tmp_tpg->tpg_pr_ref_count);
@@ -1584,7 +1586,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_tpg_depend_item(tmp_tpg);
if (ret != 0) {
- printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+ pr_err(" core_scsi3_tpg_depend_item()"
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1605,7 +1607,7 @@ static int core_scsi3_decode_spec_i_port(
}
spin_unlock_bh(&tmp_tpg->acl_node_lock);
- if (!(dest_node_acl)) {
+ if (!dest_node_acl) {
core_scsi3_tpg_undepend_item(tmp_tpg);
spin_lock(&dev->se_port_lock);
continue;
@@ -1613,7 +1615,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
if (ret != 0) {
- printk(KERN_ERR "configfs_depend_item() failed"
+ pr_err("configfs_depend_item() failed"
" for dest_node_acl->acl_group\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1623,9 +1625,9 @@ static int core_scsi3_decode_spec_i_port(
}
dest_tpg = tmp_tpg;
- printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
" %s Port RTPI: %hu\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_rtpi);
spin_lock(&dev->se_port_lock);
@@ -1633,20 +1635,20 @@ static int core_scsi3_decode_spec_i_port(
}
spin_unlock(&dev->se_port_lock);
- if (!(dest_tpg)) {
- printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+ if (!dest_tpg) {
+ pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
#if 0
- printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+ pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
" tid_len: %d for %s + %s\n",
- TPG_TFO(dest_tpg)->get_fabric_name(), cmd->data_length,
+ dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
tpdl, tid_len, i_str, iport_ptr);
#endif
if (tid_len > tpdl) {
- printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+ pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1660,10 +1662,10 @@ static int core_scsi3_decode_spec_i_port(
*/
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
dest_rtpi);
- if (!(dest_se_deve)) {
- printk(KERN_ERR "Unable to locate %s dest_se_deve"
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve"
" from destination RTPI: %hu\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_rtpi);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
@@ -1674,7 +1676,7 @@ static int core_scsi3_decode_spec_i_port(
ret = core_scsi3_lunacl_depend_item(dest_se_deve);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+ pr_err("core_scsi3_lunacl_depend_item()"
" failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
@@ -1684,9 +1686,9 @@ static int core_scsi3_decode_spec_i_port(
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+ pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
" dest_se_deve mapped_lun: %u\n",
- TPG_TFO(dest_tpg)->get_fabric_name(),
+ dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
#endif
/*
@@ -1712,8 +1714,8 @@ static int core_scsi3_decode_spec_i_port(
*/
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
GFP_KERNEL);
- if (!(tidh_new)) {
- printk(KERN_ERR "Unable to allocate tidh_new\n");
+ if (!tidh_new) {
+ pr_err("Unable to allocate tidh_new\n");
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1741,10 +1743,10 @@ static int core_scsi3_decode_spec_i_port(
* and then call __core_scsi3_add_registration() in the
* 2nd loop which will never fail.
*/
- dest_pr_reg = __core_scsi3_alloc_registration(SE_DEV(cmd),
+ dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
sa_res_key, all_tg_pt, aptpl);
- if (!(dest_pr_reg)) {
+ if (!dest_pr_reg) {
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1760,6 +1762,9 @@ static int core_scsi3_decode_spec_i_port(
tid_len = 0;
}
+
+ transport_kunmap_first_data_page(cmd);
+
/*
* Go ahead and create a registrations from tid_dest_list for the
* SPEC_I_PT provided TransportID for the *tidh referenced dest_node_acl
@@ -1787,12 +1792,12 @@ static int core_scsi3_decode_spec_i_port(
prf_isid = core_pr_dump_initiator_port(dest_pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- __core_scsi3_add_registration(SE_DEV(cmd), dest_node_acl,
+ __core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
dest_pr_reg, 0, 0);
- printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+ pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
" registered Transport ID for Node: %s%s Mapped LUN:"
- " %u\n", TPG_TFO(dest_tpg)->get_fabric_name(),
+ " %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
dest_node_acl->initiatorname, (prf_isid) ?
&i_buf[0] : "", dest_se_deve->mapped_lun);
@@ -1806,6 +1811,7 @@ static int core_scsi3_decode_spec_i_port(
return 0;
out:
+ transport_kunmap_first_data_page(cmd);
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependances..
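The matching transport_kunmap_first_data_page() appears twice — once after the TransportID walk completes, and once under the out: label — so the page mapping is dropped on every failure exit as well. The underlying discipline is the usual goto-unwind shape, sketched here with stub steps:

    static int step_one(unsigned char *buf); /* hypothetical */
    static int step_two(unsigned char *buf); /* hypothetical */

    static int walk_payload(struct se_cmd *cmd)
    {
            unsigned char *buf = transport_kmap_first_data_page(cmd);
            int ret;

            ret = step_one(buf);
            if (ret)
                    goto out;
            ret = step_two(buf);
    out:
            transport_kunmap_first_data_page(cmd); /* one unmap covers all paths */
            return ret;
    }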
@@ -1855,7 +1861,7 @@ static int __core_scsi3_update_aptpl_buf(
{
struct se_lun *lun;
struct se_portal_group *tpg;
- struct se_subsystem_dev *su_dev = SU_DEV(dev);
+ struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
unsigned char tmp[512], isid_buf[32];
ssize_t len = 0;
@@ -1873,8 +1879,8 @@ static int __core_scsi3_update_aptpl_buf(
/*
* Walk the registration list..
*/
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
tmp[0] = '\0';
@@ -1900,7 +1906,7 @@ static int __core_scsi3_update_aptpl_buf(
"res_holder=1\nres_type=%02x\n"
"res_scope=%02x\nres_all_tg_pt=%d\n"
"mapped_lun=%u\n", reg_count,
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_res_type,
pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt,
@@ -1910,17 +1916,17 @@ static int __core_scsi3_update_aptpl_buf(
"initiator_fabric=%s\ninitiator_node=%s\n%s"
"sa_res_key=%llu\nres_holder=0\n"
"res_all_tg_pt=%d\nmapped_lun=%u\n",
- reg_count, TPG_TFO(tpg)->get_fabric_name(),
+ reg_count, tpg->se_tpg_tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, isid_buf,
pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt,
pr_reg->pr_res_mapped_lun);
}
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
- printk(KERN_ERR "Unable to update renaming"
+ pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&T10_RES(su_dev)->registration_lock);
- return -1;
+ spin_unlock(&su_dev->t10_pr.registration_lock);
+ return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
@@ -1929,23 +1935,23 @@ static int __core_scsi3_update_aptpl_buf(
*/
snprintf(tmp, 512, "target_fabric=%s\ntarget_node=%s\n"
"tpgt=%hu\nport_rtpi=%hu\ntarget_lun=%u\nPR_REG_END:"
- " %d\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
- TPG_TFO(tpg)->tpg_get_tag(tpg),
+ " %d\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg),
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
- printk(KERN_ERR "Unable to update renaming"
+ pr_err("Unable to update renaming"
" APTPL metadata\n");
- spin_unlock(&T10_RES(su_dev)->registration_lock);
- return -1;
+ spin_unlock(&su_dev->t10_pr.registration_lock);
+ return -EMSGSIZE;
}
len += sprintf(buf+len, "%s", tmp);
reg_count++;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
- if (!(reg_count))
+ if (!reg_count)
len += sprintf(buf+len, "No Registrations or Reservations");
return 0;
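Each registration record is rendered into a fixed 512-byte tmp buffer and appended only once it is known to fit; on overflow the function now reports -EMSGSIZE instead of a bare -1. The bounds check in isolation:

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/string.h>

    static int append_record(char *buf, size_t *len, size_t buf_len,
                             const char *tmp)
    {
            if (*len + strlen(tmp) >= buf_len)
                    return -EMSGSIZE; /* record would overflow the APTPL buffer */
            *len += sprintf(buf + *len, "%s", tmp);
            return 0;
    }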
@@ -1975,7 +1981,7 @@ static int __core_scsi3_write_aptpl_to_file(
unsigned char *buf,
u32 pr_aptpl_buf_len)
{
- struct t10_wwn *wwn = &SU_DEV(dev)->t10_wwn;
+ struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
struct file *file;
struct iovec iov[1];
mm_segment_t old_fs;
@@ -1987,21 +1993,21 @@ static int __core_scsi3_write_aptpl_to_file(
memset(path, 0, 512);
if (strlen(&wwn->unit_serial[0]) >= 512) {
- printk(KERN_ERR "WWN value for struct se_device does not fit"
+ pr_err("WWN value for struct se_device does not fit"
" into path buffer\n");
- return -1;
+ return -EMSGSIZE;
}
snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
- printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+ pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
- return -1;
+ return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
}
iov[0].iov_base = &buf[0];
- if (!(pr_aptpl_buf_len))
+ if (!pr_aptpl_buf_len)
iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
else
iov[0].iov_len = pr_aptpl_buf_len;
@@ -2012,9 +2018,9 @@ static int __core_scsi3_write_aptpl_to_file(
set_fs(old_fs);
if (ret < 0) {
- printk("Error writing APTPL metadata file: %s\n", path);
+ pr_debug("Error writing APTPL metadata file: %s\n", path);
filp_close(file, NULL);
- return -1;
+ return -EIO;
}
filp_close(file, NULL);
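filp_open() returns an ERR_PTR-encoded pointer on failure, which the reworked return above decodes with PTR_ERR() (falling back to -ENOENT for the defensive !file / !f_dentry cases). The usual idiom, sketched:

    #include <linux/err.h>
    #include <linux/fs.h>

    static int open_aptpl_file(const char *path, struct file **out)
    {
            struct file *file = filp_open(path, O_RDWR | O_CREAT, 0600);

            if (IS_ERR(file))
                    return PTR_ERR(file); /* already a negative errno */
            *out = file;
            return 0;
    }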
@@ -2032,7 +2038,7 @@ static int core_scsi3_update_and_write_aptpl(
/*
* Can be called with a NULL pointer from PROUT service action CLEAR
*/
- if (!(in_buf)) {
+ if (!in_buf) {
memset(null_buf, 0, 64);
buf = &null_buf[0];
/*
@@ -2049,14 +2055,14 @@ static int core_scsi3_update_and_write_aptpl(
ret = core_scsi3_update_aptpl_buf(dev, buf, pr_aptpl_buf_len,
clear_aptpl_metadata);
if (ret != 0)
- return -1;
+ return ret;
/*
* __core_scsi3_write_aptpl_to_file() will call strlen()
* on the passed buf to determine pr_aptpl_buf_len.
*/
ret = __core_scsi3_write_aptpl_to_file(dev, buf, 0);
if (ret != 0)
- return -1;
+ return ret;
return ret;
}
@@ -2070,28 +2076,28 @@ static int core_scsi3_emulate_pro_register(
int spec_i_pt,
int ignore_key)
{
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_reg_tmp, *pr_reg_e;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
/* Used for APTPL metadata w/ UNREGISTER */
unsigned char *pr_aptpl_buf = NULL;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
int pr_holder = 0, ret = 0, type;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid) {
memset(&isid_buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess, &isid_buf[0],
+ se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &isid_buf[0],
PR_REG_ISID_LEN);
isid_ptr = &isid_buf[0];
}
@@ -2099,30 +2105,30 @@ static int core_scsi3_emulate_pro_register(
* Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
*/
pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!(pr_reg_e)) {
+ if (!pr_reg_e) {
if (res_key) {
- printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+ pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
/*
* Do nothing but return GOOD status.
*/
- if (!(sa_res_key))
+ if (!sa_res_key)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
- if (!(spec_i_pt)) {
+ if (!spec_i_pt) {
/*
* Perform the Service Action REGISTER on the Initiator
* Port Endpoint that the PRO was received from on the
* Logical Unit of the SCSI device server.
*/
- ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ ret = core_scsi3_alloc_registration(cmd->se_dev,
se_sess->se_node_acl, se_deve, isid_ptr,
sa_res_key, all_tg_pt, aptpl,
ignore_key, 0);
if (ret != 0) {
- printk(KERN_ERR "Unable to allocate"
+ pr_err("Unable to allocate"
" struct t10_pr_registration\n");
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2143,10 +2149,10 @@ static int core_scsi3_emulate_pro_register(
/*
* Nothing left to do for the APTPL=0 case.
*/
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER\n");
return 0;
}
@@ -2155,15 +2161,15 @@ static int core_scsi3_emulate_pro_register(
* update the APTPL metadata information using its
* preallocated *pr_reg->pr_aptpl_buf.
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
}
core_scsi3_put_pr_reg(pr_reg);
@@ -2175,9 +2181,9 @@ static int core_scsi3_emulate_pro_register(
pr_reg = pr_reg_e;
type = pr_reg->pr_res_type;
- if (!(ignore_key)) {
+ if (!ignore_key) {
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key,
@@ -2187,7 +2193,7 @@ static int core_scsi3_emulate_pro_register(
}
}
if (spec_i_pt) {
- printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+ pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -2197,7 +2203,7 @@ static int core_scsi3_emulate_pro_register(
* must also set ALL_TG_PT=1 in the incoming PROUT.
*/
if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
- printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+ pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
@@ -2209,8 +2215,8 @@ static int core_scsi3_emulate_pro_register(
if (aptpl) {
pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
GFP_KERNEL);
- if (!(pr_aptpl_buf)) {
- printk(KERN_ERR "Unable to allocate"
+ if (!pr_aptpl_buf) {
+ pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -2221,9 +2227,9 @@ static int core_scsi3_emulate_pro_register(
* Nexus sa_res_key=1 Change Reservation Key for registered I_T
* Nexus.
*/
- if (!(sa_res_key)) {
+ if (!sa_res_key) {
pr_holder = core_scsi3_check_implict_release(
- SE_DEV(cmd), pr_reg);
+ cmd->se_dev, pr_reg);
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
@@ -2240,7 +2246,7 @@ static int core_scsi3_emulate_pro_register(
&pr_tmpl->registration_list,
pr_reg_list) {
- if (!(pr_reg_p->pr_reg_all_tg_pt))
+ if (!pr_reg_p->pr_reg_all_tg_pt)
continue;
if (pr_reg_p->pr_res_key != res_key)
@@ -2260,7 +2266,7 @@ static int core_scsi3_emulate_pro_register(
/*
* Release the calling I_T Nexus registration now..
*/
- __core_scsi3_free_registration(SE_DEV(cmd), pr_reg,
+ __core_scsi3_free_registration(cmd->se_dev, pr_reg,
NULL, 1);
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2289,10 +2295,10 @@ static int core_scsi3_emulate_pro_register(
}
spin_unlock(&pr_tmpl->registration_lock);
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for UNREGISTER\n");
return 0;
}
@@ -2300,9 +2306,9 @@ static int core_scsi3_emulate_pro_register(
ret = core_scsi3_update_and_write_aptpl(dev,
&pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for UNREGISTER\n");
}
@@ -2315,20 +2321,20 @@ static int core_scsi3_emulate_pro_register(
* READ_KEYS service action.
*/
pr_reg->pr_res_generation = core_scsi3_pr_generation(
- SE_DEV(cmd));
+ cmd->se_dev);
pr_reg->pr_res_key = sa_res_key;
- printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+ pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
" Key for %s to: 0x%016Lx PRgeneration:"
- " 0x%08x\n", CMD_TFO(cmd)->get_fabric_name(),
+ " 0x%08x\n", cmd->se_tfo->get_fabric_name(),
(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
pr_reg->pr_reg_nacl->initiatorname,
pr_reg->pr_res_key, pr_reg->pr_res_generation);
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
core_scsi3_update_and_write_aptpl(dev, NULL, 0);
core_scsi3_put_pr_reg(pr_reg);
- printk("SPC-3 PR: Set APTPL Bit Deactivated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
" for REGISTER\n");
return 0;
}
@@ -2336,9 +2342,9 @@ static int core_scsi3_emulate_pro_register(
ret = core_scsi3_update_and_write_aptpl(dev,
&pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret)) {
+ if (!ret) {
pr_tmpl->pr_aptpl_active = 1;
- printk("SPC-3 PR: Set APTPL Bit Activated"
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated"
" for REGISTER\n");
}
@@ -2378,19 +2384,19 @@ static int core_scsi3_pro_reserve(
int scope,
u64 res_key)
{
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct se_dev_entry *se_deve;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
char i_buf[PR_REG_ISID_ID_LEN];
int ret, prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
se_tpg = se_sess->se_tpg;
@@ -2398,10 +2404,10 @@ static int core_scsi3_pro_reserve(
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2415,7 +2421,7 @@ static int core_scsi3_pro_reserve(
* registered with the logical unit for the I_T nexus; and
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+ pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
@@ -2432,7 +2438,7 @@ static int core_scsi3_pro_reserve(
* and that persistent reservation has a scope of LU_SCOPE.
*/
if (scope != PR_SCOPE_LU_SCOPE) {
- printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2456,12 +2462,12 @@ static int core_scsi3_pro_reserve(
*/
if (pr_res_holder != pr_reg) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
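Every read of dev->dev_pr_res_holder — including the conflict report just above — happens under dev_reservation_lock, so the holder cannot change between the comparison and the log message. Reduced to its core (a simplification of the TYPE/SCOPE checks the function actually performs):

    static bool reservation_conflicts(struct se_device *dev,
                                      struct t10_pr_registration *mine)
    {
            bool conflict;

            spin_lock(&dev->dev_reservation_lock);
            conflict = dev->dev_pr_res_holder && dev->dev_pr_res_holder != mine;
            spin_unlock(&dev->dev_reservation_lock);
            return conflict;
    }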
@@ -2478,13 +2484,13 @@ static int core_scsi3_pro_reserve(
if ((pr_res_holder->pr_res_type != type) ||
(pr_res_holder->pr_res_scope != scope)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+ pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s trying to change TYPE and/or SCOPE,"
" while reservation already held by [%s]: %s,"
" returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2516,22 +2522,22 @@ static int core_scsi3_pro_reserve(
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+ pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
- CMD_TFO(cmd)->get_fabric_name(), core_scsi3_pr_dump_type(type),
+ cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata"
" for RESERVE\n");
}
@@ -2558,7 +2564,7 @@ static int core_scsi3_emulate_pro_reserve(
ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
break;
default:
- printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+ pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -2587,12 +2593,12 @@ static void __core_scsi3_complete_pro_release(
*/
dev->dev_pr_res_holder = NULL;
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+ pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (explict) ? "explict" : "implict",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+ pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "");
/*
@@ -2608,22 +2614,22 @@ static int core_scsi3_emulate_pro_release(
u64 res_key)
{
struct se_device *dev = cmd->se_dev;
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_lun *se_lun = cmd->se_lun;
struct t10_pr_registration *pr_reg, *pr_reg_p, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
int ret, all_reg = 0;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2641,7 +2647,7 @@ static int core_scsi3_emulate_pro_release(
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
+ if (!pr_res_holder) {
/*
* No persistent reservation, return GOOD status.
*/
@@ -2678,7 +2684,7 @@ static int core_scsi3_emulate_pro_release(
* that is registered with the logical unit for the I_T nexus;
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+ pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
@@ -2694,13 +2700,13 @@ static int core_scsi3_emulate_pro_release(
if ((pr_res_holder->pr_res_type != type) ||
(pr_res_holder->pr_res_scope != scope)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
- printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+ pr_err("SPC-3 PR RELEASE: Attempted to release"
" reservation from [%s]: %s with different TYPE "
"and/or SCOPE while reservation already held by"
" [%s]: %s, returning RESERVATION_CONFLICT\n",
- CMD_TFO(cmd)->get_fabric_name(),
+ cmd->se_tfo->get_fabric_name(),
se_sess->se_node_acl->initiatorname,
- TPG_TFO(pr_res_nacl->se_tpg)->get_fabric_name(),
+ pr_res_nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
pr_res_holder->pr_reg_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
@@ -2758,11 +2764,11 @@ static int core_scsi3_emulate_pro_release(
write_aptpl:
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
}
core_scsi3_put_pr_reg(pr_reg);
@@ -2775,18 +2781,18 @@ static int core_scsi3_emulate_pro_clear(
{
struct se_device *dev = cmd->se_dev;
struct se_node_acl *pr_reg_nacl;
- struct se_session *se_sess = SE_SESS(cmd);
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct se_session *se_sess = cmd->se_sess;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
u32 pr_res_mapped_lun = 0;
int calling_it_nexus = 0;
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd),
+ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
se_sess->se_node_acl, se_sess);
- if (!(pr_reg_n)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg_n) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -2802,7 +2808,7 @@ static int core_scsi3_emulate_pro_clear(
* that is registered with the logical unit for the I_T nexus.
*/
if (res_key != pr_reg_n->pr_res_key) {
- printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+ pr_err("SPC-3 PR REGISTER: Received"
" res_key: 0x%016Lx does not match"
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
@@ -2839,18 +2845,18 @@ static int core_scsi3_emulate_pro_clear(
* command with CLEAR service action was received, with the
* additional sense code set to RESERVATIONS PREEMPTED.
*/
- if (!(calling_it_nexus))
+ if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
- CMD_TFO(cmd)->get_fabric_name());
+ pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
+ cmd->se_tfo->get_fabric_name());
if (pr_tmpl->pr_aptpl_active) {
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Updated APTPL metadata"
" for CLEAR\n");
}
@@ -2889,12 +2895,12 @@ static void __core_scsi3_complete_pro_preempt(
pr_reg->pr_res_type = type;
pr_reg->pr_res_scope = scope;
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+ pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
core_scsi3_pr_dump_type(type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
- printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+ pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
/*
@@ -2920,7 +2926,7 @@ static void core_scsi3_release_preempt_and_abort(
if (pr_reg_holder == pr_reg)
continue;
if (pr_reg->pr_res_holder) {
- printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+ pr_warn("pr_reg->pr_res_holder still set\n");
continue;
}
@@ -2954,25 +2960,25 @@ static int core_scsi3_pro_preempt(
u64 sa_res_key,
int abort)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve;
struct se_node_acl *pr_reg_nacl;
- struct se_session *se_sess = SE_SESS(cmd);
+ struct se_session *se_sess = cmd->se_sess;
struct list_head preempt_and_abort_list;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
u32 pr_res_mapped_lun = 0;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!(se_sess))
+ if (!se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
- pr_reg_n = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg_n)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate"
+ if (!pr_reg_n) {
+ pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -2982,7 +2988,7 @@ static int core_scsi3_pro_preempt(
return PYX_TRANSPORT_RESERVATION_CONFLICT;
}
if (scope != PR_SCOPE_LU_SCOPE) {
- printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+ pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -2995,7 +3001,7 @@ static int core_scsi3_pro_preempt(
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
all_reg = 1;
- if (!(all_reg) && !(sa_res_key)) {
+ if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3009,7 +3015,7 @@ static int core_scsi3_pro_preempt(
* server shall perform a preempt by doing the following in an
* uninterrupted series of actions. (See below..)
*/
- if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+ if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
/*
* No existing or SA Reservation Key matching reservations..
*
@@ -3036,7 +3042,7 @@ static int core_scsi3_pro_preempt(
* was received, with the additional sense code set
* to REGISTRATIONS PREEMPTED.
*/
- if (!(all_reg)) {
+ if (!all_reg) {
if (pr_reg->pr_res_key != sa_res_key)
continue;
@@ -3076,7 +3082,7 @@ static int core_scsi3_pro_preempt(
NULL, 0);
released_regs++;
}
- if (!(calling_it_nexus))
+ if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl,
pr_res_mapped_lun, 0x2A,
ASCQ_2AH_RESERVATIONS_PREEMPTED);
@@ -3089,7 +3095,7 @@ static int core_scsi3_pro_preempt(
* registered reservation key, then the device server shall
* complete the command with RESERVATION CONFLICT status.
*/
- if (!(released_regs)) {
+ if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3111,17 +3117,17 @@ static int core_scsi3_pro_preempt(
spin_unlock(&dev->dev_reservation_lock);
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL"
" metadata for PREEMPT%s\n", (abort) ?
"_AND_ABORT" : "");
}
core_scsi3_put_pr_reg(pr_reg_n);
- core_scsi3_pr_generation(SE_DEV(cmd));
+ core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
/*
@@ -3247,16 +3253,16 @@ static int core_scsi3_pro_preempt(
}
if (pr_tmpl->pr_aptpl_active) {
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&pr_reg_n->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+ if (!ret)
+ pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
"%s\n", (abort) ? "_AND_ABORT" : "");
}
core_scsi3_put_pr_reg(pr_reg_n);
- core_scsi3_pr_generation(SE_DEV(cmd));
+ core_scsi3_pr_generation(cmd->se_dev);
return 0;
}
@@ -3281,7 +3287,7 @@ static int core_scsi3_emulate_pro_preempt(
res_key, sa_res_key, abort);
break;
default:
- printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+ pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -3297,17 +3303,17 @@ static int core_scsi3_emulate_pro_register_and_move(
int aptpl,
int unreg)
{
- struct se_session *se_sess = SE_SESS(cmd);
- struct se_device *dev = SE_DEV(cmd);
+ struct se_session *se_sess = cmd->se_sess;
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *se_deve, *dest_se_deve = NULL;
- struct se_lun *se_lun = SE_LUN(cmd);
+ struct se_lun *se_lun = cmd->se_lun;
struct se_node_acl *pr_res_nacl, *pr_reg_nacl, *dest_node_acl = NULL;
struct se_port *se_port;
struct se_portal_group *se_tpg, *dest_se_tpg = NULL;
struct target_core_fabric_ops *dest_tf_ops = NULL, *tf_ops;
struct t10_pr_registration *pr_reg, *pr_res_holder, *dest_pr_reg;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
@@ -3315,14 +3321,14 @@ static int core_scsi3_emulate_pro_register_and_move(
unsigned short rtpi;
unsigned char proto_ident;
- if (!(se_sess) || !(se_lun)) {
- printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+ if (!se_sess || !se_lun) {
+ pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
- tf_ops = TPG_TFO(se_tpg);
+ tf_ops = se_tpg->se_tpg_tfo;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Follow logic from spc4r17 Section 5.7.8, Table 50 --
@@ -3330,10 +3336,10 @@ static int core_scsi3_emulate_pro_register_and_move(
*
* Locate the existing *pr_reg via struct se_node_acl pointers
*/
- pr_reg = core_scsi3_locate_pr_reg(SE_DEV(cmd), se_sess->se_node_acl,
+ pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
- if (!(pr_reg)) {
- printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+ if (!pr_reg) {
+ pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -3342,7 +3348,7 @@ static int core_scsi3_emulate_pro_register_and_move(
* provided during this initiator's I_T nexus registration.
*/
if (res_key != pr_reg->pr_res_key) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
@@ -3351,26 +3357,30 @@ static int core_scsi3_emulate_pro_register_and_move(
/*
* The service active reservation key needs to be non zero
*/
- if (!(sa_res_key)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+ if (!sa_res_key) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
+
/*
* Determine the Relative Target Port Identifier where the reservation
* will be moved to for the TransportID containing SCSI initiator WWN
* information.
*/
+ buf = transport_kmap_first_data_page(cmd);
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
tid_len |= (buf[21] & 0xff) << 16;
tid_len |= (buf[22] & 0xff) << 8;
tid_len |= buf[23] & 0xff;
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
if ((tid_len + 24) != cmd->data_length) {
- printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+ pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
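
[Editorial aside] The two hunks above are the heart of this conversion: the payload is no longer a pre-mapped t_task_buf but is kmapped around each access. A minimal sketch of the idiom, assuming the transport_kmap_first_data_page()/transport_kunmap_first_data_page() helpers this series relies on:

	/*
	 * Hypothetical helper isolating the pattern: map the first data
	 * page, read the big-endian TransportID length from the PR OUT
	 * parameter list, and unmap before returning.
	 */
	static u32 pr_out_read_tid_len(struct se_cmd *cmd)
	{
		unsigned char *buf = transport_kmap_first_data_page(cmd);
		u32 tid_len;

		tid_len  = (buf[20] & 0xff) << 24;
		tid_len |= (buf[21] & 0xff) << 16;
		tid_len |= (buf[22] & 0xff) << 8;
		tid_len |= buf[23] & 0xff;

		transport_kunmap_first_data_page(cmd);
		return tid_len;
	}

pr_out_read_tid_len() is illustrative only; the patch open-codes the same reads inline so it can also pull rtpi from bytes 18-19 under the same mapping.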
@@ -3382,10 +3392,10 @@ static int core_scsi3_emulate_pro_register_and_move(
if (se_port->sep_rtpi != rtpi)
continue;
dest_se_tpg = se_port->sep_tpg;
- if (!(dest_se_tpg))
+ if (!dest_se_tpg)
continue;
- dest_tf_ops = TPG_TFO(dest_se_tpg);
- if (!(dest_tf_ops))
+ dest_tf_ops = dest_se_tpg->se_tpg_tfo;
+ if (!dest_tf_ops)
continue;
atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
@@ -3394,7 +3404,7 @@ static int core_scsi3_emulate_pro_register_and_move(
ret = core_scsi3_tpg_depend_item(dest_se_tpg);
if (ret != 0) {
- printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+ pr_err("core_scsi3_tpg_depend_item() failed"
" for dest_se_tpg\n");
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -3407,20 +3417,22 @@ static int core_scsi3_emulate_pro_register_and_move(
}
spin_unlock(&dev->se_port_lock);
- if (!(dest_se_tpg) || (!dest_tf_ops)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ if (!dest_se_tpg || !dest_tf_ops) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
+
+ buf = transport_kmap_first_data_page(cmd);
proto_ident = (buf[24] & 0x0f);
#if 0
- printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
" 0x%02x\n", proto_ident);
#endif
if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
" proto_ident: 0x%02x does not match ident: 0x%02x"
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
@@ -3429,7 +3441,7 @@ static int core_scsi3_emulate_pro_register_and_move(
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
 " contain a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
ret = PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -3437,14 +3449,17 @@ static int core_scsi3_emulate_pro_register_and_move(
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
- if (!(initiator_str)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+ if (!initiator_str) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
- printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
+
+ pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
"port" : "device", initiator_str, (iport_ptr != NULL) ?
iport_ptr : "");
@@ -3459,18 +3474,18 @@ static int core_scsi3_emulate_pro_register_and_move(
pr_reg_nacl = pr_reg->pr_reg_nacl;
matching_iname = (!strcmp(initiator_str,
pr_reg_nacl->initiatorname)) ? 1 : 0;
- if (!(matching_iname))
+ if (!matching_iname)
goto after_iport_check;
- if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+ if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
}
- if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
- printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+ if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+ pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
@@ -3490,8 +3505,8 @@ after_iport_check:
}
spin_unlock_bh(&dest_se_tpg->acl_node_lock);
- if (!(dest_node_acl)) {
- printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+ if (!dest_node_acl) {
+ pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3499,7 +3514,7 @@ after_iport_check:
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
if (ret != 0) {
- printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
+ pr_err("core_scsi3_nodeacl_depend_item() for"
" dest_node_acl\n");
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
@@ -3508,7 +3523,7 @@ after_iport_check:
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
dest_node_acl->initiatorname);
#endif
@@ -3517,8 +3532,8 @@ after_iport_check:
* PORT IDENTIFIER.
*/
dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
- if (!(dest_se_deve)) {
- printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+ if (!dest_se_deve) {
+ pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
goto out;
@@ -3526,7 +3541,7 @@ after_iport_check:
ret = core_scsi3_lunacl_depend_item(dest_se_deve);
if (ret < 0) {
- printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+ pr_err("core_scsi3_lunacl_depend_item() failed\n");
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
@@ -3534,7 +3549,7 @@ after_iport_check:
goto out;
}
#if 0
- printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+ pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
" ACL for dest_se_deve->mapped_lun: %u\n",
dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
dest_se_deve->mapped_lun);
@@ -3545,8 +3560,8 @@ after_iport_check:
*/
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
- if (!(pr_res_holder)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+ if (!pr_res_holder) {
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
@@ -3559,7 +3574,7 @@ after_iport_check:
* Register behaviors for a REGISTER AND MOVE service action
*/
if (pr_res_holder != pr_reg) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3576,7 +3591,7 @@ after_iport_check:
*/
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
- printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+ pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
@@ -3611,8 +3626,8 @@ after_iport_check:
*/
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
iport_ptr);
- if (!(dest_pr_reg)) {
- ret = core_scsi3_alloc_registration(SE_DEV(cmd),
+ if (!dest_pr_reg) {
+ ret = core_scsi3_alloc_registration(cmd->se_dev,
dest_node_acl, dest_se_deve, iport_ptr,
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
@@ -3644,16 +3659,16 @@ after_iport_check:
/*
* Increment PRGeneration for existing registrations..
*/
- if (!(new_reg))
+ if (!new_reg)
dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+ pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
" created new reservation holder TYPE: %s on object RTPI:"
" %hu PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
core_scsi3_pr_dump_type(type), rtpi,
dest_pr_reg->pr_res_generation);
- printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+ pr_debug("SPC-3 PR Successfully moved reservation from"
" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
@@ -3681,24 +3696,28 @@ after_iport_check:
* Clear the APTPL metadata if APTPL has been disabled, otherwise
* write out the updated metadata to struct file for this SCSI device.
*/
- if (!(aptpl)) {
+ if (!aptpl) {
pr_tmpl->pr_aptpl_active = 0;
- core_scsi3_update_and_write_aptpl(SE_DEV(cmd), NULL, 0);
- printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+ core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
+ pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
" REGISTER_AND_MOVE\n");
} else {
pr_tmpl->pr_aptpl_active = 1;
- ret = core_scsi3_update_and_write_aptpl(SE_DEV(cmd),
+ ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
&dest_pr_reg->pr_aptpl_buf[0],
pr_tmpl->pr_aptpl_buf_len);
- if (!(ret))
- printk("SPC-3 PR: Set APTPL Bit Activated for"
+ if (!ret)
+ pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
" REGISTER_AND_MOVE\n");
}
+ transport_kunmap_first_data_page(cmd);
+
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
+ if (buf)
+ transport_kunmap_first_data_page(cmd);
if (dest_se_deve)
core_scsi3_lunacl_undepend_item(dest_se_deve);
if (dest_node_acl)
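
[Editorial aside] Because the buffer is mapped and unmapped in stages along this long function, the shared error label uses the buf pointer itself as the "still mapped" flag. A condensed sketch, under the same helper-name assumptions as above:

	buf = transport_kmap_first_data_page(cmd);
	/* ... parse a few fields ... */
	transport_kunmap_first_data_page(cmd);
	buf = NULL;			/* mark unmapped */
	/* ... later failures jump to out: ... */
out:
	if (buf)			/* unmap only if still mapped */
		transport_kunmap_first_data_page(cmd);

Nulling the pointer after every kunmap is what keeps the single label from double-unmapping.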
@@ -3723,7 +3742,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
*/
static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
{
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u64 res_key, sa_res_key;
int sa, scope, type, aptpl;
int spec_i_pt = 0, all_tg_pt = 0, unreg = 0;
@@ -3731,11 +3750,11 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!(SE_SESS(cmd)))
+ if (!cmd->se_sess)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (cmd->data_length < 24) {
- printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+ pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -3745,6 +3764,8 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
sa = (cdb[1] & 0x1f);
scope = (cdb[2] & 0xf0);
type = (cdb[2] & 0x0f);
+
+ buf = transport_kmap_first_data_page(cmd);
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3762,6 +3783,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
aptpl = (buf[17] & 0x01);
unreg = (buf[17] & 0x02);
}
+ transport_kunmap_first_data_page(cmd);
+ buf = NULL;
+
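+ [Editorial aside] The shift-and-OR reads of the two 8-byte reservation keys in this parse could equally use the kernel's unaligned big-endian accessors; a sketch of that alternative spelling (not what the patch does), with offsets taken from the SPC-3 PR OUT parameter list layout:
+
+	#include <asm/unaligned.h>
+
+	buf = transport_kmap_first_data_page(cmd);
+	res_key    = get_unaligned_be64(&buf[0]);	/* RESERVATION KEY */
+	sa_res_key = get_unaligned_be64(&buf[8]);	/* SERVICE ACTION RESERVATION KEY */
+	aptpl      = buf[17] & 0x01;
+	transport_kunmap_first_data_page(cmd);
+	buf = NULL;
+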
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
@@ -3776,9 +3800,9 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
* the sense key set to ILLEGAL REQUEST, and the additional sense
* code set to PARAMETER LIST LENGTH ERROR.
*/
- if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+ if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
(cmd->data_length != 24)) {
- printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+ pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
@@ -3812,7 +3836,7 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
sa_res_key, aptpl, unreg);
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+ pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -3827,25 +3851,26 @@ static int core_scsi3_emulate_pr_out(struct se_cmd *cmd, unsigned char *cdb)
*/
static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_device *se_dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u32 add_len = 0, off = 8;
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
- spin_lock(&T10_RES(su_dev)->registration_lock);
- list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
+ spin_lock(&su_dev->t10_pr.registration_lock);
+ list_for_each_entry(pr_reg, &su_dev->t10_pr.registration_list,
pr_reg_list) {
/*
* Check for overflow of 8byte PRI READ_KEYS payload and
@@ -3865,13 +3890,15 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
add_len += 8;
}
- spin_unlock(&T10_RES(su_dev)->registration_lock);
+ spin_unlock(&su_dev->t10_pr.registration_lock);
buf[4] = ((add_len >> 24) & 0xff);
buf[5] = ((add_len >> 16) & 0xff);
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
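
[Editorial aside] READ_KEYS packs PRGENERATION into bytes 0-3 and the additional length into bytes 4-7, MSB first. The same big-endian stores can be written with the unaligned helpers; a sketch:

	#include <asm/unaligned.h>

	put_unaligned_be32(su_dev->t10_pr.pr_generation, &buf[0]);
	/* ... one 8-byte key per registration, starting at off = 8 ... */
	put_unaligned_be32(add_len, &buf[4]);

Either spelling yields the wire format SPC-3 requires; the patch keeps the open-coded shifts to stay close to the surrounding style.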
@@ -3882,23 +3909,24 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_device *se_dev = cmd->se_dev;
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct t10_pr_registration *pr_reg;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ unsigned char *buf;
u64 pr_res_key;
u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
spin_lock(&se_dev->dev_reservation_lock);
pr_reg = se_dev->dev_pr_res_holder;
@@ -3911,10 +3939,9 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
- if (cmd->data_length < 22) {
- spin_unlock(&se_dev->dev_reservation_lock);
- return 0;
- }
+ if (cmd->data_length < 22)
+ goto err;
+
/*
* Set the Reservation key.
*
@@ -3951,7 +3978,10 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
buf[21] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
}
+
+err:
spin_unlock(&se_dev->dev_reservation_lock);
+ transport_kunmap_first_data_page(cmd);
return 0;
}
@@ -3963,17 +3993,19 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
*/
static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- struct t10_reservation_template *pr_tmpl = &SU_DEV(dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct se_device *dev = cmd->se_dev;
+ struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
u16 add_len = 8; /* Hardcoded to 8. */
if (cmd->data_length < 6) {
- printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+ pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
+ buf = transport_kmap_first_data_page(cmd);
+
buf[0] = ((add_len >> 8) & 0xff);
buf[1] = (add_len & 0xff);
buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
@@ -4004,6 +4036,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -4014,27 +4048,29 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
*/
static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
{
- struct se_device *se_dev = SE_DEV(cmd);
+ struct se_device *se_dev = cmd->se_dev;
struct se_node_acl *se_nacl;
- struct se_subsystem_dev *su_dev = SU_DEV(se_dev);
+ struct se_subsystem_dev *su_dev = se_dev->se_sub_dev;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg, *pr_reg_tmp;
- struct t10_reservation_template *pr_tmpl = &SU_DEV(se_dev)->t10_reservation;
- unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
+ struct t10_reservation *pr_tmpl = &se_dev->se_sub_dev->t10_pr;
+ unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
int format_code = 0;
if (cmd->data_length < 8) {
- printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+ pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
- buf[0] = ((T10_RES(su_dev)->pr_generation >> 24) & 0xff);
- buf[1] = ((T10_RES(su_dev)->pr_generation >> 16) & 0xff);
- buf[2] = ((T10_RES(su_dev)->pr_generation >> 8) & 0xff);
- buf[3] = (T10_RES(su_dev)->pr_generation & 0xff);
+ buf = transport_kmap_first_data_page(cmd);
+
+ buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
+ buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
+ buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
+ buf[3] = (su_dev->t10_pr.pr_generation & 0xff);
spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
@@ -4051,11 +4087,11 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* Determine expected length of $FABRIC_MOD specific
* TransportID full status descriptor..
*/
- exp_desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id_len(
+ exp_desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id_len(
se_tpg, se_nacl, pr_reg, &format_code);
if ((exp_desc_len + add_len) > cmd->data_length) {
- printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+ pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
" out of buffer: %d\n", cmd->data_length);
spin_lock(&pr_tmpl->registration_lock);
atomic_dec(&pr_reg->pr_res_holders);
@@ -4105,7 +4141,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* bit is set to one, the contents of the RELATIVE TARGET PORT
* IDENTIFIER field are not defined by this standard.
*/
- if (!(pr_reg->pr_reg_all_tg_pt)) {
+ if (!pr_reg->pr_reg_all_tg_pt) {
struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
@@ -4116,7 +4152,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
/*
* Now, have the $FABRIC_MOD fill in the protocol identifier
*/
- desc_len = TPG_TFO(se_tpg)->tpg_get_pr_transport_id(se_tpg,
+ desc_len = se_tpg->se_tpg_tfo->tpg_get_pr_transport_id(se_tpg,
se_nacl, pr_reg, &format_code, &buf[off+4]);
spin_lock(&pr_tmpl->registration_lock);
@@ -4150,6 +4186,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
+ transport_kunmap_first_data_page(cmd);
+
return 0;
}
@@ -4165,7 +4203,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
case PRI_READ_FULL_STATUS:
return core_scsi3_pri_read_full_status(cmd);
default:
- printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+ pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cdb[1] & 0x1f);
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
@@ -4174,7 +4212,7 @@ static int core_scsi3_emulate_pr_in(struct se_cmd *cmd, unsigned char *cdb)
int core_scsi3_emulate_pr(struct se_cmd *cmd)
{
- unsigned char *cdb = &T_TASK(cmd)->t_task_cdb[0];
+ unsigned char *cdb = &cmd->t_task_cdb[0];
struct se_device *dev = cmd->se_dev;
/*
* Following spc2r20 5.5.1 Reservations overview:
@@ -4186,7 +4224,7 @@ int core_scsi3_emulate_pr(struct se_cmd *cmd)
* CONFLICT status.
*/
if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
- printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+ pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -4213,39 +4251,39 @@ static int core_pt_seq_non_holder(
int core_setup_reservations(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
- struct t10_reservation_template *rest = &su_dev->t10_reservation;
+ struct t10_reservation *rest = &su_dev->t10_pr;
/*
* If this device is from Target_Core_Mod/pSCSI, use the reservations
* of the Underlying SCSI hardware. In Linux/SCSI terms, this can
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate reservations themselves.
*/
- if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
- !(DEV_ATTRIB(dev)->emulate_reservations)) || force_pt) {
+ if (((dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ !(dev->se_sub_dev->se_dev_attrib.emulate_reservations)) || force_pt) {
rest->res_type = SPC_PASSTHROUGH;
rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
+ " emulation\n", dev->transport->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated Persistent Reservations.
*/
- if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
+ if (dev->transport->get_device_rev(dev) >= SCSI_3) {
rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
- " emulation\n", TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
+ " emulation\n", dev->transport->name);
} else {
rest->res_type = SPC2_RESERVATIONS;
rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
rest->pr_ops.t10_seq_non_holder =
&core_scsi2_reservation_seq_non_holder;
- printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
- TRANSPORT(dev)->name);
+ pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
+ dev->transport->name);
}
return 0;
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 5603bcfd86d..c8f47d06458 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -49,7 +49,7 @@ extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
- struct t10_reservation_template *, u64,
+ struct t10_reservation *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 331d423fd0e..2b7b0da9146 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -55,24 +55,6 @@ static struct se_subsystem_api pscsi_template;
static void pscsi_req_done(struct request *, int);
-/* pscsi_get_sh():
- *
- *
- */
-static struct Scsi_Host *pscsi_get_sh(u32 host_no)
-{
- struct Scsi_Host *sh = NULL;
-
- sh = scsi_host_lookup(host_no);
- if (IS_ERR(sh)) {
- printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
- " %u\n", host_no);
- return NULL;
- }
-
- return sh;
-}
-
/* pscsi_attach_hba():
*
* pscsi_get_sh() used scsi_host_lookup() to locate struct Scsi_Host.
@@ -80,28 +62,23 @@ static struct Scsi_Host *pscsi_get_sh(u32 host_no)
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
- int hba_depth;
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
- if (!(phv)) {
- printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
- return -1;
+ if (!phv) {
+ pr_err("Unable to allocate struct pscsi_hba_virt\n");
+ return -ENOMEM;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
- hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
- hba->hba_ptr = (void *)phv;
+ hba->hba_ptr = phv;
- printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
- " Target Core with TCQ Depth: %d\n", hba->hba_id,
- atomic_read(&hba->max_queue_depth));
+ pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic Target Core\n",
+ hba->hba_id);
return 0;
}
@@ -114,12 +91,12 @@ static void pscsi_detach_hba(struct se_hba *hba)
if (scsi_host) {
scsi_host_put(scsi_host);
- printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+ pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
- printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+ pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);
kfree(phv);
@@ -130,20 +107,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
- int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
/*
* Release the struct Scsi_Host
*/
- if (!(mode_flag)) {
- if (!(sh))
+ if (!mode_flag) {
+ if (!sh)
return 0;
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
- printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+ pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");
@@ -154,27 +128,17 @@ static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
- sh = pscsi_get_sh(phv->phv_host_id);
- if (!(sh)) {
- printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+ sh = scsi_host_lookup(phv->phv_host_id);
+ if (IS_ERR(sh)) {
+ pr_err("pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
- return -1;
+ return PTR_ERR(sh);
}
- /*
- * Usually the SCSI LLD will use the hostt->can_queue value to define
- * its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
- * this at all and set sh->can_queue at runtime.
- */
- hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
- sh->hostt->can_queue : sh->can_queue;
-
- atomic_set(&hba->left_queue_depth, hba_depth);
- atomic_set(&hba->max_queue_depth, hba_depth);
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
- printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+ pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
return 1;
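
[Editorial aside] With pscsi_get_sh() gone, callers use scsi_host_lookup() directly; in this tree it returns an ERR_PTR()-encoded pointer on failure rather than NULL, so the checks become IS_ERR()/PTR_ERR(). The convention, sketched:

	struct Scsi_Host *sh = scsi_host_lookup(host_no);

	if (IS_ERR(sh))
		return PTR_ERR(sh);	/* propagate the real errno, not -1 */

Where the caller itself returns a pointer, as pscsi_create_virtdevice() does below, ERR_CAST(sh) would be the idiomatic form of the raw (struct se_device *) cast.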
@@ -236,7 +200,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
- return -1;
+ return -ENOMEM;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
@@ -259,7 +223,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
out_free:
kfree(buf);
- return -1;
+ return -EPERM;
}
static void
@@ -293,15 +257,15 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
- printk(KERN_ERR "page_83[3]: identifier"
+ pr_err("page_83[3]: identifier"
" length zero!\n");
break;
}
- printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
+ pr_debug("T10 VPD Identifier Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
- printk(KERN_ERR "Unable to allocate memory for"
+ pr_err("Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
@@ -353,7 +317,7 @@ static struct se_device *pscsi_add_device_to_list(
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
- printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+ pr_err("Set broken SCSI Device %d:%d:%d"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
@@ -364,10 +328,8 @@ static struct se_device *pscsi_add_device_to_list(
q = sd->request_queue;
limits = &dev_limits.limits;
limits->logical_block_size = sd->sector_size;
- limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
- queue_max_hw_sectors(q) : sd->host->max_sectors;
- limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
- queue_max_sectors(q) : sd->host->max_sectors;
+ limits->max_hw_sectors = min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ limits->max_sectors = min_t(int, sd->host->max_sectors, queue_max_sectors(q));
dev_limits.hw_queue_depth = sd->queue_depth;
dev_limits.queue_depth = sd->queue_depth;
/*
@@ -391,9 +353,9 @@ static struct se_device *pscsi_add_device_to_list(
pdv->pdv_sd = sd;
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
- se_dev, dev_flags, (void *)pdv,
+ se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
- if (!(dev)) {
+ if (!dev) {
pdv->pdv_sd = NULL;
return NULL;
}
@@ -423,14 +385,14 @@ static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
struct pscsi_dev_virt *pdv;
pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
- if (!(pdv)) {
- printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+ if (!pdv) {
+ pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pdv->pdv_se_hba = hba;
- printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
- return (void *)pdv;
+ pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+ return pdv;
}
/*
@@ -450,7 +412,7 @@ static struct se_device *pscsi_create_type_disk(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
- printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@@ -463,19 +425,19 @@ static struct se_device *pscsi_create_type_disk(
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (IS_ERR(bd)) {
- printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+ pr_err("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
pdv->pdv_bd = bd;
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev)) {
+ if (!dev) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
return NULL;
}
- printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
return dev;
@@ -497,7 +459,7 @@ static struct se_device *pscsi_create_type_rom(
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
- printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+ pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
@@ -505,11 +467,11 @@ static struct se_device *pscsi_create_type_rom(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev)) {
+ if (!dev) {
scsi_device_put(sd);
return NULL;
}
- printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -533,10 +495,10 @@ static struct se_device *pscsi_create_type_other(
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
- if (!(dev))
+ if (!dev)
return NULL;
- printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+ pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
@@ -555,8 +517,8 @@ static struct se_device *pscsi_create_virtdevice(
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
- if (!(pdv)) {
- printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+ if (!pdv) {
+ pr_err("Unable to locate struct pscsi_dev_virt"
" parameter\n");
return ERR_PTR(-EINVAL);
}
@@ -564,9 +526,9 @@ static struct se_device *pscsi_create_virtdevice(
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
- if (!(sh)) {
+ if (!sh) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
- printk(KERN_ERR "pSCSI: Unable to locate struct"
+ pr_err("pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return ERR_PTR(-ENODEV);
}
@@ -575,7 +537,7 @@ static struct se_device *pscsi_create_virtdevice(
* reference, we enforce that udev_path has been set
*/
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
- printk(KERN_ERR "pSCSI: udev_path attribute has not"
+ pr_err("pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return ERR_PTR(-EINVAL);
}
@@ -586,8 +548,8 @@ static struct se_device *pscsi_create_virtdevice(
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
spin_lock(&hba->device_lock);
- if (!(list_empty(&hba->hba_dev_list))) {
- printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+ if (!list_empty(&hba->hba_dev_list)) {
+ pr_err("pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
return ERR_PTR(-EEXIST);
@@ -601,16 +563,16 @@ static struct se_device *pscsi_create_virtdevice(
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
- sh = pscsi_get_sh(pdv->pdv_host_id);
- if (!(sh)) {
- printk(KERN_ERR "pSCSI: Unable to locate"
+ sh = scsi_host_lookup(pdv->pdv_host_id);
+ if (IS_ERR(sh)) {
+ pr_err("pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
- return ERR_PTR(-ENODEV);
+ return (struct se_device *) sh;
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
- printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+ pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return ERR_PTR(-EEXIST);
}
@@ -639,7 +601,7 @@ static struct se_device *pscsi_create_virtdevice(
break;
}
- if (!(dev)) {
+ if (!dev) {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
@@ -653,7 +615,7 @@ static struct se_device *pscsi_create_virtdevice(
}
spin_unlock_irq(sh->host_lock);
- printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+ pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@@ -728,13 +690,12 @@ static int pscsi_transport_complete(struct se_task *task)
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
- if (!TASK_CMD(task)->se_deve)
+ if (!task->task_se_cmd->se_deve)
goto after_mode_sense;
- if (TASK_CMD(task)->se_deve->lun_flags &
+ if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = (unsigned char *)
- T_TASK(task->task_se_cmd)->t_task_buf;
+ unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -743,6 +704,8 @@ static int pscsi_transport_complete(struct se_task *task)
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}
+
+ transport_kunmap_first_data_page(task->task_se_cmd);
}
}
after_mode_sense:
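
[Editorial aside] The MODE SENSE fixup above forces the write-protect bit on for read-only LUN mappings, now through the kmap helpers instead of the removed t_task_buf. Reduced to a sketch (assuming a GOOD MODE SENSE completion for a command cmd):

	unsigned char *buf = transport_kmap_first_data_page(cmd);

	if (cdb[0] == MODE_SENSE_10)
		buf[3] |= 0x80;		/* WP bit, mode parameter header(10) */
	else
		buf[2] |= 0x80;		/* WP bit, mode parameter header(6) */

	transport_kunmap_first_data_page(cmd);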
@@ -766,8 +729,8 @@ after_mode_sense:
u32 blocksize;
buf = sg_virt(&sg[0]);
- if (!(buf)) {
- printk(KERN_ERR "Unable to get buf for scatterlist\n");
+ if (!buf) {
+ pr_err("Unable to get buf for scatterlist\n");
goto after_mode_select;
}
@@ -797,34 +760,20 @@ after_mode_select:
}
static struct se_task *
-pscsi_alloc_task(struct se_cmd *cmd)
+pscsi_alloc_task(unsigned char *cdb)
{
struct pscsi_plugin_task *pt;
- unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
- pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
+ /*
+ * Dynamically alloc cdb space, since it may be larger than
+ * TCM_MAX_COMMAND_SIZE
+ */
+ pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
if (!pt) {
- printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
+ pr_err("Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
- /*
- * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
- * allocate the extended CDB buffer for per struct se_task context
- * pt->pscsi_cdb now.
- */
- if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
-
- pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
- if (!(pt->pscsi_cdb)) {
- printk(KERN_ERR "pSCSI: Unable to allocate extended"
- " pt->pscsi_cdb\n");
- kfree(pt);
- return NULL;
- }
- } else
- pt->pscsi_cdb = &pt->__pscsi_cdb[0];
-
return &pt->pscsi_task;
}
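
[Editorial aside] The extended-CDB special case disappears because struct pscsi_plugin_task now ends in a flexible array member (see the target_core_pscsi.h hunk below), so a single allocation covers both the struct and however many CDB bytes the command carries. Sketch of the idiom; the memcpy() is shown only for illustration, the driver fills pscsi_cdb later in its dispatch path:

	struct pscsi_plugin_task *pt;

	/* one allocation: fixed struct plus variable-length CDB tail */
	pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
	if (!pt)
		return NULL;
	memcpy(pt->pscsi_cdb, cdb, scsi_command_size(cdb));

This also lets pscsi_free_task() drop its separate kfree() of the extended CDB, as a later hunk shows.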
@@ -849,7 +798,7 @@ static inline void pscsi_blk_init_request(
* also set the end_io_data pointer.to struct se_task.
*/
req->end_io = pscsi_req_done;
- req->end_io_data = (void *)task;
+ req->end_io_data = task;
/*
* Load the referenced struct se_task's SCSI CDB into
* include/linux/blkdev.h:struct request->cmd
@@ -859,7 +808,7 @@ static inline void pscsi_blk_init_request(
/*
* Setup pointer for outgoing sense data.
*/
- req->sense = (void *)&pt->pscsi_sense[0];
+ req->sense = &pt->pscsi_sense[0];
req->sense_len = 0;
}
@@ -874,8 +823,8 @@ static int pscsi_blk_get_request(struct se_task *task)
pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
- if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
- printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+ if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
+ pr_err("PSCSI: blk_get_request() failed: %ld\n",
IS_ERR(pt->pscsi_req));
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
@@ -920,15 +869,8 @@ static int pscsi_do_task(struct se_task *task)
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct se_cmd *cmd = task->task_se_cmd;
/*
- * Release the extended CDB allocation from pscsi_alloc_task()
- * if one exists.
- */
- if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
- kfree(pt->pscsi_cdb);
- /*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
*/
@@ -973,7 +915,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
- printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+ pr_err("PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
@@ -982,14 +924,14 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
}
match_int(args, &arg);
pdv->pdv_host_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+ pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
match_int(args, &arg);
pdv->pdv_channel_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+ pr_debug("PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@@ -997,7 +939,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_target_id:
match_int(args, &arg);
pdv->pdv_target_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+ pr_debug("PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@@ -1005,7 +947,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
case Opt_scsi_lun_id:
match_int(args, &arg);
pdv->pdv_lun_id = arg;
- printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+ pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
@@ -1028,9 +970,9 @@ static ssize_t pscsi_check_configfs_dev_params(
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
- printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+ pr_err("Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
- return -1;
+ return -EINVAL;
}
return 0;
@@ -1090,7 +1032,7 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
+static inline struct bio *pscsi_get_bio(int sg_num)
{
struct bio *bio;
/*
@@ -1098,8 +1040,8 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
* in block/blk-core.c:blk_make_request()
*/
bio = bio_kmalloc(GFP_KERNEL, sg_num);
- if (!(bio)) {
- printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+ if (!bio) {
+ pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
}
bio->bi_end_io = pscsi_bi_endio;
@@ -1107,13 +1049,7 @@ static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
return bio;
}
-#if 0
-#define DEBUG_PSCSI(x...) printk(x)
-#else
-#define DEBUG_PSCSI(x...)
-#endif
-
-static int __pscsi_map_task_SG(
+static int __pscsi_map_SG(
struct se_task *task,
struct scatterlist *task_sg,
u32 task_sg_num,
@@ -1134,7 +1070,7 @@ static int __pscsi_map_task_SG(
return 0;
/*
* For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup
- * the bio_vec maplist from TC< struct se_mem -> task->task_sg ->
+ * the bio_vec maplist from task->task_sg ->
* struct scatterlist memory. The struct se_task->task_sg[] currently needs
* to be attached to struct bios for submission to Linux/SCSI using
* struct request to struct scsi_device->request_queue.
@@ -1143,34 +1079,34 @@ static int __pscsi_map_task_SG(
* is ported to upstream SCSI passthrough functionality that accepts
* struct scatterlist->page_link or struct page as a parameter.
*/
- DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+ pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
- DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+ pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
while (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
- if (!(bio)) {
+ if (!bio) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
- bio = pscsi_get_bio(pdv, nr_vecs);
- if (!(bio))
+ bio = pscsi_get_bio(nr_vecs);
+ if (!bio)
goto fail;
if (rw)
bio->bi_rw |= REQ_WRITE;
- DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+ pr_debug("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
/*
@@ -1185,7 +1121,7 @@ static int __pscsi_map_task_SG(
tbio = tbio->bi_next = bio;
}
- DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+ pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
@@ -1194,11 +1130,11 @@ static int __pscsi_map_task_SG(
if (rc != bytes)
goto fail;
- DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+ pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio->bi_vcnt, nr_vecs);
if (bio->bi_vcnt > nr_vecs) {
- DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+ pr_debug("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
/*
@@ -1220,15 +1156,15 @@ static int __pscsi_map_task_SG(
* Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
* primary SCSI WRITE payload mapped for struct se_task->task_sg[]
*/
- if (!(bidi_read)) {
+ if (!bidi_read) {
/*
* Starting with v2.6.31, call blk_make_request() passing in *hbio to
* allocate the pSCSI task a struct request.
*/
pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
- if (!(pt->pscsi_req)) {
- printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+ if (!pt->pscsi_req) {
+ pr_err("pSCSI: blk_make_request() failed\n");
goto fail;
}
/*
@@ -1237,7 +1173,7 @@ static int __pscsi_map_task_SG(
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
- return task->task_sg_num;
+ return task->task_sg_nents;
}
/*
* Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@@ -1245,13 +1181,13 @@ static int __pscsi_map_task_SG(
*/
pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
- if (!(pt->pscsi_req->next_rq)) {
- printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+ if (!pt->pscsi_req->next_rq) {
+ pr_err("pSCSI: blk_make_request() failed for BIDI\n");
goto fail;
}
pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
- return task->task_sg_num;
+ return task->task_sg_nents;
fail:
while (hbio) {
bio = hbio;
@@ -1262,7 +1198,10 @@ fail:
return ret;
}
-static int pscsi_map_task_SG(struct se_task *task)
+/*
+ * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call.
+ */
+static int pscsi_map_SG(struct se_task *task)
{
int ret;
@@ -1270,14 +1209,14 @@ static int pscsi_map_task_SG(struct se_task *task)
* Setup the main struct request for the task->task_sg[] payload
*/
- ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+ ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0);
if (ret >= 0 && task->task_sg_bidi) {
/*
* If present, set up the extra BIDI-COMMAND SCSI READ
* struct request and payload.
*/
- ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
- task->task_sg_num, 1);
+ ret = __pscsi_map_SG(task, task->task_sg_bidi,
+ task->task_sg_nents, 1);
}
if (ret < 0)
@@ -1285,33 +1224,6 @@ static int pscsi_map_task_SG(struct se_task *task)
return 0;
}
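
[Editorial aside] Stripped of the BIDI and page-splitting details, the flow above is: build a chain of kmalloc'd bios from the scatterlist via bio_add_pc_page(), then hand the head of the chain to blk_make_request() to obtain the struct request. A condensed sketch (one bio per segment, error handling elided):

	struct request_queue *q = pdv->pdv_sd->request_queue;
	struct bio *bio, *hbio = NULL, *tbio = NULL;
	struct scatterlist *sg;
	struct request *req;
	int i;

	for_each_sg(task_sg, sg, task_sg_num, i) {
		bio = bio_kmalloc(GFP_KERNEL, 1);
		bio->bi_end_io = pscsi_bi_endio;
		if (!hbio)
			hbio = tbio = bio;
		else
			tbio = tbio->bi_next = bio;	/* chain via bi_next */
		bio_add_pc_page(q, bio, sg_page(sg), sg->length, sg->offset);
	}
	req = blk_make_request(q, hbio, GFP_KERNEL);

The real loop additionally batches up to BIO_MAX_PAGES vectors per bio and clamps each add to the remaining data_len.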
-/* pscsi_map_task_non_SG():
- *
- *
- */
-static int pscsi_map_task_non_SG(struct se_task *task)
-{
- struct se_cmd *cmd = TASK_CMD(task);
- struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
- int ret = 0;
-
- if (pscsi_blk_get_request(task) < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- if (!task->task_size)
- return 0;
-
- ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
- pt->pscsi_req, T_TASK(cmd)->t_task_buf,
- task->task_size, GFP_KERNEL);
- if (ret < 0) {
- printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- return 0;
-}
-
static int pscsi_CDB_none(struct se_task *task)
{
return pscsi_blk_get_request(task);
@@ -1383,9 +1295,9 @@ static inline void pscsi_process_SAM_status(
struct pscsi_plugin_task *pt)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
- if ((task->task_scsi_status)) {
+ if (task->task_scsi_status) {
task->task_scsi_status <<= 1;
- printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+ pr_debug("PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
}
@@ -1395,18 +1307,16 @@ static inline void pscsi_process_SAM_status(
transport_complete_task(task, (!task->task_scsi_status));
break;
default:
- printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+ pr_debug("PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- TASK_CMD(task)->transport_error_status =
+ task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
transport_complete_task(task, 0);
break;
}
-
- return;
}
static void pscsi_req_done(struct request *req, int uptodate)
@@ -1433,8 +1343,8 @@ static struct se_subsystem_api pscsi_template = {
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
.cdb_none = pscsi_CDB_none,
- .map_task_non_SG = pscsi_map_task_non_SG,
- .map_task_SG = pscsi_map_task_SG,
+ .map_control_SG = pscsi_map_SG,
+ .map_data_SG = pscsi_map_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index a4cd5d352c3..ebf4f1ae2c8 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -2,7 +2,6 @@
#define TARGET_CORE_PSCSI_H
#define PSCSI_VERSION "v4.0"
-#define PSCSI_VIRTUAL_HBA_DEPTH 2048
/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
@@ -24,13 +23,12 @@
struct pscsi_plugin_task {
struct se_task pscsi_task;
- unsigned char *pscsi_cdb;
- unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
struct request *pscsi_req;
+ unsigned char pscsi_cdb[0];
} ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 7837dd365a9..3dd81d24d9a 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -44,12 +44,8 @@
#include "target_core_rd.h"
-static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;
-/* #define DEBUG_RAMDISK_MCP */
-/* #define DEBUG_RAMDISK_DR */
-
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
@@ -59,24 +55,21 @@ static int rd_attach_hba(struct se_hba *hba, u32 host_id)
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
- if (!(rd_host)) {
- printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+ if (!rd_host) {
+ pr_err("Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}
rd_host->rd_host_id = host_id;
- atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
- atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
- hba->hba_ptr = (void *) rd_host;
+ hba->hba_ptr = rd_host;
- printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+ pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
- printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
- " Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
- rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
- RD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+ " MaxSectors: %u\n", hba->hba_id,
+ rd_host->rd_host_id, RD_MAX_SECTORS);
return 0;
}
@@ -85,7 +78,7 @@ static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
- printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+ pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
@@ -114,7 +107,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
- if ((pg)) {
+ if (pg) {
__free_page(pg);
page_count++;
}
@@ -123,7 +116,7 @@ static void rd_release_device_space(struct rd_dev *rd_dev)
kfree(sg);
}
- printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+ pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@@ -148,7 +141,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
- printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+ pr_err("Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -EINVAL;
}
@@ -157,8 +150,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
- if (!(sg_table)) {
- printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+ if (!sg_table) {
+ pr_err("Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -ENOMEM;
}
@@ -172,13 +165,13 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
- if (!(sg)) {
- printk(KERN_ERR "Unable to allocate scatterlist array"
+ if (!sg) {
+ pr_err("Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -ENOMEM;
}
- sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+ sg_init_table(sg, sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
@@ -188,8 +181,8 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
- if (!(pg)) {
- printk(KERN_ERR "Unable to allocate scatterlist"
+ if (!pg) {
+ pr_err("Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -ENOMEM;
}
@@ -201,7 +194,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
total_sg_needed -= sg_per_table;
}
- printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+ pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
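
Rough sizing arithmetic behind the table split above (illustrative numbers; the real max_sg_per_table is RD_MAX_ALLOCATION_SIZE / sizeof(struct scatterlist), which varies by architecture):

/*
 * Illustrative only: if max_sg_per_table works out to 2048 entries, a
 * 10000-page ramdisk needs sg_tables = 10000 / 2048 + 1 = 5 tables --
 * four full ones plus a fifth holding the remaining 1808 entries.
 */
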
@@ -218,8 +211,8 @@ static void *rd_allocate_virtdevice(
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
- if (!(rd_dev)) {
- printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+ if (!rd_dev) {
+ pr_err("Unable to allocate memory for struct rd_dev\n");
return NULL;
}
@@ -229,11 +222,6 @@ static void *rd_allocate_virtdevice(
return rd_dev;
}
-static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
-{
- return rd_allocate_virtdevice(hba, name, 1);
-}
-
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
@@ -273,16 +261,15 @@ static struct se_device *rd_create_virtdevice(
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
- (rd_dev->rd_direct) ? &rd_dr_template :
- &rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
+ &rd_mcp_template, se_dev, dev_flags, rd_dev,
&dev_limits, prod, rev);
- if (!(dev))
+ if (!dev)
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;
- printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+ pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@@ -296,14 +283,6 @@ fail:
return ERR_PTR(ret);
}
-static struct se_device *rd_DIRECT_create_virtdevice(
- struct se_hba *hba,
- struct se_subsystem_dev *se_dev,
- void *p)
-{
- return rd_create_virtdevice(hba, se_dev, p, 1);
-}
-
static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
@@ -330,16 +309,15 @@ static inline struct rd_request *RD_REQ(struct se_task *task)
}
static struct se_task *
-rd_alloc_task(struct se_cmd *cmd)
+rd_alloc_task(unsigned char *cdb)
{
struct rd_request *rd_req;
rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
- printk(KERN_ERR "Unable to allocate struct rd_request\n");
+ pr_err("Unable to allocate struct rd_request\n");
return NULL;
}
- rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
return &rd_req->rd_task;
}
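
rd_alloc_task() returns the se_task embedded in struct rd_request, and RD_REQ() (whose body falls outside this hunk) recovers the container; a sketch of the assumed idiom:

/*
 * Assumed shape of RD_REQ() -- the hunk shows only its signature, so the
 * container_of() call is an inference from the embedded rd_task member.
 */
static inline struct rd_request *example_rd_req(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}
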
@@ -360,7 +338,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
return sg_table;
}
- printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+ pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
@@ -373,7 +351,7 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_dev;
+ struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -382,32 +360,32 @@ static int rd_MEMCPY_read(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+
+ pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
-#endif
+
src_offset = rd_offset;
while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+
+ pr_debug("Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
- printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+ pr_debug("Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -424,15 +402,15 @@ static int rd_MEMCPY_read(struct rd_request *req)
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+
+ pr_debug("Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
- printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+ pr_debug("Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -456,32 +434,29 @@ static int rd_MEMCPY_read(struct rd_request *req)
memcpy(dst, src, length);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
-#endif
+
req->rd_size -= length;
- if (!(req->rd_size))
+ if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u in same page table\n",
+ pr_debug("page: %u in same page table\n",
req->rd_page);
-#endif
continue;
}
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "getting new page table for page: %u\n",
+
+ pr_debug("getting new page table for page: %u\n",
req->rd_page);
-#endif
+
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
sg_s = &table->sg_table[j = 0];
}
@@ -496,7 +471,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_dev;
+ struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
@@ -505,32 +480,32 @@ static int rd_MEMCPY_write(struct rd_request *req)
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+
+ pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
-#endif
+
dst_offset = rd_offset;
while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+
+ pr_debug("Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
- printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+ pr_debug("Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -547,15 +522,15 @@ static int rd_MEMCPY_write(struct rd_request *req)
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+
+ pr_debug("Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
- printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+ pr_debug("Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
-#endif
+
if (length > req->rd_size)
length = req->rd_size;
@@ -579,32 +554,29 @@ static int rd_MEMCPY_write(struct rd_request *req)
memcpy(dst, src, length);
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+ pr_debug("page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
-#endif
+
req->rd_size -= length;
- if (!(req->rd_size))
+ if (!req->rd_size)
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "page: %u in same page table\n",
+ pr_debug("page: %u in same page table\n",
req->rd_page);
-#endif
continue;
}
-#ifdef DEBUG_RAMDISK_MCP
- printk(KERN_INFO "getting new page table for page: %u\n",
+
+ pr_debug("getting new page table for page: %u\n",
req->rd_page);
-#endif
+
table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
+ if (!table)
+ return -EINVAL;
sg_d = &table->sg_table[j = 0];
}
@@ -623,11 +595,11 @@ static int rd_MEMCPY_do_task(struct se_task *task)
unsigned long long lba;
int ret;
- req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
+ req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
- (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
- DEV_ATTRIB(dev)->block_size;
+ (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
+ dev->se_sub_dev->se_dev_attrib.block_size;
req->rd_size = task->task_size;
if (task->task_data_direction == DMA_FROM_DEVICE)
@@ -644,274 +616,6 @@ static int rd_MEMCPY_do_task(struct se_task *task)
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
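
The page/offset math in rd_MEMCPY_do_task() above, restated as a standalone sketch (the helper name is hypothetical; only the formula comes from the patch):

/*
 * Hypothetical helper (illustrative): map an LBA to a ramdisk page index
 * and a byte offset within that page. With block_size = 512 and
 * PAGE_SIZE = 4096, LBA 10 lands on page 1 at offset 1024.
 * Requires <asm/div64.h> for do_div(); lba is a by-value copy, so the
 * in-place division is harmless.
 */
static inline void rd_lba_to_page(unsigned long long lba, u32 block_size,
				  u32 *page, u32 *offset)
{
	*page = (lba * block_size) / PAGE_SIZE;
	*offset = do_div(lba, PAGE_SIZE / block_size) * block_size;
}
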
-/* rd_DIRECT_with_offset():
- *
- *
- */
-static int rd_DIRECT_with_offset(
- struct se_task *task,
- struct list_head *se_mem_list,
- u32 *se_mem_cnt,
- u32 *task_offset)
-{
- struct rd_request *req = RD_REQ(task);
- struct rd_dev *dev = req->rd_dev;
- struct rd_dev_sg_table *table;
- struct se_mem *se_mem;
- struct scatterlist *sg_s;
- u32 j = 0, set_offset = 1;
- u32 get_next_table = 0, offset_length, table_sg_end;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
- (task->task_data_direction == DMA_TO_DEVICE) ?
- "Write" : "Read",
- task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
-#endif
- while (req->rd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
-
- if (set_offset) {
- offset_length = sg_s[j].length - req->rd_offset;
- if (offset_length > req->rd_size)
- offset_length = req->rd_size;
-
- se_mem->se_page = sg_page(&sg_s[j++]);
- se_mem->se_off = req->rd_offset;
- se_mem->se_len = offset_length;
-
- set_offset = 0;
- get_next_table = (j > table_sg_end);
- goto check_eot;
- }
-
- offset_length = (req->rd_size < req->rd_offset) ?
- req->rd_size : req->rd_offset;
-
- se_mem->se_page = sg_page(&sg_s[j]);
- se_mem->se_len = offset_length;
-
- set_offset = 1;
-
-check_eot:
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
- " se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
- req->rd_page, req->rd_size, offset_length, j, se_mem,
- se_mem->se_page, se_mem->se_off, se_mem->se_len);
-#endif
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
-
- req->rd_size -= offset_length;
- if (!(req->rd_size))
- goto out;
-
- if (!set_offset && !get_next_table)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u in same page table\n",
- req->rd_page);
-#endif
- continue;
- }
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "getting new page table for page: %u\n",
- req->rd_page);
-#endif
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[j = 0];
- }
-
-out:
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
- *se_mem_cnt);
-#endif
- return 0;
-}
-
-/* rd_DIRECT_without_offset():
- *
- *
- */
-static int rd_DIRECT_without_offset(
- struct se_task *task,
- struct list_head *se_mem_list,
- u32 *se_mem_cnt,
- u32 *task_offset)
-{
- struct rd_request *req = RD_REQ(task);
- struct rd_dev *dev = req->rd_dev;
- struct rd_dev_sg_table *table;
- struct se_mem *se_mem;
- struct scatterlist *sg_s;
- u32 length, j = 0;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
- (task->task_data_direction == DMA_TO_DEVICE) ?
- "Write" : "Read",
- task->task_lba, req->rd_size, req->rd_page);
-#endif
- while (req->rd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
-
- length = (req->rd_size < sg_s[j].length) ?
- req->rd_size : sg_s[j].length;
-
- se_mem->se_page = sg_page(&sg_s[j++]);
- se_mem->se_len = length;
-
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
- " se_page: %p se_off: %u se_len: %u\n", req->rd_page,
- req->rd_size, j, se_mem, se_mem->se_page,
- se_mem->se_off, se_mem->se_len);
-#endif
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
-
- req->rd_size -= length;
- if (!(req->rd_size))
- goto out;
-
- if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
- printk("page: %u in same page table\n",
- req->rd_page);
-#endif
- continue;
- }
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "getting new page table for page: %u\n",
- req->rd_page);
-#endif
- table = rd_get_sg_table(dev, req->rd_page);
- if (!(table))
- return -1;
-
- sg_s = &table->sg_table[j = 0];
- }
-
-out:
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
- printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
- *se_mem_cnt);
-#endif
- return 0;
-}
-
-/* rd_DIRECT_do_se_mem_map():
- *
- *
- */
-static int rd_DIRECT_do_se_mem_map(
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset_in)
-{
- struct se_cmd *cmd = task->task_se_cmd;
- struct rd_request *req = RD_REQ(task);
- u32 task_offset = *task_offset_in;
- unsigned long long lba;
- int ret;
-
- req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
- PAGE_SIZE);
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
- DEV_ATTRIB(task->se_dev)->block_size;
- req->rd_size = task->task_size;
-
- if (req->rd_offset)
- ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
- task_offset_in);
- else
- ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
- task_offset_in);
-
- if (ret < 0)
- return ret;
-
- if (CMD_TFO(cmd)->task_sg_chaining == 0)
- return 0;
- /*
- * Currently prevent writers from multiple HW fabrics doing
- * pci_map_sg() to RD_DR's internal scatterlist memory.
- */
- if (cmd->data_direction == DMA_TO_DEVICE) {
- printk(KERN_ERR "DMA_TO_DEVICE not supported for"
- " RAMDISK_DR with task_sg_chaining=1\n");
- return -1;
- }
- /*
- * Special case for if task_sg_chaining is enabled, then
- * we setup struct se_task->task_sg[], as it will be used by
- * transport_do_task_sg_chain() for creating chainged SGLs
- * across multiple struct se_task->task_sg[].
- */
- if (!(transport_calc_sg_num(task,
- list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list),
- task_offset)))
- return -1;
-
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
- list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list),
- out_se_mem, se_mem_cnt, task_offset_in);
-}
-
-/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_DIRECT_do_task(struct se_task *task)
-{
- /*
- * At this point the locally allocated RD tables have been mapped
- * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
- */
- task->task_scsi_status = GOOD;
- transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
/* rd_free_task(): (Part of se_subsystem_api_t template)
*
*
@@ -956,7 +660,7 @@ static ssize_t rd_set_configfs_dev_params(
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
- printk(KERN_INFO "RAMDISK: Referencing Page"
+ pr_debug("RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
@@ -974,8 +678,8 @@ static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsys
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
- printk(KERN_INFO "Missing rd_pages= parameter\n");
- return -1;
+ pr_debug("Missing rd_pages= parameter\n");
+ return -EINVAL;
}
return 0;
@@ -1021,32 +725,11 @@ static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
- DEV_ATTRIB(dev)->block_size) - 1;
+ dev->se_sub_dev->se_dev_attrib.block_size) - 1;
return blocks_long;
}
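
A worked example of the block count above (numbers illustrative):

/*
 * Illustrative: rd_page_count = 4096, PAGE_SIZE = 4096, block_size = 512
 * gives a 16 MiB ramdisk, i.e. 32768 blocks. rd_get_blocks() returns
 * 32767 -- the last addressable LBA, which is what READ CAPACITY reports.
 */
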
-static struct se_subsystem_api rd_dr_template = {
- .name = "rd_dr",
- .transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
- .attach_hba = rd_attach_hba,
- .detach_hba = rd_detach_hba,
- .allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
- .create_virtdevice = rd_DIRECT_create_virtdevice,
- .free_device = rd_free_device,
- .alloc_task = rd_alloc_task,
- .do_task = rd_DIRECT_do_task,
- .free_task = rd_free_task,
- .check_configfs_dev_params = rd_check_configfs_dev_params,
- .set_configfs_dev_params = rd_set_configfs_dev_params,
- .show_configfs_dev_params = rd_show_configfs_dev_params,
- .get_cdb = rd_get_cdb,
- .get_device_rev = rd_get_device_rev,
- .get_device_type = rd_get_device_type,
- .get_blocks = rd_get_blocks,
- .do_se_mem_map = rd_DIRECT_do_se_mem_map,
-};
-
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -1071,13 +754,8 @@ int __init rd_module_init(void)
{
int ret;
- ret = transport_subsystem_register(&rd_dr_template);
- if (ret < 0)
- return ret;
-
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
- transport_subsystem_release(&rd_dr_template);
return ret;
}
@@ -1086,6 +764,5 @@ int __init rd_module_init(void)
void rd_module_exit(void)
{
- transport_subsystem_release(&rd_dr_template);
transport_subsystem_release(&rd_mcp_template);
}
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 3ea19e29d8e..0d027732cd0 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -7,8 +7,6 @@
/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
-/* Maximum queuedepth for the Ramdisk HBA */
-#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
@@ -34,8 +32,6 @@ struct rd_request {
u32 rd_page_count;
/* Scatterlist count */
u32 rd_size;
- /* Ramdisk device */
- struct rd_dev *rd_dev;
} ____cacheline_aligned;
struct rd_dev_sg_table {
diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c
index dc6fed037ab..72843441d4f 100644
--- a/drivers/target/target_core_scdb.c
+++ b/drivers/target/target_core_scdb.c
@@ -42,13 +42,13 @@
*/
void split_cdb_XX_6(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
cdb[1] = (lba >> 16) & 0x1f;
cdb[2] = (lba >> 8) & 0xff;
cdb[3] = lba & 0xff;
- cdb[4] = *sectors & 0xff;
+ cdb[4] = sectors & 0xff;
}
/* split_cdb_XX_10():
@@ -57,11 +57,11 @@ void split_cdb_XX_6(
*/
void split_cdb_XX_10(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be16(*sectors, &cdb[7]);
+ put_unaligned_be16(sectors, &cdb[7]);
}
/* split_cdb_XX_12():
@@ -70,11 +70,11 @@ void split_cdb_XX_10(
*/
void split_cdb_XX_12(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
- put_unaligned_be32(*sectors, &cdb[6]);
+ put_unaligned_be32(sectors, &cdb[6]);
}
/* split_cdb_XX_16():
@@ -83,11 +83,11 @@ void split_cdb_XX_12(
*/
void split_cdb_XX_16(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[2]);
- put_unaligned_be32(*sectors, &cdb[10]);
+ put_unaligned_be32(sectors, &cdb[10]);
}
/*
@@ -97,9 +97,9 @@ void split_cdb_XX_16(
*/
void split_cdb_XX_32(
unsigned long long lba,
- u32 *sectors,
+ u32 sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[12]);
- put_unaligned_be32(*sectors, &cdb[28]);
+ put_unaligned_be32(sectors, &cdb[28]);
}
diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h
index 98cd1c01ed8..48e9ccc9585 100644
--- a/drivers/target/target_core_scdb.h
+++ b/drivers/target/target_core_scdb.h
@@ -1,10 +1,10 @@
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H
-extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
-extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
+extern void split_cdb_XX_6(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *);
+extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *);
#endif /* TARGET_CORE_SCDB_H */
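
A minimal caller-side sketch of the new by-value interface (the wrapper function and the READ_10 opcode literal are illustrative, not part of this patch):

/*
 * Hypothetical caller: with sectors passed by value, the helpers can be
 * fed expressions directly -- no local u32 needed just to take its address.
 */
static void example_build_read10(unsigned long long lba, u32 sectors,
				 unsigned char *cdb)
{
	cdb[0] = 0x28;				/* READ_10 */
	split_cdb_XX_10(lba, sectors, cdb);	/* LBA -> cdb[2..5], sectors -> cdb[7..8] */
}
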
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 5e3a067a747..a8d6e1dee93 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -402,8 +402,8 @@ static ssize_t target_stat_scsi_lu_show_attr_lu_name(
return -ENODEV;
/* scsiLuWwnName */
return snprintf(page, PAGE_SIZE, "%s\n",
- (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
- (char *)&DEV_T10_WWN(dev)->unit_serial[0] : "None");
+ (strlen(dev->se_sub_dev->t10_wwn.unit_serial)) ?
+ dev->se_sub_dev->t10_wwn.unit_serial : "None");
}
DEV_STAT_SCSI_LU_ATTR_RO(lu_name);
@@ -413,17 +413,17 @@ static ssize_t target_stat_scsi_lu_show_attr_vend(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.vendor)+1];
if (!dev)
return -ENODEV;
+
/* scsiLuVendorId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 8; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
- DEV_T10_WWN(dev)->vendor[j] : 0x20;
- str[8] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.vendor); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.vendor[i]) ?
+ dev->se_sub_dev->t10_wwn.vendor[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(vend);
@@ -434,18 +434,17 @@ static ssize_t target_stat_scsi_lu_show_attr_prod(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.model)+1];
if (!dev)
return -ENODEV;
/* scsiLuProductId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 16; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
- DEV_T10_WWN(dev)->model[j] : 0x20;
- str[16] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.model); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.model[i]) ?
+ dev->se_sub_dev->t10_wwn.model[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(prod);
@@ -456,18 +455,17 @@ static ssize_t target_stat_scsi_lu_show_attr_rev(
struct se_subsystem_dev *se_subdev = container_of(sgrps,
struct se_subsystem_dev, dev_stat_grps);
struct se_device *dev = se_subdev->se_dev_ptr;
- int j;
- char str[28];
+ int i;
+ char str[sizeof(dev->se_sub_dev->t10_wwn.revision)+1];
if (!dev)
return -ENODEV;
/* scsiLuRevisionId */
- memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
- for (j = 0; j < 4; j++)
- str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
- DEV_T10_WWN(dev)->revision[j] : 0x20;
- str[4] = 0;
+ for (i = 0; i < sizeof(dev->se_sub_dev->t10_wwn.revision); i++)
+ str[i] = ISPRINT(dev->se_sub_dev->t10_wwn.revision[i]) ?
+ dev->se_sub_dev->t10_wwn.revision[i] : ' ';
+ str[i] = '\0';
return snprintf(page, PAGE_SIZE, "%s\n", str);
}
DEV_STAT_SCSI_LU_ATTR_RO(rev);
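
The vend/prod/rev attributes above repeat one sanitize-and-terminate pattern; a hypothetical common helper (an assumption, nothing this patch actually adds) would read:

/*
 * Hypothetical helper: copy a fixed-width T10 WWN field, replacing
 * unprintable bytes with spaces. dst must hold len + 1 bytes.
 */
static void t10_field_sanitize(char *dst, const char *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = ISPRINT(src[i]) ? src[i] : ' ';
	dst[len] = '\0';
}
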
@@ -484,7 +482,7 @@ static ssize_t target_stat_scsi_lu_show_attr_dev_type(
/* scsiLuPeripheralType */
return snprintf(page, PAGE_SIZE, "%u\n",
- TRANSPORT(dev)->get_device_type(dev));
+ dev->transport->get_device_type(dev));
}
DEV_STAT_SCSI_LU_ATTR_RO(dev_type);
@@ -668,18 +666,18 @@ static struct config_item_type target_stat_scsi_lu_cit = {
*/
void target_stat_setup_dev_default_groups(struct se_subsystem_dev *se_subdev)
{
- struct config_group *dev_stat_grp = &DEV_STAT_GRP(se_subdev)->stat_group;
+ struct config_group *dev_stat_grp = &se_subdev->dev_stat_grps.stat_group;
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_dev_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_dev_group,
"scsi_dev", &target_stat_scsi_dev_cit);
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_tgt_dev_group,
"scsi_tgt_dev", &target_stat_scsi_tgt_dev_cit);
- config_group_init_type_name(&DEV_STAT_GRP(se_subdev)->scsi_lu_group,
+ config_group_init_type_name(&se_subdev->dev_stat_grps.scsi_lu_group,
"scsi_lu", &target_stat_scsi_lu_cit);
- dev_stat_grp->default_groups[0] = &DEV_STAT_GRP(se_subdev)->scsi_dev_group;
- dev_stat_grp->default_groups[1] = &DEV_STAT_GRP(se_subdev)->scsi_tgt_dev_group;
- dev_stat_grp->default_groups[2] = &DEV_STAT_GRP(se_subdev)->scsi_lu_group;
+ dev_stat_grp->default_groups[0] = &se_subdev->dev_stat_grps.scsi_dev_group;
+ dev_stat_grp->default_groups[1] = &se_subdev->dev_stat_grps.scsi_tgt_dev_group;
+ dev_stat_grp->default_groups[2] = &se_subdev->dev_stat_grps.scsi_lu_group;
dev_stat_grp->default_groups[3] = NULL;
}
@@ -922,7 +920,7 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_name(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%sPort#%u\n",
- TPG_TFO(tpg)->get_fabric_name(), sep->sep_index);
+ tpg->se_tpg_tfo->get_fabric_name(), sep->sep_index);
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -945,8 +943,8 @@ static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%s%s%d\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg), "+t+",
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1128,7 +1126,7 @@ static ssize_t target_stat_scsi_transport_show_attr_device(
tpg = sep->sep_tpg;
/* scsiTransportType */
ret = snprintf(page, PAGE_SIZE, "scsiTransport%s\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1150,7 +1148,7 @@ static ssize_t target_stat_scsi_transport_show_attr_indx(
}
tpg = sep->sep_tpg;
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock(&lun->lun_sep_lock);
return ret;
}
@@ -1173,10 +1171,10 @@ static ssize_t target_stat_scsi_transport_show_attr_dev_name(
return -ENODEV;
}
tpg = sep->sep_tpg;
- wwn = DEV_T10_WWN(dev);
+ wwn = &dev->se_sub_dev->t10_wwn;
/* scsiTransportDevName */
ret = snprintf(page, PAGE_SIZE, "%s+%s\n",
- TPG_TFO(tpg)->tpg_get_wwn(tpg),
+ tpg->se_tpg_tfo->tpg_get_wwn(tpg),
(strlen(wwn->unit_serial)) ? wwn->unit_serial :
wwn->vendor);
spin_unlock(&lun->lun_sep_lock);
@@ -1212,18 +1210,18 @@ static struct config_item_type target_stat_scsi_transport_cit = {
*/
void target_stat_setup_port_default_groups(struct se_lun *lun)
{
- struct config_group *port_stat_grp = &PORT_STAT_GRP(lun)->stat_group;
+ struct config_group *port_stat_grp = &lun->port_stat_grps.stat_group;
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_port_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_port_group,
"scsi_port", &target_stat_scsi_port_cit);
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_tgt_port_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_tgt_port_group,
"scsi_tgt_port", &target_stat_scsi_tgt_port_cit);
- config_group_init_type_name(&PORT_STAT_GRP(lun)->scsi_transport_group,
+ config_group_init_type_name(&lun->port_stat_grps.scsi_transport_group,
"scsi_transport", &target_stat_scsi_transport_cit);
- port_stat_grp->default_groups[0] = &PORT_STAT_GRP(lun)->scsi_port_group;
- port_stat_grp->default_groups[1] = &PORT_STAT_GRP(lun)->scsi_tgt_port_group;
- port_stat_grp->default_groups[2] = &PORT_STAT_GRP(lun)->scsi_transport_group;
+ port_stat_grp->default_groups[0] = &lun->port_stat_grps.scsi_port_group;
+ port_stat_grp->default_groups[1] = &lun->port_stat_grps.scsi_tgt_port_group;
+ port_stat_grp->default_groups[2] = &lun->port_stat_grps.scsi_transport_group;
port_stat_grp->default_groups[3] = NULL;
}
@@ -1264,7 +1262,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1314,7 +1312,7 @@ static ssize_t target_stat_scsi_auth_intr_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiAuthIntrTgtPortIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1632,7 +1630,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(
tpg = nacl->se_tpg;
/* scsiInstIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->tpg_get_inst_index(tpg));
+ tpg->se_tpg_tfo->tpg_get_inst_index(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1682,7 +1680,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port(
}
tpg = nacl->se_tpg;
/* scsiPortIndex */
- ret = snprintf(page, PAGE_SIZE, "%u\n", TPG_TFO(tpg)->tpg_get_tag(tpg));
+ ret = snprintf(page, PAGE_SIZE, "%u\n", tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_irq(&nacl->device_list_lock);
return ret;
}
@@ -1708,7 +1706,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(
tpg = nacl->se_tpg;
/* scsiAttIntrPortIndex */
ret = snprintf(page, PAGE_SIZE, "%u\n",
- TPG_TFO(tpg)->sess_get_index(se_sess));
+ tpg->se_tpg_tfo->sess_get_index(se_sess));
spin_unlock_irq(&nacl->nacl_sess_lock);
return ret;
}
@@ -1757,8 +1755,8 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
tpg = nacl->se_tpg;
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
- if (TPG_TFO(tpg)->sess_get_initiator_sid != NULL)
- TPG_TFO(tpg)->sess_get_initiator_sid(se_sess,
+ if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
+ tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
(unsigned char *)&buf[0], 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
@@ -1797,14 +1795,14 @@ static struct config_item_type target_stat_scsi_att_intr_port_cit = {
*/
void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl)
{
- struct config_group *ml_stat_grp = &ML_STAT_GRPS(lacl)->stat_group;
+ struct config_group *ml_stat_grp = &lacl->ml_stat_grps.stat_group;
- config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_auth_intr_group,
+ config_group_init_type_name(&lacl->ml_stat_grps.scsi_auth_intr_group,
"scsi_auth_intr", &target_stat_scsi_auth_intr_cit);
- config_group_init_type_name(&ML_STAT_GRPS(lacl)->scsi_att_intr_port_group,
+ config_group_init_type_name(&lacl->ml_stat_grps.scsi_att_intr_port_group,
"scsi_att_intr_port", &target_stat_scsi_att_intr_port_cit);
- ml_stat_grp->default_groups[0] = &ML_STAT_GRPS(lacl)->scsi_auth_intr_group;
- ml_stat_grp->default_groups[1] = &ML_STAT_GRPS(lacl)->scsi_att_intr_port_group;
+ ml_stat_grp->default_groups[0] = &lacl->ml_stat_grps.scsi_auth_intr_group;
+ ml_stat_grp->default_groups[1] = &lacl->ml_stat_grps.scsi_att_intr_port_group;
ml_stat_grp->default_groups[2] = NULL;
}
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 179063d81cd..27d4925e51c 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -41,13 +41,6 @@
#include "target_core_alua.h"
#include "target_core_pr.h"
-#define DEBUG_LUN_RESET
-#ifdef DEBUG_LUN_RESET
-#define DEBUG_LR(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_LR(x...)
-#endif
-
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
@@ -57,8 +50,8 @@ struct se_tmr_req *core_tmr_alloc_req(
tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
GFP_ATOMIC : GFP_KERNEL);
- if (!(tmr)) {
- printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+ if (!tmr) {
+ pr_err("Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
}
tmr->task_cmd = se_cmd;
@@ -80,9 +73,9 @@ void core_tmr_release_req(
return;
}
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
kmem_cache_free(se_tmr_req_cache, tmr);
}
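
The se_tmr_lock conversion above moves every taker to the _irq variants; a sketch of the matching enqueue side (the function is hypothetical -- only the lock and list member names come from this code):

/*
 * Hypothetical enqueue path: once one holder disables interrupts around
 * se_tmr_lock, all holders must, or an interrupt-context taker could
 * deadlock against the holder it preempted.
 */
static void example_tmr_enqueue(struct se_device *dev, struct se_tmr_req *tmr)
{
	spin_lock_irq(&dev->se_tmr_lock);
	list_add_tail(&tmr->tmr_list, &dev->dev_tmr_list);
	spin_unlock_irq(&dev->se_tmr_lock);
}
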
@@ -93,14 +86,14 @@ static void core_tmr_handle_tas_abort(
int tas,
int fe_count)
{
- if (!(fe_count)) {
+ if (!fe_count) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
- if (((tmr_nacl != NULL) &&
+ if ((tmr_nacl &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd);
@@ -113,15 +106,14 @@ int core_tmr_lun_reset(
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
- struct se_cmd *cmd;
- struct se_queue_req *qr, *qr_tmp;
+ struct se_cmd *cmd, *tcmd;
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
- struct se_queue_obj *qobj = dev->dev_queue_obj;
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_task *task, *task_tmp;
unsigned long flags;
- int fe_count, state, tas;
+ int fe_count, tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
@@ -133,7 +125,7 @@ int core_tmr_lun_reset(
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
- tas = DEV_ATTRIB(dev)->emulate_tas;
+ tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
@@ -142,20 +134,20 @@ int core_tmr_lun_reset(
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
- DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+ pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
- TPG_TFO(tmr_tpg)->get_fabric_name(),
+ tmr_tpg->se_tpg_tfo->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
- DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+ pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
- TRANSPORT(dev)->name, tas);
+ dev->transport->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
@@ -164,8 +156,8 @@ int core_tmr_lun_reset(
continue;
cmd = tmr_p->task_cmd;
- if (!(cmd)) {
- printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+ if (!cmd) {
+ pr_err("Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
@@ -173,33 +165,33 @@ int core_tmr_lun_reset(
* parameter (e.g.: for PROUT PREEMPT_AND_ABORT service action
* skip non-registration key matching TMRs.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->t_transport_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&dev->se_tmr_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- spin_lock(&dev->se_tmr_lock);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ spin_lock_irq(&dev->se_tmr_lock);
continue;
}
- DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+ pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_finish_abort_tmr(cmd);
- spin_lock(&dev->se_tmr_lock);
+ spin_lock_irq(&dev->se_tmr_lock);
}
- spin_unlock(&dev->se_tmr_lock);
+ spin_unlock_irq(&dev->se_tmr_lock);
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
@@ -224,23 +216,17 @@ int core_tmr_lun_reset(
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
- if (!(TASK_CMD(task))) {
- printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ if (!task->task_se_cmd) {
+ pr_err("task->task_se_cmd is NULL!\n");
continue;
}
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- if (!T_TASK(cmd)) {
- printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
- " %p ITT: 0x%08x\n", task, cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
- continue;
- }
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@@ -254,38 +240,38 @@ int core_tmr_lun_reset(
atomic_set(&task->task_state_active, 0);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ pr_debug("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
- CMD_TFO(cmd)->get_task_tag(cmd), 0,
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
- cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+ cmd->se_tfo->get_task_tag(cmd), 0,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
+ cmd->deferred_t_state, cmd->t_task_cdb[0]);
+ pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
- T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ cmd->se_tfo->get_task_tag(cmd), cmd->pr_res_key,
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+ pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
- DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+ pr_debug("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
@@ -295,34 +281,34 @@ int core_tmr_lun_reset(
}
__transport_stop_task_timer(task, &flags);
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
- DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+ &cmd->t_state_lock, flags);
+ pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+ atomic_read(&cmd->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
+ fe_count = atomic_read(&cmd->t_fe_count);
- if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
- DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+ if (atomic_read(&cmd->t_transport_active)) {
+ pr_debug("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ atomic_set(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+ pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
- atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_aborted, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -337,25 +323,12 @@ int core_tmr_lun_reset(
* reference, otherwise the struct se_cmd is released.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
- cmd = (struct se_cmd *)qr->cmd;
- if (!(cmd)) {
- /*
- * Skip these for non PREEMPT_AND_ABORT usage..
- */
- if (preempt_and_abort_list != NULL)
- continue;
-
- atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
- kfree(qr);
- continue;
- }
+ list_for_each_entry_safe(cmd, tcmd, &qobj->qobj_list, se_queue_node) {
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if ((preempt_and_abort_list != NULL) &&
+ if (preempt_and_abort_list &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
@@ -365,30 +338,22 @@ int core_tmr_lun_reset(
if (prout_cmd == cmd)
continue;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
+ atomic_dec(&cmd->t_transport_queue_active);
atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
+ list_del(&cmd->se_queue_node);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- state = qr->state;
- kfree(qr);
-
- DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+ pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
- "Preempt" : "", cmd, state,
- atomic_read(&T_TASK(cmd)->t_fe_count));
+ "Preempt" : "", cmd, cmd->t_state,
+ atomic_read(&cmd->t_fe_count));
/*
* Signal that the command has failed via cmd->se_cmd_flags.
- * and call TFO->new_cmd_failure() to wakeup any fabric
- * dependent code used to wait for unsolicited data out
- * allocation to complete. The fabric module is expected
- * to dump any remaining unsolicited data out for the aborted
- * command at this point.
*/
transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
- atomic_read(&T_TASK(cmd)->t_fe_count));
+ atomic_read(&cmd->t_fe_count));
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
@@ -396,21 +361,21 @@ int core_tmr_lun_reset(
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
- if (!(preempt_and_abort_list) &&
+ if (!preempt_and_abort_list &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
- printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+ pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
}
spin_lock_irq(&dev->stats_lock);
dev->num_resets++;
spin_unlock_irq(&dev->stats_lock);
- DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+ pr_debug("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
- TRANSPORT(dev)->name);
+ dev->transport->name);
return 0;
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5ec745fed93..4f1ba4c5ef1 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -44,6 +44,12 @@
#include <target/target_core_fabric_ops.h>
#include "target_core_hba.h"
+#include "target_core_stat.h"
+
+extern struct se_device *g_lun0_dev;
+
+static DEFINE_SPINLOCK(tpg_lock);
+static LIST_HEAD(tpg_list);
/* core_clear_initiator_node_from_tpg():
*
@@ -66,9 +72,9 @@ static void core_clear_initiator_node_from_tpg(
continue;
if (!deve->se_lun) {
- printk(KERN_ERR "%s device entries device pointer is"
+ pr_err("%s device entries device pointer is"
" NULL, but Initiator has access.\n",
- TPG_TFO(tpg)->get_fabric_name());
+ tpg->se_tpg_tfo->get_fabric_name());
continue;
}
@@ -80,14 +86,13 @@ static void core_clear_initiator_node_from_tpg(
spin_lock(&lun->lun_acl_lock);
list_for_each_entry_safe(acl, acl_tmp,
&lun->lun_acl_list, lacl_list) {
- if (!(strcmp(acl->initiatorname,
- nacl->initiatorname)) &&
- (acl->mapped_lun == deve->mapped_lun))
+ if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
+ (acl->mapped_lun == deve->mapped_lun))
break;
}
if (!acl) {
- printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+ pr_err("Unable to locate struct se_lun_acl for %s,"
" mapped_lun: %u\n", nacl->initiatorname,
deve->mapped_lun);
spin_unlock(&lun->lun_acl_lock);
@@ -115,7 +120,7 @@ struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!(strcmp(acl->initiatorname, initiatorname)))
+ if (!strcmp(acl->initiatorname, initiatorname))
return acl;
}
@@ -134,8 +139,8 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
- if (!(strcmp(acl->initiatorname, initiatorname)) &&
- (!(acl->dynamic_node_acl))) {
+ if (!strcmp(acl->initiatorname, initiatorname) &&
+ !acl->dynamic_node_acl) {
spin_unlock_bh(&tpg->acl_node_lock);
return acl;
}
@@ -171,7 +176,7 @@ void core_tpg_add_node_to_devs(
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, or READ_ONLY;
*/
- if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
+ if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
@@ -181,16 +186,16 @@ void core_tpg_add_node_to_devs(
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
+ if (dev->transport->get_device_type(dev) == TYPE_DISK)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
- printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+ pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
" access for LUN in Demo Mode\n",
- TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
@@ -210,8 +215,8 @@ static int core_set_queue_depth_for_node(
struct se_node_acl *acl)
{
if (!acl->queue_depth) {
- printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
- "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
+ pr_err("Queue depth for %s Initiator Node: %s is 0,"
+ "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
@@ -230,10 +235,10 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
- if (!(nacl->device_list)) {
- printk(KERN_ERR "Unable to allocate memory for"
+ if (!nacl->device_list) {
+ pr_err("Unable to allocate memory for"
" struct se_node_acl->device_list\n");
- return -1;
+ return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
@@ -259,14 +264,14 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if ((acl))
+ if (acl)
return acl;
- if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
+ if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
return NULL;
- acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
- if (!(acl))
+ acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
+ if (!acl)
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
@@ -274,23 +279,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
- acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
+ acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
- TPG_TFO(tpg)->set_default_node_attributes(acl);
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
@@ -301,10 +306,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock);
- printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
@@ -351,12 +356,12 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if ((acl)) {
+ if (acl) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
- printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
- " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
+ pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
+ " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
spin_unlock_bh(&tpg->acl_node_lock);
/*
* Release the locally allocated struct se_node_acl
@@ -364,22 +369,22 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
* a pointer to an existing demo mode node ACL.
*/
if (se_nacl)
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
se_nacl);
goto done;
}
- printk(KERN_ERR "ACL entry for %s Initiator"
+ pr_err("ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
- " request.\n", TPG_TFO(tpg)->get_fabric_name(),
- initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST);
}
spin_unlock_bh(&tpg->acl_node_lock);
- if (!(se_nacl)) {
- printk("struct se_node_acl pointer is NULL\n");
+ if (!se_nacl) {
+ pr_err("struct se_node_acl pointer is NULL\n");
return ERR_PTR(-EINVAL);
}
/*
@@ -400,16 +405,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
- TPG_TFO(tpg)->set_default_node_attributes(acl);
+ tpg->se_tpg_tfo->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-ENOMEM);
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
- TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
+ tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-EINVAL);
}
@@ -419,10 +424,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
spin_unlock_bh(&tpg->acl_node_lock);
done:
- printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
return acl;
}
@@ -457,7 +462,7 @@ int core_tpg_del_initiator_node_acl(
/*
* Determine if the session needs to be closed by our context.
*/
- if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
spin_unlock_bh(&tpg->session_lock);
@@ -465,7 +470,7 @@ int core_tpg_del_initiator_node_acl(
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
- TPG_TFO(tpg)->close_session(sess);
+ tpg->se_tpg_tfo->close_session(sess);
spin_lock_bh(&tpg->session_lock);
}
@@ -475,10 +480,10 @@ int core_tpg_del_initiator_node_acl(
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
- printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
- " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
- TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
+ pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+ " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
+ tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
return 0;
}
@@ -500,11 +505,11 @@ int core_tpg_set_initiator_node_queue_depth(
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!(acl)) {
- printk(KERN_ERR "Access Control List entry for %s Initiator"
+ if (!acl) {
+ pr_err("Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
- " request.\n", TPG_TFO(tpg)->get_fabric_name(),
- initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
+ " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
+ initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return -ENODEV;
}
@@ -520,12 +525,12 @@ int core_tpg_set_initiator_node_queue_depth(
continue;
if (!force) {
- printk(KERN_ERR "Unable to change queue depth for %s"
+ pr_err("Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
- TPG_TFO(tpg)->get_fabric_name(), initiatorname);
+ tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
spin_unlock_bh(&tpg->session_lock);
spin_lock_bh(&tpg->acl_node_lock);
@@ -537,7 +542,7 @@ int core_tpg_set_initiator_node_queue_depth(
/*
* Determine if the session needs to be closed by our context.
*/
- if (!(TPG_TFO(tpg)->shutdown_session(sess)))
+ if (!tpg->se_tpg_tfo->shutdown_session(sess))
continue;
init_sess = sess;
@@ -549,7 +554,7 @@ int core_tpg_set_initiator_node_queue_depth(
* Change the value in the Node's struct se_node_acl, and call
* core_set_queue_depth_for_node() to add the requested queue depth.
*
- * Finally call TPG_TFO(tpg)->close_session() to force session
+ * Finally call tpg->se_tpg_tfo->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
*/
@@ -561,10 +566,10 @@ int core_tpg_set_initiator_node_queue_depth(
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
* the $FABRIC_MOD has already the set session reinstatement
- * bit from TPG_TFO(tpg)->shutdown_session() called above.
+ * bit from tpg->se_tpg_tfo->shutdown_session() called above.
*/
if (init_sess)
- TPG_TFO(tpg)->close_session(init_sess);
+ tpg->se_tpg_tfo->close_session(init_sess);
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -578,12 +583,12 @@ int core_tpg_set_initiator_node_queue_depth(
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
if (init_sess)
- TPG_TFO(tpg)->close_session(init_sess);
+ tpg->se_tpg_tfo->close_session(init_sess);
- printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+ pr_debug("Successfuly changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
- initiatorname, TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
@@ -597,7 +602,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
- struct se_device *dev = se_global->g_lun0_dev;
+ struct se_device *dev = g_lun0_dev;
struct se_lun *lun = &se_tpg->tpg_virt_lun0;
u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
int ret;
@@ -614,7 +619,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
if (ret < 0)
- return -1;
+ return ret;
return 0;
}
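
The change from a bare -1 to returning ret preserves the specific errno produced by core_tpg_post_addlun() for the caller. A generic sketch of the pattern; ex_setup() and ex_step() are hypothetical:

static int ex_setup(void)
{
	int ret;

	ret = ex_step();	/* returns 0 or a negative errno */
	if (ret < 0)
		return ret;	/* propagate the real errno, not -1 */

	return 0;
}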
@@ -638,8 +643,8 @@ int core_tpg_register(
se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
- if (!(se_tpg->tpg_lun_list)) {
- printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+ if (!se_tpg->tpg_lun_list) {
+ pr_err("Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
return -ENOMEM;
}
@@ -663,7 +668,7 @@ int core_tpg_register(
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
- INIT_LIST_HEAD(&se_tpg->se_tpg_list);
+ INIT_LIST_HEAD(&se_tpg->se_tpg_node);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
@@ -676,11 +681,11 @@ int core_tpg_register(
}
}
- spin_lock_bh(&se_global->se_tpg_lock);
- list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
- spin_unlock_bh(&se_global->se_tpg_lock);
+ spin_lock_bh(&tpg_lock);
+ list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
+ spin_unlock_bh(&tpg_lock);
- printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+ pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@@ -694,16 +699,16 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
{
struct se_node_acl *nacl, *nacl_tmp;
- printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+ pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
- "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
- TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
- TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
+ "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
- spin_lock_bh(&se_global->se_tpg_lock);
- list_del(&se_tpg->se_tpg_list);
- spin_unlock_bh(&se_global->se_tpg_lock);
+ spin_lock_bh(&tpg_lock);
+ list_del(&se_tpg->se_tpg_node);
+ spin_unlock_bh(&tpg_lock);
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
@@ -721,7 +726,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
core_tpg_wait_for_nacl_pr_ref(nacl);
core_free_device_list_for_node(nacl, se_tpg);
- TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
+ se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
spin_lock_bh(&se_tpg->acl_node_lock);
}
@@ -743,21 +748,21 @@ struct se_lun *core_tpg_pre_addlun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
- TPG_TFO(tpg)->get_fabric_name(),
+ tpg->se_tpg_tfo->get_fabric_name(),
unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+ pr_err("TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
- unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-EINVAL);
}
@@ -772,8 +777,11 @@ int core_tpg_post_addlun(
u32 lun_access,
void *lun_ptr)
{
- if (core_dev_export(lun_ptr, tpg, lun) < 0)
- return -1;
+ int ret;
+
+ ret = core_dev_export(lun_ptr, tpg, lun);
+ if (ret < 0)
+ return ret;
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
@@ -799,21 +807,21 @@ struct se_lun *core_tpg_pre_dellun(
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
- printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+ pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
- printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+ pr_err("%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
- TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
- TPG_TFO(tpg)->tpg_get_tag(tpg));
+ tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
+ tpg->se_tpg_tfo->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-ENODEV);
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4b9b7169bdd..46352d658e3 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,139 +58,12 @@
#include "target_core_scdb.h"
#include "target_core_ua.h"
-/* #define DEBUG_CDB_HANDLER */
-#ifdef DEBUG_CDB_HANDLER
-#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CDB_H(x...)
-#endif
-
-/* #define DEBUG_CMD_MAP */
-#ifdef DEBUG_CMD_MAP
-#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CMD_M(x...)
-#endif
-
-/* #define DEBUG_MEM_ALLOC */
-#ifdef DEBUG_MEM_ALLOC
-#define DEBUG_MEM(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM(x...)
-#endif
-
-/* #define DEBUG_MEM2_ALLOC */
-#ifdef DEBUG_MEM2_ALLOC
-#define DEBUG_MEM2(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM2(x...)
-#endif
-
-/* #define DEBUG_SG_CALC */
-#ifdef DEBUG_SG_CALC
-#define DEBUG_SC(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SC(x...)
-#endif
-
-/* #define DEBUG_SE_OBJ */
-#ifdef DEBUG_SE_OBJ
-#define DEBUG_SO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SO(x...)
-#endif
-
-/* #define DEBUG_CMD_VOL */
-#ifdef DEBUG_CMD_VOL
-#define DEBUG_VOL(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_VOL(x...)
-#endif
-
-/* #define DEBUG_CMD_STOP */
-#ifdef DEBUG_CMD_STOP
-#define DEBUG_CS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CS(x...)
-#endif
-
-/* #define DEBUG_PASSTHROUGH */
-#ifdef DEBUG_PASSTHROUGH
-#define DEBUG_PT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_PT(x...)
-#endif
-
-/* #define DEBUG_TASK_STOP */
-#ifdef DEBUG_TASK_STOP
-#define DEBUG_TS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TS(x...)
-#endif
-
-/* #define DEBUG_TRANSPORT_STOP */
-#ifdef DEBUG_TRANSPORT_STOP
-#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TRANSPORT_S(x...)
-#endif
-
-/* #define DEBUG_TASK_FAILURE */
-#ifdef DEBUG_TASK_FAILURE
-#define DEBUG_TF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TF(x...)
-#endif
-
-/* #define DEBUG_DEV_OFFLINE */
-#ifdef DEBUG_DEV_OFFLINE
-#define DEBUG_DO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_DO(x...)
-#endif
-
-/* #define DEBUG_TASK_STATE */
-#ifdef DEBUG_TASK_STATE
-#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TSTATE(x...)
-#endif
-
-/* #define DEBUG_STATUS_THR */
-#ifdef DEBUG_STATUS_THR
-#define DEBUG_ST(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_ST(x...)
-#endif
-
-/* #define DEBUG_TASK_TIMEOUT */
-#ifdef DEBUG_TASK_TIMEOUT
-#define DEBUG_TT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TT(x...)
-#endif
-
-/* #define DEBUG_GENERIC_REQUEST_FAILURE */
-#ifdef DEBUG_GENERIC_REQUEST_FAILURE
-#define DEBUG_GRF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_GRF(x...)
-#endif
-
-/* #define DEBUG_SAM_TASK_ATTRS */
-#ifdef DEBUG_SAM_TASK_ATTRS
-#define DEBUG_STA(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_STA(x...)
-#endif
-
-struct se_global *se_global;
+static int sub_api_initialized;
static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
-struct kmem_cache *se_mem_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
@@ -201,116 +74,87 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
typedef int (*map_func_t)(struct se_task *, u32);
static int transport_generic_write_pending(struct se_cmd *);
-static int transport_processing_thread(void *);
+static int transport_processing_thread(void *param);
static int __transport_execute_tasks(struct se_device *dev);
static void transport_complete_task_attr(struct se_cmd *cmd);
+static int transport_complete_qf(struct se_cmd *cmd);
+static void transport_handle_queue_full(struct se_cmd *cmd,
+ struct se_device *dev, int (*qf_callback)(struct se_cmd *));
static void transport_direct_request_timeout(struct se_cmd *cmd);
static void transport_free_dev_tasks(struct se_cmd *cmd);
-static u32 transport_generic_get_cdb_count(struct se_cmd *cmd,
- unsigned long long starting_lba, u32 sectors,
+static u32 transport_allocate_tasks(struct se_cmd *cmd,
+ unsigned long long starting_lba,
enum dma_data_direction data_direction,
- struct list_head *mem_list, int set_counts);
-static int transport_generic_get_mem(struct se_cmd *cmd, u32 length,
- u32 dma_size);
+ struct scatterlist *sgl, unsigned int nents);
+static int transport_generic_get_mem(struct se_cmd *cmd);
static int transport_generic_remove(struct se_cmd *cmd,
- int release_to_pool, int session_reinstatement);
-static int transport_get_sectors(struct se_cmd *cmd);
-static struct list_head *transport_init_se_mem_list(void);
-static int transport_map_sg_to_mem(struct se_cmd *cmd,
- struct list_head *se_mem_list, void *in_mem,
- u32 *se_mem_cnt);
-static void transport_memcpy_se_mem_read_contig(struct se_cmd *cmd,
- unsigned char *dst, struct list_head *se_mem_list);
+ int session_reinstatement);
static void transport_release_fe_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
static void transport_stop_all_task_timers(struct se_cmd *cmd);
-int init_se_global(void)
+int init_se_kmem_caches(void)
{
- struct se_global *global;
-
- global = kzalloc(sizeof(struct se_global), GFP_KERNEL);
- if (!(global)) {
- printk(KERN_ERR "Unable to allocate memory for struct se_global\n");
- return -1;
- }
-
- INIT_LIST_HEAD(&global->g_lu_gps_list);
- INIT_LIST_HEAD(&global->g_se_tpg_list);
- INIT_LIST_HEAD(&global->g_hba_list);
- INIT_LIST_HEAD(&global->g_se_dev_list);
- spin_lock_init(&global->g_device_lock);
- spin_lock_init(&global->hba_lock);
- spin_lock_init(&global->se_tpg_lock);
- spin_lock_init(&global->lu_gps_lock);
- spin_lock_init(&global->plugin_class_lock);
-
se_cmd_cache = kmem_cache_create("se_cmd_cache",
sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!(se_cmd_cache)) {
- printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+ if (!se_cmd_cache) {
+ pr_err("kmem_cache_create for struct se_cmd failed\n");
goto out;
}
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
- if (!(se_tmr_req_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+ if (!se_tmr_req_cache) {
+ pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
0, NULL);
- if (!(se_sess_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_session"
+ if (!se_sess_cache) {
+ pr_err("kmem_cache_create() for struct se_session"
" failed\n");
goto out;
}
se_ua_cache = kmem_cache_create("se_ua_cache",
sizeof(struct se_ua), __alignof__(struct se_ua),
0, NULL);
- if (!(se_ua_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
- goto out;
- }
- se_mem_cache = kmem_cache_create("se_mem_cache",
- sizeof(struct se_mem), __alignof__(struct se_mem), 0, NULL);
- if (!(se_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct se_mem failed\n");
+ if (!se_ua_cache) {
+ pr_err("kmem_cache_create() for struct se_ua failed\n");
goto out;
}
t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
sizeof(struct t10_pr_registration),
__alignof__(struct t10_pr_registration), 0, NULL);
- if (!(t10_pr_reg_cache)) {
- printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+ if (!t10_pr_reg_cache) {
+ pr_err("kmem_cache_create() for struct t10_pr_registration"
" failed\n");
goto out;
}
t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
0, NULL);
- if (!(t10_alua_lu_gp_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+ if (!t10_alua_lu_gp_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
" failed\n");
goto out;
}
t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
sizeof(struct t10_alua_lu_gp_member),
__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
- if (!(t10_alua_lu_gp_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+ if (!t10_alua_lu_gp_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
"cache failed\n");
goto out;
}
t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
sizeof(struct t10_alua_tg_pt_gp),
__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
- if (!(t10_alua_tg_pt_gp_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ if (!t10_alua_tg_pt_gp_cache) {
+ pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"cache failed\n");
goto out;
}
@@ -319,14 +163,12 @@ int init_se_global(void)
sizeof(struct t10_alua_tg_pt_gp_member),
__alignof__(struct t10_alua_tg_pt_gp_member),
0, NULL);
- if (!(t10_alua_tg_pt_gp_mem_cache)) {
- printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+ if (!t10_alua_tg_pt_gp_mem_cache) {
+ pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
"mem_t failed\n");
goto out;
}
- se_global = global;
-
return 0;
out:
if (se_cmd_cache)
@@ -337,8 +179,6 @@ out:
kmem_cache_destroy(se_sess_cache);
if (se_ua_cache)
kmem_cache_destroy(se_ua_cache);
- if (se_mem_cache)
- kmem_cache_destroy(se_mem_cache);
if (t10_pr_reg_cache)
kmem_cache_destroy(t10_pr_reg_cache);
if (t10_alua_lu_gp_cache)
@@ -349,45 +189,25 @@ out:
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
if (t10_alua_tg_pt_gp_mem_cache)
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
- kfree(global);
- return -1;
+ return -ENOMEM;
}
-void release_se_global(void)
+void release_se_kmem_caches(void)
{
- struct se_global *global;
-
- global = se_global;
- if (!(global))
- return;
-
kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
- kmem_cache_destroy(se_mem_cache);
kmem_cache_destroy(t10_pr_reg_cache);
kmem_cache_destroy(t10_alua_lu_gp_cache);
kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);
- kfree(global);
-
- se_global = NULL;
}
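
release_se_kmem_caches() above can tear the caches down unconditionally now that the se_global indirection is gone. A self-contained sketch of the create/alloc/free/destroy lifecycle these caches follow; the ex_* names are illustrative:

#include <linux/slab.h>

struct ex_obj {
	int id;
};

static struct kmem_cache *ex_cache;

static int ex_caches_init(void)
{
	ex_cache = kmem_cache_create("ex_cache", sizeof(struct ex_obj),
				     __alignof__(struct ex_obj), 0, NULL);
	if (!ex_cache)
		return -ENOMEM;	/* mirror init_se_kmem_caches() above */
	return 0;
}

static struct ex_obj *ex_obj_alloc(void)
{
	return kmem_cache_zalloc(ex_cache, GFP_KERNEL);	/* zeroed object */
}

static void ex_obj_free(struct ex_obj *obj)
{
	kmem_cache_free(ex_cache, obj);
}

static void ex_caches_exit(void)
{
	kmem_cache_destroy(ex_cache);	/* all objects must be freed first */
}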
-/* SCSI statistics table index */
-static struct scsi_index_table scsi_index_table;
-
-/*
- * Initialize the index table for allocating unique row indexes to various mib
- * tables.
- */
-void init_scsi_index_table(void)
-{
- memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
- spin_lock_init(&scsi_index_table.lock);
-}
+/* This code ensures unique mib indexes are handed out. */
+static DEFINE_SPINLOCK(scsi_mib_index_lock);
+static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
/*
* Allocate a new row index for the entry type specified
@@ -396,16 +216,11 @@ u32 scsi_get_new_index(scsi_index_t type)
{
u32 new_index;
- if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
- printk(KERN_ERR "Invalid index type %d\n", type);
- return -EINVAL;
- }
+ BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
- spin_lock(&scsi_index_table.lock);
- new_index = ++scsi_index_table.scsi_mib_index[type];
- if (new_index == 0)
- new_index = ++scsi_index_table.scsi_mib_index[type];
- spin_unlock(&scsi_index_table.lock);
+ spin_lock(&scsi_mib_index_lock);
+ new_index = ++scsi_mib_index[type];
+ spin_unlock(&scsi_mib_index_lock);
return new_index;
}
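
The rework above replaces the kzalloc'd scsi_index_table with file-scope statics, so no init function is needed and allocation can no longer fail. A minimal sketch of the same pattern with illustrative ex_* names:

#include <linux/spinlock.h>

enum { EX_INDEX_A, EX_INDEX_B, EX_INDEX_MAX };

static DEFINE_SPINLOCK(ex_index_lock);	/* statically initialized lock */
static u32 ex_index[EX_INDEX_MAX];	/* zeroed by static storage */

static u32 ex_get_new_index(int type)
{
	u32 new_index;

	spin_lock(&ex_index_lock);
	new_index = ++ex_index[type];	/* wraps at U32_MAX, as above */
	spin_unlock(&ex_index_lock);

	return new_index;
}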
@@ -425,34 +240,37 @@ static int transport_subsystem_reqmods(void)
ret = request_module("target_core_iblock");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_iblock\n");
+ pr_err("Unable to load target_core_iblock\n");
ret = request_module("target_core_file");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_file\n");
+ pr_err("Unable to load target_core_file\n");
ret = request_module("target_core_pscsi");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_pscsi\n");
+ pr_err("Unable to load target_core_pscsi\n");
ret = request_module("target_core_stgt");
if (ret != 0)
- printk(KERN_ERR "Unable to load target_core_stgt\n");
+ pr_err("Unable to load target_core_stgt\n");
return 0;
}
int transport_subsystem_check_init(void)
{
- if (se_global->g_sub_api_initialized)
+ int ret;
+
+ if (sub_api_initialized)
return 0;
/*
* Request the loading of known TCM subsystem plugins..
*/
- if (transport_subsystem_reqmods() < 0)
- return -1;
+ ret = transport_subsystem_reqmods();
+ if (ret < 0)
+ return ret;
- se_global->g_sub_api_initialized = 1;
+ sub_api_initialized = 1;
return 0;
}
@@ -461,8 +279,8 @@ struct se_session *transport_init_session(void)
struct se_session *se_sess;
se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
- if (!(se_sess)) {
- printk(KERN_ERR "Unable to allocate struct se_session from"
+ if (!se_sess) {
+ pr_err("Unable to allocate struct se_session from"
" se_sess_cache\n");
return ERR_PTR(-ENOMEM);
}
@@ -497,9 +315,9 @@ void __transport_register_session(
* If the fabric module supports an ISID based TransportID,
* save this value in binary from the fabric I_T Nexus now.
*/
- if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) {
+ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
- TPG_TFO(se_tpg)->sess_get_initiator_sid(se_sess,
+ se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
@@ -516,8 +334,8 @@ void __transport_register_session(
}
list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
- printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
- TPG_TFO(se_tpg)->get_fabric_name(), se_sess->fabric_sess_ptr);
+ pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+ se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);
@@ -541,7 +359,7 @@ void transport_deregister_session_configfs(struct se_session *se_sess)
* Used by struct se_node_acl's under ConfigFS to locate active struct se_session
*/
se_nacl = se_sess->se_node_acl;
- if ((se_nacl)) {
+ if (se_nacl) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
list_del(&se_sess->sess_acl_list);
/*
@@ -572,7 +390,7 @@ void transport_deregister_session(struct se_session *se_sess)
struct se_portal_group *se_tpg = se_sess->se_tpg;
struct se_node_acl *se_nacl;
- if (!(se_tpg)) {
+ if (!se_tpg) {
transport_free_session(se_sess);
return;
}
@@ -588,18 +406,18 @@ void transport_deregister_session(struct se_session *se_sess)
* struct se_node_acl if it had been previously dynamically generated.
*/
se_nacl = se_sess->se_node_acl;
- if ((se_nacl)) {
+ if (se_nacl) {
spin_lock_bh(&se_tpg->acl_node_lock);
if (se_nacl->dynamic_node_acl) {
- if (!(TPG_TFO(se_tpg)->tpg_check_demo_mode_cache(
- se_tpg))) {
+ if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
+ se_tpg)) {
list_del(&se_nacl->acl_list);
se_tpg->num_node_acls--;
spin_unlock_bh(&se_tpg->acl_node_lock);
core_tpg_wait_for_nacl_pr_ref(se_nacl);
core_free_device_list_for_node(se_nacl, se_tpg);
- TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
+ se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
se_nacl);
spin_lock_bh(&se_tpg->acl_node_lock);
}
@@ -609,13 +427,13 @@ void transport_deregister_session(struct se_session *se_sess)
transport_free_session(se_sess);
- printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
- TPG_TFO(se_tpg)->get_fabric_name());
+ pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
+ se_tpg->se_tpg_tfo->get_fabric_name());
}
EXPORT_SYMBOL(transport_deregister_session);
/*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_state_lock held.
*/
static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
{
@@ -623,28 +441,25 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
- if (!T_TASK(cmd))
- return;
-
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
dev = task->se_dev;
- if (!(dev))
+ if (!dev)
continue;
if (atomic_read(&task->task_active))
continue;
- if (!(atomic_read(&task->task_state_active)))
+ if (!atomic_read(&task->task_state_active))
continue;
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_del(&task->t_state_list);
- DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
- CMD_TFO(cmd)->tfo_get_task_tag(cmd), dev, task);
+ pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ cmd->se_tfo->get_task_tag(cmd), dev, task);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
atomic_set(&task->task_state_active, 0);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_ex_left);
+ atomic_dec(&cmd->t_task_cdbs_ex_left);
}
}
@@ -663,34 +478,34 @@ static int transport_cmd_check_stop(
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if IOCTL context caller in requesting the stopping of this
* command for LUN shutdown purposes.
*/
- if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
- DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->transport_lun_stop)"
+ if (atomic_read(&cmd->transport_lun_stop)) {
+ pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
cmd->deferred_t_state = cmd->t_state;
cmd->t_state = TRANSPORT_DEFERRED_CMD;
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2)
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&T_TASK(cmd)->transport_lun_stop_comp);
+ complete(&cmd->transport_lun_stop_comp);
return 1;
}
/*
* Determine if frontend context caller is requesting the stopping of
- * this command for frontend excpections.
+ * this command for frontend exceptions.
*/
- if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
- DEBUG_CS("%s:%d atomic_read(&T_TASK(cmd)->t_transport_stop) =="
+ if (atomic_read(&cmd->t_transport_stop)) {
+ pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
cmd->deferred_t_state = cmd->t_state;
cmd->t_state = TRANSPORT_DEFERRED_CMD;
@@ -703,13 +518,13 @@ static int transport_cmd_check_stop(
*/
if (transport_off == 2)
cmd->se_lun = NULL;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&T_TASK(cmd)->t_transport_stop_comp);
+ complete(&cmd->t_transport_stop_comp);
return 1;
}
if (transport_off) {
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_active, 0);
if (transport_off == 2) {
transport_all_task_dev_remove_state(cmd);
/*
@@ -722,20 +537,20 @@ static int transport_cmd_check_stop(
* their internally allocated I/O reference now and
* struct se_cmd now.
*/
- if (CMD_TFO(cmd)->check_stop_free != NULL) {
+ if (cmd->se_tfo->check_stop_free != NULL) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- CMD_TFO(cmd)->check_stop_free(cmd);
+ cmd->se_tfo->check_stop_free(cmd);
return 1;
}
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
} else if (t_state)
cmd->t_state = t_state;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
@@ -747,30 +562,30 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
- struct se_lun *lun = SE_LUN(cmd);
+ struct se_lun *lun = cmd->se_lun;
unsigned long flags;
if (!lun)
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto check_lun;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
check_lun:
spin_lock_irqsave(&lun->lun_cmd_lock, flags);
- if (atomic_read(&T_TASK(cmd)->transport_lun_active)) {
- list_del(&cmd->se_lun_list);
- atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ if (atomic_read(&cmd->transport_lun_active)) {
+ list_del(&cmd->se_lun_node);
+ atomic_set(&cmd->transport_lun_active, 0);
#if 0
- printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
- CMD_TFO(cmd)->get_task_tag(cmd), lun->unpacked_lun);
+ pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n"
+ cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
#endif
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, flags);
@@ -778,92 +593,59 @@ check_lun:
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
if (remove)
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
void transport_cmd_finish_abort_tmr(struct se_cmd *cmd)
{
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
-static int transport_add_cmd_to_queue(
+static void transport_add_cmd_to_queue(
struct se_cmd *cmd,
int t_state)
{
struct se_device *dev = cmd->se_dev;
- struct se_queue_obj *qobj = dev->dev_queue_obj;
- struct se_queue_req *qr;
+ struct se_queue_obj *qobj = &dev->dev_queue_obj;
unsigned long flags;
- qr = kzalloc(sizeof(struct se_queue_req), GFP_ATOMIC);
- if (!(qr)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " struct se_queue_req\n");
- return -1;
- }
- INIT_LIST_HEAD(&qr->qr_list);
-
- qr->cmd = (void *)cmd;
- qr->state = t_state;
+ INIT_LIST_HEAD(&cmd->se_queue_node);
if (t_state) {
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = t_state;
- atomic_set(&T_TASK(cmd)->t_transport_active, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_active, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- list_add_tail(&qr->qr_list, &qobj->qobj_list);
- atomic_inc(&T_TASK(cmd)->t_transport_queue_active);
+ if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
+ cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
+ list_add(&cmd->se_queue_node, &qobj->qobj_list);
+ } else
+ list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
+ atomic_inc(&cmd->t_transport_queue_active);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
wake_up_interruptible(&qobj->thread_wq);
- return 0;
-}
-
-/*
- * Called with struct se_queue_obj->cmd_queue_lock held.
- */
-static struct se_queue_req *
-__transport_get_qr_from_queue(struct se_queue_obj *qobj)
-{
- struct se_cmd *cmd;
- struct se_queue_req *qr = NULL;
-
- if (list_empty(&qobj->qobj_list))
- return NULL;
-
- list_for_each_entry(qr, &qobj->qobj_list, qr_list)
- break;
-
- if (qr->cmd) {
- cmd = (struct se_cmd *)qr->cmd;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
- }
- list_del(&qr->qr_list);
- atomic_dec(&qobj->queue_cnt);
-
- return qr;
}
-static struct se_queue_req *
-transport_get_qr_from_queue(struct se_queue_obj *qobj)
+static struct se_cmd *
+transport_get_cmd_from_queue(struct se_queue_obj *qobj)
{
struct se_cmd *cmd;
- struct se_queue_req *qr;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -871,50 +653,42 @@ transport_get_qr_from_queue(struct se_queue_obj *qobj)
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return NULL;
}
+ cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
- list_for_each_entry(qr, &qobj->qobj_list, qr_list)
- break;
+ atomic_dec(&cmd->t_transport_queue_active);
- if (qr->cmd) {
- cmd = (struct se_cmd *)qr->cmd;
- atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
- }
- list_del(&qr->qr_list);
+ list_del(&cmd->se_queue_node);
atomic_dec(&qobj->queue_cnt);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- return qr;
+ return cmd;
}
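
transport_get_cmd_from_queue() now embeds the list linkage directly in struct se_cmd instead of allocating and freeing a struct se_queue_req wrapper per entry. A self-contained sketch of that embedded-node dequeue pattern; ex_* names are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

struct ex_item {
	struct list_head node;	/* linkage lives inside the object */
	int payload;
};

static struct ex_item *ex_dequeue(struct list_head *head, spinlock_t *lock)
{
	struct ex_item *item;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (list_empty(head)) {
		spin_unlock_irqrestore(lock, flags);
		return NULL;
	}
	item = list_first_entry(head, struct ex_item, node);
	list_del(&item->node);	/* no wrapper to kfree() afterwards */
	spin_unlock_irqrestore(lock, flags);

	return item;
}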
static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
struct se_queue_obj *qobj)
{
- struct se_cmd *q_cmd;
- struct se_queue_req *qr = NULL, *qr_p = NULL;
+ struct se_cmd *t;
unsigned long flags;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_queue_active))) {
+ if (!atomic_read(&cmd->t_transport_queue_active)) {
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
return;
}
- list_for_each_entry_safe(qr, qr_p, &qobj->qobj_list, qr_list) {
- q_cmd = (struct se_cmd *)qr->cmd;
- if (q_cmd != cmd)
- continue;
-
- atomic_dec(&T_TASK(q_cmd)->t_transport_queue_active);
- atomic_dec(&qobj->queue_cnt);
- list_del(&qr->qr_list);
- kfree(qr);
- }
+ list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
+ if (t == cmd) {
+ atomic_dec(&cmd->t_transport_queue_active);
+ atomic_dec(&qobj->queue_cnt);
+ list_del(&cmd->se_queue_node);
+ break;
+ }
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_queue_active)) {
- printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- atomic_read(&T_TASK(cmd)->t_transport_queue_active));
+ if (atomic_read(&cmd->t_transport_queue_active)) {
+ pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
+ cmd->se_tfo->get_task_tag(cmd),
+ atomic_read(&cmd->t_transport_queue_active));
}
}
@@ -924,7 +698,7 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
*/
void transport_complete_sync_cache(struct se_cmd *cmd, int good)
{
- struct se_task *task = list_entry(T_TASK(cmd)->t_task_list.next,
+ struct se_task *task = list_entry(cmd->t_task_list.next,
struct se_task, t_list);
if (good) {
@@ -933,7 +707,7 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- TASK_CMD(task)->transport_error_status =
+ task->task_se_cmd->transport_error_status =
PYX_TRANSPORT_ILLEGAL_REQUEST;
}
@@ -948,22 +722,18 @@ EXPORT_SYMBOL(transport_complete_sync_cache);
*/
void transport_complete_task(struct se_task *task, int success)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = task->se_dev;
int t_state;
unsigned long flags;
#if 0
- printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
- T_TASK(cmd)->t_task_cdb[0], dev);
+ pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+ cmd->t_task_cdb[0], dev);
#endif
- if (dev) {
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (dev)
atomic_inc(&dev->depth_left);
- atomic_inc(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
- }
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_set(&task->task_active, 0);
/*
@@ -985,14 +755,14 @@ void transport_complete_task(struct se_task *task, int success)
*/
if (atomic_read(&task->task_stop)) {
/*
- * Decrement T_TASK(cmd)->t_se_count if this task had
+ * Decrement cmd->t_se_count if this task had
* previously thrown its timeout exception handler.
*/
if (atomic_read(&task->task_timeout)) {
- atomic_dec(&T_TASK(cmd)->t_se_count);
+ atomic_dec(&cmd->t_se_count);
atomic_set(&task->task_timeout, 0);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&task->task_stop_comp);
return;
@@ -1003,34 +773,34 @@ void transport_complete_task(struct se_task *task, int success)
* the processing thread.
*/
if (atomic_read(&task->task_timeout)) {
- if (!(atomic_dec_and_test(
- &T_TASK(cmd)->t_task_cdbs_timeout_left))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ if (!atomic_dec_and_test(
+ &cmd->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return;
}
t_state = TRANSPORT_COMPLETE_TIMEOUT;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, t_state);
return;
}
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_timeout_left);
+ atomic_dec(&cmd->t_task_cdbs_timeout_left);
/*
* Decrement the outstanding t_task_cdbs_left count. The last
* struct se_task from struct se_cmd will complete itself into the
* device queue depending upon int success.
*/
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
if (!success)
- T_TASK(cmd)->t_tasks_failed = 1;
+ cmd->t_tasks_failed = 1;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (!success || T_TASK(cmd)->t_tasks_failed) {
+ if (!success || cmd->t_tasks_failed) {
t_state = TRANSPORT_COMPLETE_FAILURE;
if (!task->task_error_status) {
task->task_error_status =
@@ -1039,10 +809,10 @@ void transport_complete_task(struct se_task *task, int success)
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
} else {
- atomic_set(&T_TASK(cmd)->t_transport_complete, 1);
+ atomic_set(&cmd->t_transport_complete, 1);
t_state = TRANSPORT_COMPLETE_OK;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, t_state);
}
@@ -1080,9 +850,9 @@ static inline int transport_add_task_check_sam_attr(
&task_prev->t_execute_list :
&dev->execute_task_list);
- DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+ pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
" in execution queue\n",
- T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+ task->task_se_cmd->t_task_cdb[0]);
return 1;
}
/*
@@ -1124,8 +894,8 @@ static void __transport_add_task_to_execute_queue(
atomic_set(&task->task_state_active, 1);
- DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
- CMD_TFO(task->task_se_cmd)->get_task_tag(task->task_se_cmd),
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
task, dev);
}
@@ -1135,8 +905,8 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
dev = task->se_dev;
if (atomic_read(&task->task_state_active))
@@ -1146,23 +916,23 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
list_add_tail(&task->t_state_list, &dev->state_task_list);
atomic_set(&task->task_state_active, 1);
- DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
- CMD_TFO(task->task_se_cmd)->get_task_tag(
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(
task->task_se_cmd), task, dev);
spin_unlock(&dev->execute_task_lock);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_task *task, *task_prev = NULL;
unsigned long flags;
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_execute_queue))
continue;
/*
@@ -1174,30 +944,6 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
task_prev = task;
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
- return;
-}
-
-/* transport_get_task_from_execute_queue():
- *
- * Called with dev->execute_task_lock held.
- */
-static struct se_task *
-transport_get_task_from_execute_queue(struct se_device *dev)
-{
- struct se_task *task;
-
- if (list_empty(&dev->execute_task_list))
- return NULL;
-
- list_for_each_entry(task, &dev->execute_task_list, t_execute_list)
- break;
-
- list_del(&task->t_execute_list);
- atomic_set(&task->task_execute_queue, 0);
- atomic_dec(&dev->execute_tasks);
-
- return task;
}
/* transport_remove_task_from_execute_queue():
@@ -1222,6 +968,40 @@ void transport_remove_task_from_execute_queue(
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+/*
+ * Handle QUEUE_FULL / -EAGAIN status
+ */
+
+static void target_qf_do_work(struct work_struct *work)
+{
+ struct se_device *dev = container_of(work, struct se_device,
+ qf_work_queue);
+ struct se_cmd *cmd, *cmd_tmp;
+
+ spin_lock_irq(&dev->qf_cmd_lock);
+ list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) {
+
+ list_del(&cmd->se_qf_node);
+ atomic_dec(&dev->dev_qf_count);
+ smp_mb__after_atomic_dec();
+ spin_unlock_irq(&dev->qf_cmd_lock);
+
+ pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
+ " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
+ (cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
+ (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
+ : "UNKNOWN");
+ /*
+ * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd
+ * has been added to the head of the queue.
+ */
+ transport_add_cmd_to_queue(cmd, cmd->t_state);
+
+ spin_lock_irq(&dev->qf_cmd_lock);
+ }
+ spin_unlock_irq(&dev->qf_cmd_lock);
+}
+
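
target_qf_do_work() unlinks each entry before dropping the lock for the requeue, then retakes the lock to continue the walk. A distilled sketch of that drain pattern, which is safe only on the assumption that this work function is the sole context removing entries from the list; ex_* names are illustrative:

#include <linux/list.h>
#include <linux/spinlock.h>

struct ex_req {
	struct list_head node;
};

static void ex_drain(struct list_head *head, spinlock_t *lock)
{
	struct ex_req *req, *tmp;

	spin_lock_irq(lock);
	list_for_each_entry_safe(req, tmp, head, node) {
		list_del(&req->node);	/* unlink before dropping the lock */
		spin_unlock_irq(lock);

		/* ... slow-path processing of req, lock not held ... */

		spin_lock_irq(lock);	/* retake before touching the list */
	}
	spin_unlock_irq(lock);
}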
unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
switch (cmd->data_direction) {
@@ -1269,7 +1049,7 @@ void transport_dump_dev_state(
atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
- DEV_ATTRIB(dev)->block_size, DEV_ATTRIB(dev)->max_sectors);
+ dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
*bl += sprintf(b + *bl, " ");
}
@@ -1279,33 +1059,29 @@ void transport_dump_dev_state(
*/
static void transport_release_all_cmds(struct se_device *dev)
{
- struct se_cmd *cmd = NULL;
- struct se_queue_req *qr = NULL, *qr_p = NULL;
+ struct se_cmd *cmd, *tcmd;
int bug_out = 0, t_state;
unsigned long flags;
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
- list_for_each_entry_safe(qr, qr_p, &dev->dev_queue_obj->qobj_list,
- qr_list) {
-
- cmd = (struct se_cmd *)qr->cmd;
- t_state = qr->state;
- list_del(&qr->qr_list);
- kfree(qr);
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock,
+ spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
+ list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
+ se_queue_node) {
+ t_state = cmd->t_state;
+ list_del(&cmd->se_queue_node);
+ spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
flags);
- printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+ pr_err("Releasing ITT: 0x%08x, i_state: %u,"
" t_state: %u directly\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), t_state);
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), t_state);
transport_release_fe_cmd(cmd);
bug_out = 1;
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
+ spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags);
#if 0
if (bug_out)
BUG();
@@ -1362,7 +1138,7 @@ void transport_dump_vpd_proto_id(
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk(KERN_INFO "%s", buf);
+ pr_debug("%s", buf);
}
void
@@ -1387,7 +1163,8 @@ int transport_dump_vpd_assoc(
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
- int ret = 0, len;
+ int ret = 0;
+ int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Association: ");
@@ -1404,14 +1181,14 @@ int transport_dump_vpd_assoc(
break;
default:
sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
- ret = -1;
+ ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk("%s", buf);
+ pr_debug("%s", buf);
return ret;
}
@@ -1434,7 +1211,8 @@ int transport_dump_vpd_ident_type(
int p_buf_len)
{
unsigned char buf[VPD_TMP_BUF_SIZE];
- int ret = 0, len;
+ int ret = 0;
+ int len;
memset(buf, 0, VPD_TMP_BUF_SIZE);
len = sprintf(buf, "T10 VPD Identifier Type: ");
@@ -1461,14 +1239,17 @@ int transport_dump_vpd_ident_type(
default:
sprintf(buf+len, "Unsupported: 0x%02x\n",
vpd->device_identifier_type);
- ret = -1;
+ ret = -EINVAL;
break;
}
- if (p_buf)
+ if (p_buf) {
+ if (p_buf_len < strlen(buf)+1)
+ return -EINVAL;
strncpy(p_buf, buf, p_buf_len);
- else
- printk("%s", buf);
+ } else {
+ pr_debug("%s", buf);
+ }
return ret;
}
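
The new p_buf_len guard above refuses to emit a silently truncated identifier string rather than letting strncpy() clip it. The same check in isolation; ex_copy_report() is a hypothetical helper:

#include <linux/string.h>

static int ex_copy_report(char *dst, size_t dst_len, const char *src)
{
	if (dst_len < strlen(src) + 1)	/* no room for NUL: refuse */
		return -EINVAL;
	strncpy(dst, src, dst_len);
	return 0;
}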
@@ -1511,14 +1292,14 @@ int transport_dump_vpd_ident(
default:
sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
" 0x%02x", vpd->device_identifier_code_set);
- ret = -1;
+ ret = -EINVAL;
break;
}
if (p_buf)
strncpy(p_buf, buf, p_buf_len);
else
- printk("%s", buf);
+ pr_debug("%s", buf);
return ret;
}
@@ -1569,51 +1350,51 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
* This is currently not available in upstream Linux/SCSI Target
* mode code, and is assumed to be disabled while using TCM/pSCSI.
*/
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
dev->dev_task_attr_type = SAM_TASK_ATTR_PASSTHROUGH;
return;
}
dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
- DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
- " device\n", TRANSPORT(dev)->name,
- TRANSPORT(dev)->get_device_rev(dev));
+ pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+ " device\n", dev->transport->name,
+ dev->transport->get_device_rev(dev));
}
static void scsi_dump_inquiry(struct se_device *dev)
{
- struct t10_wwn *wwn = DEV_T10_WWN(dev);
+ struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
int i, device_type;
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
- printk(" Vendor: ");
+ pr_debug(" Vendor: ");
for (i = 0; i < 8; i++)
if (wwn->vendor[i] >= 0x20)
- printk("%c", wwn->vendor[i]);
+ pr_debug("%c", wwn->vendor[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk(" Model: ");
+ pr_debug(" Model: ");
for (i = 0; i < 16; i++)
if (wwn->model[i] >= 0x20)
- printk("%c", wwn->model[i]);
+ pr_debug("%c", wwn->model[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk(" Revision: ");
+ pr_debug(" Revision: ");
for (i = 0; i < 4; i++)
if (wwn->revision[i] >= 0x20)
- printk("%c", wwn->revision[i]);
+ pr_debug("%c", wwn->revision[i]);
else
- printk(" ");
+ pr_debug(" ");
- printk("\n");
+ pr_debug("\n");
- device_type = TRANSPORT(dev)->get_device_type(dev);
- printk(" Type: %s ", scsi_device_type(device_type));
- printk(" ANSI SCSI revision: %02x\n",
- TRANSPORT(dev)->get_device_rev(dev));
+ device_type = dev->transport->get_device_type(dev);
+ pr_debug(" Type: %s ", scsi_device_type(device_type));
+ pr_debug(" ANSI SCSI revision: %02x\n",
+ dev->transport->get_device_rev(dev));
}
struct se_device *transport_add_device_to_core_hba(
@@ -1630,33 +1411,15 @@ struct se_device *transport_add_device_to_core_hba(
struct se_device *dev;
dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
- if (!(dev)) {
- printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
- return NULL;
- }
- dev->dev_queue_obj = kzalloc(sizeof(struct se_queue_obj), GFP_KERNEL);
- if (!(dev->dev_queue_obj)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " dev->dev_queue_obj\n");
- kfree(dev);
+ if (!dev) {
+ pr_err("Unable to allocate memory for se_dev_t\n");
return NULL;
}
- transport_init_queue_obj(dev->dev_queue_obj);
-
- dev->dev_status_queue_obj = kzalloc(sizeof(struct se_queue_obj),
- GFP_KERNEL);
- if (!(dev->dev_status_queue_obj)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " dev->dev_status_queue_obj\n");
- kfree(dev->dev_queue_obj);
- kfree(dev);
- return NULL;
- }
- transport_init_queue_obj(dev->dev_status_queue_obj);
+ transport_init_queue_obj(&dev->dev_queue_obj);
dev->dev_flags = device_flags;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
- dev->dev_ptr = (void *) transport_dev;
+ dev->dev_ptr = transport_dev;
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
@@ -1668,6 +1431,7 @@ struct se_device *transport_add_device_to_core_hba(
INIT_LIST_HEAD(&dev->delayed_cmd_list);
INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
+ INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
spin_lock_init(&dev->ordered_cmd_lock);
@@ -1678,6 +1442,7 @@ struct se_device *transport_add_device_to_core_hba(
spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
+ spin_lock_init(&dev->qf_cmd_lock);
dev->queue_depth = dev_limits->queue_depth;
atomic_set(&dev->depth_left, dev->queue_depth);
@@ -1715,13 +1480,16 @@ struct se_device *transport_add_device_to_core_hba(
* Startup the struct se_device processing thread
*/
dev->process_thread = kthread_run(transport_processing_thread, dev,
- "LIO_%s", TRANSPORT(dev)->name);
+ "LIO_%s", dev->transport->name);
if (IS_ERR(dev->process_thread)) {
- printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
- TRANSPORT(dev)->name);
+ pr_err("Unable to create kthread: LIO_%s\n",
+ dev->transport->name);
goto out;
}
-
+ /*
+ * Setup work_queue for QUEUE_FULL
+ */
+ INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
/*
* Preload the initial INQUIRY const values if we are doing
* anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
@@ -1730,16 +1498,16 @@ struct se_device *transport_add_device_to_core_hba(
* originals once back into DEV_T10_WWN(dev) for the virtual device
* setup.
*/
- if (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
- if (!(inquiry_prod) || !(inquiry_prod)) {
- printk(KERN_ERR "All non TCM/pSCSI plugins require"
+ if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+ if (!inquiry_prod || !inquiry_rev) {
+ pr_err("All non TCM/pSCSI plugins require"
" INQUIRY consts\n");
goto out;
}
- strncpy(&DEV_T10_WWN(dev)->vendor[0], "LIO-ORG", 8);
- strncpy(&DEV_T10_WWN(dev)->model[0], inquiry_prod, 16);
- strncpy(&DEV_T10_WWN(dev)->revision[0], inquiry_rev, 4);
+ strncpy(&dev->se_sub_dev->t10_wwn.vendor[0], "LIO-ORG", 8);
+ strncpy(&dev->se_sub_dev->t10_wwn.model[0], inquiry_prod, 16);
+ strncpy(&dev->se_sub_dev->t10_wwn.revision[0], inquiry_rev, 4);
}
scsi_dump_inquiry(dev);
@@ -1754,8 +1522,6 @@ out:
se_release_vpd_for_dev(dev);
- kfree(dev->dev_status_queue_obj);
- kfree(dev->dev_queue_obj);
kfree(dev);
return NULL;
@@ -1794,12 +1560,11 @@ transport_generic_get_task(struct se_cmd *cmd,
enum dma_data_direction data_direction)
{
struct se_task *task;
- struct se_device *dev = SE_DEV(cmd);
- unsigned long flags;
+ struct se_device *dev = cmd->se_dev;
- task = dev->transport->alloc_task(cmd);
+ task = dev->transport->alloc_task(cmd->t_task_cdb);
if (!task) {
- printk(KERN_ERR "Unable to allocate struct se_task\n");
+ pr_err("Unable to allocate struct se_task\n");
return NULL;
}
@@ -1807,26 +1572,15 @@ transport_generic_get_task(struct se_cmd *cmd,
INIT_LIST_HEAD(&task->t_execute_list);
INIT_LIST_HEAD(&task->t_state_list);
init_completion(&task->task_stop_comp);
- task->task_no = T_TASK(cmd)->t_tasks_no++;
task->task_se_cmd = cmd;
task->se_dev = dev;
task->task_data_direction = data_direction;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- list_add_tail(&task->t_list, &T_TASK(cmd)->t_task_list);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
-
return task;
}
static int transport_generic_cmd_sequencer(struct se_cmd *, unsigned char *);
-void transport_device_setup_cmd(struct se_cmd *cmd)
-{
- cmd->se_dev = SE_LUN(cmd)->lun_se_dev;
-}
-EXPORT_SYMBOL(transport_device_setup_cmd);
-
/*
* Used by fabric modules containing a local struct se_cmd within their
* fabric dependent per I/O descriptor.
@@ -1840,20 +1594,17 @@ void transport_init_se_cmd(
int task_attr,
unsigned char *sense_buffer)
{
- INIT_LIST_HEAD(&cmd->se_lun_list);
- INIT_LIST_HEAD(&cmd->se_delayed_list);
- INIT_LIST_HEAD(&cmd->se_ordered_list);
- /*
- * Setup t_task pointer to t_task_backstore
- */
- cmd->t_task = &cmd->t_task_backstore;
+ INIT_LIST_HEAD(&cmd->se_lun_node);
+ INIT_LIST_HEAD(&cmd->se_delayed_node);
+ INIT_LIST_HEAD(&cmd->se_ordered_node);
+ INIT_LIST_HEAD(&cmd->se_qf_node);
- INIT_LIST_HEAD(&T_TASK(cmd)->t_task_list);
- init_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
- init_completion(&T_TASK(cmd)->transport_lun_stop_comp);
- init_completion(&T_TASK(cmd)->t_transport_stop_comp);
- spin_lock_init(&T_TASK(cmd)->t_state_lock);
- atomic_set(&T_TASK(cmd)->transport_dev_active, 1);
+ INIT_LIST_HEAD(&cmd->t_task_list);
+ init_completion(&cmd->transport_lun_fe_stop_comp);
+ init_completion(&cmd->transport_lun_stop_comp);
+ init_completion(&cmd->t_transport_stop_comp);
+ spin_lock_init(&cmd->t_state_lock);
+ atomic_set(&cmd->transport_dev_active, 1);
cmd->se_tfo = tfo;
cmd->se_sess = se_sess;
@@ -1870,23 +1621,23 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)
* Check if SAM Task Attribute emulation is enabled for this
* struct se_device storage object
*/
- if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 0;
if (cmd->sam_task_attr == MSG_ACA_TAG) {
- DEBUG_STA("SAM Task Attribute ACA"
+ pr_debug("SAM Task Attribute ACA"
" emulation is not supported\n");
- return -1;
+ return -EINVAL;
}
/*
* Used to determine when ORDERED commands should go from
* Dormant to Active status.
*/
- cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
+ cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
smp_mb__after_atomic_inc();
- DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+ pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
cmd->se_ordered_id, cmd->sam_task_attr,
- TRANSPORT(cmd->se_dev)->name);
+ cmd->se_dev->transport->name);
return 0;
}
@@ -1898,8 +1649,8 @@ void transport_free_se_cmd(
/*
* Check and free any extended CDB buffer that was allocated
*/
- if (T_TASK(se_cmd)->t_task_cdb != T_TASK(se_cmd)->__t_task_cdb)
- kfree(T_TASK(se_cmd)->t_task_cdb);
+ if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb)
+ kfree(se_cmd->t_task_cdb);
}
EXPORT_SYMBOL(transport_free_se_cmd);
@@ -1922,42 +1673,41 @@ int transport_generic_allocate_tasks(
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
- transport_device_setup_cmd(cmd);
/*
* Ensure that the received CDB is less than the max (252 + 8) bytes
* for VARIABLE_LENGTH_CMD
*/
if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
- printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+ pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
- return -1;
+ return -EINVAL;
}
/*
* If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
* allocate the additional extended CDB buffer now.. Otherwise
* setup the pointer from __t_task_cdb to t_task_cdb.
*/
- if (scsi_command_size(cdb) > sizeof(T_TASK(cmd)->__t_task_cdb)) {
- T_TASK(cmd)->t_task_cdb = kzalloc(scsi_command_size(cdb),
+ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
+ cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
GFP_KERNEL);
- if (!(T_TASK(cmd)->t_task_cdb)) {
- printk(KERN_ERR "Unable to allocate T_TASK(cmd)->t_task_cdb"
- " %u > sizeof(T_TASK(cmd)->__t_task_cdb): %lu ops\n",
+ if (!cmd->t_task_cdb) {
+ pr_err("Unable to allocate cmd->t_task_cdb"
+ " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
- (unsigned long)sizeof(T_TASK(cmd)->__t_task_cdb));
- return -1;
+ (unsigned long)sizeof(cmd->__t_task_cdb));
+ return -ENOMEM;
}
} else
- T_TASK(cmd)->t_task_cdb = &T_TASK(cmd)->__t_task_cdb[0];
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
/*
- * Copy the original CDB into T_TASK(cmd).
+ * Copy the original CDB into cmd->t_task_cdb.
*/
- memcpy(T_TASK(cmd)->t_task_cdb, cdb, scsi_command_size(cdb));
+ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
/*
* Setup the received CDB based on SCSI defined opcodes and
* perform unit attention, persistent reservations and ALUA
- * checks for virtual device backends. The T_TASK(cmd)->t_task_cdb
+ * checks for virtual device backends. The cmd->t_task_cdb
* pointer is expected to be setup before we reach this point.
*/
ret = transport_generic_cmd_sequencer(cmd, cdb);
@@ -1969,7 +1719,7 @@ int transport_generic_allocate_tasks(
if (transport_check_alloc_task_attr(cmd) < 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -2;
+ return -EINVAL;
}
spin_lock(&cmd->se_lun->lun_sep_lock);
if (cmd->se_lun->lun_sep)
@@ -1986,10 +1736,10 @@ EXPORT_SYMBOL(transport_generic_allocate_tasks);
int transport_generic_handle_cdb(
struct se_cmd *cmd)
{
- if (!SE_LUN(cmd)) {
+ if (!cmd->se_lun) {
dump_stack();
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD);
@@ -1998,6 +1748,29 @@ int transport_generic_handle_cdb(
EXPORT_SYMBOL(transport_generic_handle_cdb);
/*
+ * Used by fabric module frontends to queue tasks directly.
+ * May only be used from process context.
+ */
+int transport_handle_cdb_direct(
+ struct se_cmd *cmd)
+{
+ if (!cmd->se_lun) {
+ dump_stack();
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
+ }
+ if (in_interrupt()) {
+ dump_stack();
+ pr_err("transport_generic_handle_cdb cannot be called"
+ " from interrupt context\n");
+ return -EINVAL;
+ }
+
+ return transport_generic_new_cmd(cmd);
+}
+EXPORT_SYMBOL(transport_handle_cdb_direct);
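
A plausible caller, for context: a fabric driver already running in process context can skip the queueing of transport_generic_handle_cdb() and drive execution synchronously. The sketch below is hypothetical (my_fabric_queuecommand is not part of this patch); it only chains two functions that do appear here:

    /* Hypothetical fabric receive path; process context only. */
    static int my_fabric_queuecommand(struct se_cmd *se_cmd, unsigned char *cdb)
    {
            int ret;

            /* Parse the CDB, run ALUA/PR checks, and size the command. */
            ret = transport_generic_allocate_tasks(se_cmd, cdb);
            if (ret < 0)
                    return ret;     /* a real caller would send CHECK_CONDITION */

            /* Start execution directly instead of via the cmd queue. */
            return transport_handle_cdb_direct(se_cmd);
    }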
+
+/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
* complete setup in TCM process context w/ TFO->new_cmd_map().
@@ -2005,10 +1778,10 @@ EXPORT_SYMBOL(transport_generic_handle_cdb);
int transport_generic_handle_cdb_map(
struct se_cmd *cmd)
{
- if (!SE_LUN(cmd)) {
+ if (!cmd->se_lun) {
dump_stack();
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
+ pr_err("cmd->se_lun is NULL\n");
+ return -EINVAL;
}
transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP);
@@ -2030,7 +1803,7 @@ int transport_generic_handle_data(
* in interrupt code, the signal_pending() check is skipped.
*/
if (!in_interrupt() && signal_pending(current))
- return -1;
+ return -EPERM;
/*
* If the received CDB has already been ABORTED by the generic
* target engine, we now call transport_check_aborted_status()
@@ -2057,7 +1830,6 @@ int transport_generic_handle_tmr(
* This is needed for early exceptions.
*/
cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks;
- transport_device_setup_cmd(cmd);
transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR);
return 0;
@@ -2077,16 +1849,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
unsigned long flags;
int ret = 0;
- DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("ITT[0x%08x] - Stopping tasks\n",
+ cmd->se_tfo->get_task_tag(cmd));
/*
* No tasks remain in the execution queue
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
- DEBUG_TS("task_no[%d] - Processing task %p\n",
+ &cmd->t_task_list, t_list) {
+ pr_debug("task_no[%d] - Processing task %p\n",
task->task_no, task);
/*
* If the struct se_task has not been sent and is not active,
@@ -2094,14 +1866,14 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
if (!atomic_read(&task->task_sent) &&
!atomic_read(&task->task_active)) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
transport_remove_task_from_execute_queue(task,
task->se_dev);
- DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+ pr_debug("task_no[%d] - Removed from execute queue\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
continue;
}
@@ -2111,42 +1883,32 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
*/
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
- DEBUG_TS("task_no[%d] - Waiting to complete\n",
+ pr_debug("task_no[%d] - Waiting to complete\n",
task->task_no);
wait_for_completion(&task->task_stop_comp);
- DEBUG_TS("task_no[%d] - Stopped successfully\n",
+ pr_debug("task_no[%d] - Stopped successfully\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
} else {
- DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+ pr_debug("task_no[%d] - Did nothing\n", task->task_no);
ret++;
}
__transport_stop_task_timer(task, &flags);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return ret;
}
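
The wait above is one half of a standard completion handshake: the stopper publishes task_stop and sleeps on task_stop_comp, and the running side (transport_complete_task(), outside this hunk) observes the flag and calls complete(). A generic sketch of the pattern, with illustrative names:

    #include <linux/completion.h>
    #include <linux/atomic.h>

    struct my_task {
            atomic_t stop_requested;
            struct completion stopped;      /* init_completion() at setup time */
    };

    /* Stopper side: request the stop, then sleep until it is acknowledged. */
    static void my_task_stop(struct my_task *t)
    {
            atomic_set(&t->stop_requested, 1);
            wait_for_completion(&t->stopped);
    }

    /* Worker side, called at a point where stopping is safe. */
    static void my_task_check_stop(struct my_task *t)
    {
            if (atomic_read(&t->stop_requested))
                    complete(&t->stopped);
    }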
-static void transport_failure_reset_queue_depth(struct se_device *dev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
- atomic_inc(&dev->depth_left);
- atomic_inc(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
-}
-
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
@@ -2156,29 +1918,31 @@ static void transport_generic_request_failure(
int complete,
int sc)
{
- DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
- " CDB: 0x%02x\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
- T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+ int ret = 0;
+
+ pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+ " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->t_task_cdb[0]);
+ pr_debug("-----[ i_state: %d t_state/def_t_state:"
" %d/%d transport_error_status: %d\n",
- CMD_TFO(cmd)->get_cmd_state(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->deferred_t_state,
cmd->transport_error_status);
- DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+ pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
- " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ " t_transport_sent: %d\n", cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_task_cdbs_ex_left),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
transport_stop_all_task_timers(cmd);
if (dev)
- transport_failure_reset_queue_depth(dev);
+ atomic_inc(&dev->depth_left);
/*
* For SAM Task Attribute emulation for failed struct se_cmd
*/
@@ -2211,8 +1975,8 @@ static void transport_generic_request_failure(
* we force this session to fall back to session
* recovery.
*/
- CMD_TFO(cmd)->fall_back_to_erl0(cmd->se_sess);
- CMD_TFO(cmd)->stop_session(cmd->se_sess, 0, 0);
+ cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
+ cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
goto check_stop;
case PYX_TRANSPORT_LU_COMM_FAILURE:
@@ -2240,13 +2004,15 @@ static void transport_generic_request_failure(
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
- if (SE_SESS(cmd) &&
- DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ if (cmd->se_sess &&
+ cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- CMD_TFO(cmd)->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
goto check_stop;
case PYX_TRANSPORT_USE_SENSE_REASON:
/*
@@ -2254,8 +2020,8 @@ static void transport_generic_request_failure(
*/
break;
default:
- printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
- T_TASK(cmd)->t_task_cdb[0],
+ pr_err("Unknown transport error for CDB 0x%02x: %d\n",
+ cmd->t_task_cdb[0],
cmd->transport_error_status);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
@@ -2263,32 +2029,41 @@ static void transport_generic_request_failure(
if (!sc)
transport_new_cmd_failure(cmd);
- else
- transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
+ else {
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ }
+
check_stop:
transport_lun_remove_cmd(cmd);
- if (!(transport_cmd_check_stop_to_fabric(cmd)))
+ if (!transport_cmd_check_stop_to_fabric(cmd))
;
+ return;
+
+queue_full:
+ cmd->t_state = TRANSPORT_COMPLETE_OK;
+ transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}
static void transport_direct_request_timeout(struct se_cmd *cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->t_transport_timeout))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->t_transport_timeout)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_timeout_left)) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ if (atomic_read(&cmd->t_task_cdbs_timeout_left)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- atomic_sub(atomic_read(&T_TASK(cmd)->t_transport_timeout),
- &T_TASK(cmd)->t_se_count);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_sub(atomic_read(&cmd->t_transport_timeout),
+ &cmd->t_se_count);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static void transport_generic_request_timeout(struct se_cmd *cmd)
@@ -2296,35 +2071,18 @@ static void transport_generic_request_timeout(struct se_cmd *cmd)
unsigned long flags;
/*
- * Reset T_TASK(cmd)->t_se_count to allow transport_generic_remove()
+ * Reset cmd->t_se_count to allow transport_generic_remove()
* to allow last call to free memory resources.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_timeout) > 1) {
- int tmp = (atomic_read(&T_TASK(cmd)->t_transport_timeout) - 1);
-
- atomic_sub(tmp, &T_TASK(cmd)->t_se_count);
- }
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
-
- transport_generic_remove(cmd, 0, 0);
-}
-
-static int
-transport_generic_allocate_buf(struct se_cmd *cmd, u32 data_length)
-{
- unsigned char *buf;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_transport_timeout) > 1) {
+ int tmp = (atomic_read(&cmd->t_transport_timeout) - 1);
- buf = kzalloc(data_length, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate memory for buffer\n");
- return -1;
+ atomic_sub(tmp, &cmd->t_se_count);
}
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- T_TASK(cmd)->t_tasks_se_num = 0;
- T_TASK(cmd)->t_task_buf = buf;
-
- return 0;
+ transport_generic_remove(cmd, 0);
}
static inline u32 transport_lba_21(unsigned char *cdb)
@@ -2364,9 +2122,9 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
- spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
/*
@@ -2375,14 +2133,14 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
static void transport_task_timeout_handler(unsigned long data)
{
struct se_task *task = (struct se_task *)data;
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
unsigned long flags;
- DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+ pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (task->task_flags & TF_STOP) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
task->task_flags &= ~TF_RUNNING;
@@ -2390,46 +2148,46 @@ static void transport_task_timeout_handler(unsigned long data)
/*
* Determine if transport_complete_task() has already been called.
*/
- if (!(atomic_read(&task->task_active))) {
- DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+ if (!atomic_read(&task->task_active)) {
+ pr_debug("transport task: %p cmd: %p timeout task_active"
" == 0\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- atomic_inc(&T_TASK(cmd)->t_se_count);
- atomic_inc(&T_TASK(cmd)->t_transport_timeout);
- T_TASK(cmd)->t_tasks_failed = 1;
+ atomic_inc(&cmd->t_se_count);
+ atomic_inc(&cmd->t_transport_timeout);
+ cmd->t_tasks_failed = 1;
atomic_set(&task->task_timeout, 1);
task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
task->task_scsi_status = 1;
if (atomic_read(&task->task_stop)) {
- DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+ pr_debug("transport task: %p cmd: %p timeout task_stop"
" == 1\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&task->task_stop_comp);
return;
}
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_left))) {
- DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+ pr_debug("transport task: %p cmd: %p timeout non zero"
" t_task_cdbs_left\n", task, cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
- DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+ pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
task, cmd);
cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE);
}
/*
- * Called with T_TASK(cmd)->t_state_lock held.
+ * Called with cmd->t_state_lock held.
*/
static void transport_start_task_timer(struct se_task *task)
{
@@ -2441,8 +2199,8 @@ static void transport_start_task_timer(struct se_task *task)
/*
* If the task_timeout is disabled, exit now.
*/
- timeout = DEV_ATTRIB(dev)->task_timeout;
- if (!(timeout))
+ timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
+ if (!timeout)
return;
init_timer(&task->task_timer);
@@ -2453,27 +2211,27 @@ static void transport_start_task_timer(struct se_task *task)
task->task_flags |= TF_RUNNING;
add_timer(&task->task_timer);
#if 0
- printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+ pr_debug("Starting task timer for cmd: %p task: %p seconds:"
" %d\n", task->task_se_cmd, task, timeout);
#endif
}
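
For reference, this uses the classic timer API of this kernel generation (long before timer_setup() existed): the callback takes an unsigned long cookie stashed in timer_list.data. A self-contained sketch, assuming a 2.6/3.x-era kernel:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_obj {
            struct timer_list timer;
    };

    static void my_timeout(unsigned long data)      /* old-style callback */
    {
            struct my_obj *obj = (struct my_obj *)data;

            /* Runs in softirq context; take the same locks the normal path does. */
            (void)obj;
    }

    static void my_start_timer(struct my_obj *obj, int seconds)
    {
            init_timer(&obj->timer);
            obj->timer.expires = jiffies + seconds * HZ;
            obj->timer.data = (unsigned long)obj;   /* handed back to my_timeout() */
            obj->timer.function = my_timeout;
            add_timer(&obj->timer);
    }

    static void my_stop_timer(struct my_obj *obj)
    {
            del_timer_sync(&obj->timer);    /* waits out a running callback */
    }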
/*
- * Called with spin_lock_irq(&T_TASK(cmd)->t_state_lock) held.
+ * Called with spin_lock_irq(&cmd->t_state_lock) held.
*/
void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
{
- struct se_cmd *cmd = TASK_CMD(task);
+ struct se_cmd *cmd = task->task_se_cmd;
- if (!(task->task_flags & TF_RUNNING))
+ if (!(task->task_flags & TF_RUNNING))
return;
task->task_flags |= TF_STOP;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, *flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
del_timer_sync(&task->task_timer);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, *flags);
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
task->task_flags &= ~TF_RUNNING;
task->task_flags &= ~TF_STOP;
}
@@ -2483,11 +2241,11 @@ static void transport_stop_all_task_timers(struct se_cmd *cmd)
struct se_task *task = NULL, *task_tmp;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list)
+ &cmd->t_task_list, t_list)
__transport_stop_task_timer(task, &flags);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
static inline int transport_tcq_window_closed(struct se_device *dev)
@@ -2498,7 +2256,7 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
} else
msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
- wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
return 0;
}
@@ -2511,45 +2269,45 @@ static inline int transport_tcq_window_closed(struct se_device *dev)
*/
static inline int transport_execute_task_attr(struct se_cmd *cmd)
{
- if (SE_DEV(cmd)->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type != SAM_TASK_ATTR_EMULATED)
return 1;
/*
* Check for the existence of HEAD_OF_QUEUE, and if true return 1
* to allow the tasks of the passed struct se_cmd to be added to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&SE_DEV(cmd)->dev_hoq_count);
+ atomic_inc(&cmd->se_dev->dev_hoq_count);
smp_mb__after_atomic_inc();
- DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+ pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0],
+ cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&SE_DEV(cmd)->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_list,
- &SE_DEV(cmd)->ordered_cmd_list);
- spin_unlock(&SE_DEV(cmd)->ordered_cmd_lock);
+ spin_lock(&cmd->se_dev->ordered_cmd_lock);
+ list_add_tail(&cmd->se_ordered_node,
+ &cmd->se_dev->ordered_cmd_list);
+ spin_unlock(&cmd->se_dev->ordered_cmd_lock);
- atomic_inc(&SE_DEV(cmd)->dev_ordered_sync);
+ atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
- DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+ pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
" list, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0],
+ cmd->t_task_cdb[0],
cmd->se_ordered_id);
/*
* Add ORDERED command to tail of execution queue if
* no other older commands exist that need to be
* completed first.
*/
- if (!(atomic_read(&SE_DEV(cmd)->simple_cmds)))
+ if (!atomic_read(&cmd->se_dev->simple_cmds))
return 1;
} else {
/*
* For SIMPLE and UNTAGGED Task Attribute commands
*/
- atomic_inc(&SE_DEV(cmd)->simple_cmds);
+ atomic_inc(&cmd->se_dev->simple_cmds);
smp_mb__after_atomic_inc();
}
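
Concretely: with two SIMPLE reads in flight, a newly arrived ORDERED write is parked (simple_cmds is non-zero), a later HEAD_OF_QUEUE INQUIRY still jumps straight to execution, and the ORDERED write is released only once the earlier reads drain, matching the SAM task-attribute rules this block emulates.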
/*
@@ -2557,20 +2315,20 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
* add the dormant task(s) built for the passed struct se_cmd to the
* execution queue, moving them to Active state for this struct se_device.
*/
- if (atomic_read(&SE_DEV(cmd)->dev_ordered_sync) != 0) {
+ if (atomic_read(&cmd->se_dev->dev_ordered_sync) != 0) {
/*
* Otherwise, add cmd w/ tasks to delayed cmd queue that
* will be drained upon completion of HEAD_OF_QUEUE task.
*/
- spin_lock(&SE_DEV(cmd)->delayed_cmd_lock);
+ spin_lock(&cmd->se_dev->delayed_cmd_lock);
cmd->se_cmd_flags |= SCF_DELAYED_CMD_FROM_SAM_ATTR;
- list_add_tail(&cmd->se_delayed_list,
- &SE_DEV(cmd)->delayed_cmd_list);
- spin_unlock(&SE_DEV(cmd)->delayed_cmd_lock);
+ list_add_tail(&cmd->se_delayed_node,
+ &cmd->se_dev->delayed_cmd_list);
+ spin_unlock(&cmd->se_dev->delayed_cmd_lock);
- DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+ pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
" delayed CMD list, se_ordered_id: %u\n",
- T_TASK(cmd)->t_task_cdb[0], cmd->sam_task_attr,
+ cmd->t_task_cdb[0], cmd->sam_task_attr,
cmd->se_ordered_id);
/*
* Return zero to let transport_execute_tasks() know
@@ -2592,25 +2350,23 @@ static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
- if (!(cmd->se_cmd_flags & SCF_SE_DISABLE_ONLINE_CHECK)) {
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status =
- PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, NULL, 0, 1);
- return 0;
- }
+ if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
+ cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
+ transport_generic_request_failure(cmd, NULL, 0, 1);
+ return 0;
}
+
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
*/
- if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+ if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
/*
* Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
* attribute for the tasks of the received struct se_cmd CDB
*/
add_tasks = transport_execute_task_attr(cmd);
- if (add_tasks == 0)
+ if (!add_tasks)
goto execute_tasks;
/*
* This calls transport_add_tasks_from_cmd() to handle
@@ -2625,7 +2381,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)
* storage object.
*/
execute_tasks:
- __transport_execute_tasks(SE_DEV(cmd));
+ __transport_execute_tasks(cmd->se_dev);
return 0;
}
@@ -2639,51 +2395,49 @@ static int __transport_execute_tasks(struct se_device *dev)
{
int error;
struct se_cmd *cmd = NULL;
- struct se_task *task;
+ struct se_task *task = NULL;
unsigned long flags;
/*
* Check if there is enough room in the device and HBA queue to send
- * struct se_transport_task's to the selected transport.
+ * struct se_task descriptors to the selected transport.
*/
check_depth:
- spin_lock_irqsave(&SE_HBA(dev)->hba_queue_lock, flags);
- if (!(atomic_read(&dev->depth_left)) ||
- !(atomic_read(&SE_HBA(dev)->left_queue_depth))) {
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ if (!atomic_read(&dev->depth_left))
return transport_tcq_window_closed(dev);
- }
- dev->dev_tcq_window_closed = 0;
- spin_lock(&dev->execute_task_lock);
- task = transport_get_task_from_execute_queue(dev);
- spin_unlock(&dev->execute_task_lock);
+ dev->dev_tcq_window_closed = 0;
- if (!task) {
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
+ spin_lock_irq(&dev->execute_task_lock);
+ if (list_empty(&dev->execute_task_list)) {
+ spin_unlock_irq(&dev->execute_task_lock);
return 0;
}
+ task = list_first_entry(&dev->execute_task_list,
+ struct se_task, t_execute_list);
+ list_del(&task->t_execute_list);
+ atomic_set(&task->task_execute_queue, 0);
+ atomic_dec(&dev->execute_tasks);
+ spin_unlock_irq(&dev->execute_task_lock);
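
The inlined dequeue above is the stock locked list-pop that replaces transport_get_task_from_execute_queue(); stripped of the TCM bookkeeping it reduces to the following generic shape (names are illustrative):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct work_item {
            struct list_head node;
    };

    /* Pop the head of a spinlock-protected queue, or return NULL if empty. */
    static struct work_item *pop_work(struct list_head *queue, spinlock_t *lock)
    {
            struct work_item *item = NULL;

            spin_lock_irq(lock);
            if (!list_empty(queue)) {
                    item = list_first_entry(queue, struct work_item, node);
                    list_del(&item->node);  /* ownership passes to the caller */
            }
            spin_unlock_irq(lock);
            return item;
    }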
atomic_dec(&dev->depth_left);
- atomic_dec(&SE_HBA(dev)->left_queue_depth);
- spin_unlock_irqrestore(&SE_HBA(dev)->hba_queue_lock, flags);
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
atomic_set(&task->task_active, 1);
atomic_set(&task->task_sent, 1);
- atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
+ atomic_inc(&cmd->t_task_cdbs_sent);
- if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
- T_TASK(cmd)->t_task_cdbs)
+ if (atomic_read(&cmd->t_task_cdbs_sent) ==
+ cmd->t_task_list_num)
atomic_set(&cmd->transport_sent, 1);
transport_start_task_timer(task);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
/*
* The struct se_cmd->transport_emulate_cdb() function pointer is used
- * to grab REPORT_LUNS CDBs before they hit the
+ * to grab REPORT_LUNS and other CDBs we want to handle before they hit the
* struct se_subsystem_api->do_task() caller below.
*/
if (cmd->transport_emulate_cdb) {
@@ -2718,11 +2472,11 @@ check_depth:
* call ->do_task() directly and let the underlying TCM subsystem plugin
* code handle the CDB emulation.
*/
- if ((TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
- (!(TASK_CMD(task)->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
+ if ((dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) &&
+ (!(task->task_se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
error = transport_emulate_control_cdb(task);
else
- error = TRANSPORT(dev)->do_task(task);
+ error = dev->transport->do_task(task);
if (error != 0) {
cmd->transport_error_status = error;
@@ -2745,12 +2499,10 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)
* Any unsolicited data will get dumped for a failed command inside
* the fabric plugin
*/
- spin_lock_irqsave(&T_TASK(se_cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&T_TASK(se_cmd)->t_state_lock, flags);
-
- CMD_TFO(se_cmd)->new_cmd_failure(se_cmd);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
static void transport_nop_wait_for_tasks(struct se_cmd *, int, int);
@@ -2760,7 +2512,7 @@ static inline u32 transport_get_sectors_6(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2772,7 +2524,7 @@ static inline u32 transport_get_sectors_6(
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[2] << 16) + (cdb[3] << 8) + cdb[4];
/*
@@ -2788,7 +2540,7 @@ static inline u32 transport_get_sectors_10(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2800,8 +2552,8 @@ static inline u32 transport_get_sectors_10(
/*
* XXX_10 is not defined in SSC, throw an exception
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
- *ret = -1;
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -EINVAL;
return 0;
}
@@ -2818,7 +2570,7 @@ static inline u32 transport_get_sectors_12(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2830,8 +2582,8 @@ static inline u32 transport_get_sectors_12(
/*
* XXX_12 is not defined in SSC, throw an exception
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
- *ret = -1;
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
+ *ret = -EINVAL;
return 0;
}
@@ -2848,7 +2600,7 @@ static inline u32 transport_get_sectors_16(
struct se_cmd *cmd,
int *ret)
{
- struct se_device *dev = SE_LUN(cmd)->lun_se_dev;
+ struct se_device *dev = cmd->se_dev;
/*
* Assume TYPE_DISK for non struct se_device objects.
@@ -2860,7 +2612,7 @@ static inline u32 transport_get_sectors_16(
/*
* Use 24-bit allocation length for TYPE_TAPE.
*/
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE)
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE)
return (u32)(cdb[12] << 16) + (cdb[13] << 8) + cdb[14];
type_disk:
@@ -2890,57 +2642,30 @@ static inline u32 transport_get_size(
unsigned char *cdb,
struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
- if (TRANSPORT(dev)->get_device_type(dev) == TYPE_TAPE) {
+ if (dev->transport->get_device_type(dev) == TYPE_TAPE) {
if (cdb[1] & 1) { /* sectors */
- return DEV_ATTRIB(dev)->block_size * sectors;
+ return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
} else /* bytes */
return sectors;
}
#if 0
- printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
- " %s object\n", DEV_ATTRIB(dev)->block_size, sectors,
- DEV_ATTRIB(dev)->block_size * sectors,
- TRANSPORT(dev)->name);
+ pr_debug("Returning block_size: %u, sectors: %u == %u for"
+ " %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
+ dev->se_sub_dev->se_dev_attrib.block_size * sectors,
+ dev->transport->name);
#endif
- return DEV_ATTRIB(dev)->block_size * sectors;
-}
-
-unsigned char transport_asciihex_to_binaryhex(unsigned char val[2])
-{
- unsigned char result = 0;
- /*
- * MSB
- */
- if ((val[0] >= 'a') && (val[0] <= 'f'))
- result = ((val[0] - 'a' + 10) & 0xf) << 4;
- else
- if ((val[0] >= 'A') && (val[0] <= 'F'))
- result = ((val[0] - 'A' + 10) & 0xf) << 4;
- else /* digit */
- result = ((val[0] - '0') & 0xf) << 4;
- /*
- * LSB
- */
- if ((val[1] >= 'a') && (val[1] <= 'f'))
- result |= ((val[1] - 'a' + 10) & 0xf);
- else
- if ((val[1] >= 'A') && (val[1] <= 'F'))
- result |= ((val[1] - 'A' + 10) & 0xf);
- else /* digit */
- result |= ((val[1] - '0') & 0xf);
-
- return result;
+ return dev->se_sub_dev->se_dev_attrib.block_size * sectors;
}
-EXPORT_SYMBOL(transport_asciihex_to_binaryhex);
static void transport_xor_callback(struct se_cmd *cmd)
{
unsigned char *buf, *addr;
- struct se_mem *se_mem;
+ struct scatterlist *sg;
unsigned int offset;
int i;
+ int count;
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -2953,32 +2678,37 @@ static void transport_xor_callback(struct se_cmd *cmd)
* 5) transfer the resulting XOR data to the data-in buffer.
*/
buf = kmalloc(cmd->data_length, GFP_KERNEL);
- if (!(buf)) {
- printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+ if (!buf) {
+ pr_err("Unable to allocate xor_callback buf\n");
return;
}
/*
- * Copy the scatterlist WRITE buffer located at T_TASK(cmd)->t_mem_list
+ * Copy the scatterlist WRITE buffer located at cmd->t_data_sg
* into the locally allocated *buf
*/
- transport_memcpy_se_mem_read_contig(cmd, buf, T_TASK(cmd)->t_mem_list);
+ sg_copy_to_buffer(cmd->t_data_sg,
+ cmd->t_data_nents,
+ buf,
+ cmd->data_length);
+
/*
* Now perform the XOR against the BIDI read memory located at
- * T_TASK(cmd)->t_mem_bidi_list
+ * cmd->t_bidi_data_sg
*/
offset = 0;
- list_for_each_entry(se_mem, T_TASK(cmd)->t_mem_bidi_list, se_list) {
- addr = (unsigned char *)kmap_atomic(se_mem->se_page, KM_USER0);
- if (!(addr))
+ for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
+ addr = kmap_atomic(sg_page(sg), KM_USER0);
+ if (!addr)
goto out;
- for (i = 0; i < se_mem->se_len; i++)
- *(addr + se_mem->se_off + i) ^= *(buf + offset + i);
+ for (i = 0; i < sg->length; i++)
+ *(addr + sg->offset + i) ^= *(buf + offset + i);
- offset += se_mem->se_len;
+ offset += sg->length;
kunmap_atomic(addr, KM_USER0);
}
+
out:
kfree(buf);
}
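
Two scatterlist idioms carry the rewritten callback: sg_copy_to_buffer() linearizes the WRITE payload, and for_each_sg() plus kmap_atomic() walks the BIDI pages in place. A condensed sketch of the latter (generic names; the two-argument kmap_atomic() with KM_USER0 matches this kernel era):

    #include <linux/scatterlist.h>
    #include <linux/highmem.h>

    /* XOR every byte of the linear buffer 'src' into the pages of an SGL. */
    static void xor_into_sgl(struct scatterlist *sgl, int nents,
                             const unsigned char *src)
    {
            struct scatterlist *sg;
            unsigned char *addr;
            unsigned int i, off = 0;
            int n;

            for_each_sg(sgl, sg, nents, n) {
                    addr = kmap_atomic(sg_page(sg), KM_USER0);
                    for (i = 0; i < sg->length; i++)
                            addr[sg->offset + i] ^= src[off + i];
                    off += sg->length;
                    kunmap_atomic(addr, KM_USER0);
            }
    }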
@@ -2994,75 +2724,60 @@ static int transport_get_sense_data(struct se_cmd *cmd)
unsigned long flags;
u32 offset = 0;
- if (!SE_LUN(cmd)) {
- printk(KERN_ERR "SE_LUN(cmd) is NULL\n");
- return -1;
- }
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ WARN_ON(!cmd->se_lun);
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
+ &cmd->t_task_list, t_list) {
if (!task->task_sense)
continue;
dev = task->se_dev;
- if (!(dev))
+ if (!dev)
continue;
- if (!TRANSPORT(dev)->get_sense_buffer) {
- printk(KERN_ERR "TRANSPORT(dev)->get_sense_buffer"
+ if (!dev->transport->get_sense_buffer) {
+ pr_err("dev->transport->get_sense_buffer"
" is NULL\n");
continue;
}
- sense_buffer = TRANSPORT(dev)->get_sense_buffer(task);
- if (!(sense_buffer)) {
- printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+ sense_buffer = dev->transport->get_sense_buffer(task);
+ if (!sense_buffer) {
+ pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
" sense buffer for task with sense\n",
- CMD_TFO(cmd)->get_task_tag(cmd), task->task_no);
+ cmd->se_tfo->get_task_tag(cmd), task->task_no);
continue;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ offset = cmd->se_tfo->set_fabric_sense_len(cmd,
TRANSPORT_SENSE_BUFFER);
- memcpy((void *)&buffer[offset], (void *)sense_buffer,
+ memcpy(&buffer[offset], sense_buffer,
TRANSPORT_SENSE_BUFFER);
cmd->scsi_status = task->task_scsi_status;
/* Automatically padded */
cmd->scsi_sense_length =
(TRANSPORT_SENSE_BUFFER + offset);
- printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+ pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
" and sense\n",
- dev->se_hba->hba_id, TRANSPORT(dev)->name,
+ dev->se_hba->hba_id, dev->transport->name,
cmd->scsi_status);
return 0;
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return -1;
}
-static int transport_allocate_resources(struct se_cmd *cmd)
-{
- u32 length = cmd->data_length;
-
- if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB))
- return transport_generic_get_mem(cmd, length, PAGE_SIZE);
- else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB)
- return transport_generic_allocate_buf(cmd, length);
- else
- return 0;
-}
-
static int
transport_handle_reservation_conflict(struct se_cmd *cmd)
{
@@ -3077,12 +2792,40 @@ transport_handle_reservation_conflict(struct se_cmd *cmd)
*
* See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/
- if (SE_SESS(cmd) &&
- DEV_ATTRIB(cmd->se_dev)->emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(SE_SESS(cmd)->se_node_acl,
+ if (cmd->se_sess &&
+ cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
+ core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -2;
+ return -EINVAL;
+}
+
+static inline long long transport_dev_end_lba(struct se_device *dev)
+{
+ return dev->transport->get_blocks(dev) + 1;
+}
+
+static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
+{
+ struct se_device *dev = cmd->se_dev;
+ u32 sectors;
+
+ if (dev->transport->get_device_type(dev) != TYPE_DISK)
+ return 0;
+
+ sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
+
+ if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+ pr_err("LBA: %llu Sectors: %u exceeds"
+ " transport_dev_end_lba(): %llu\n",
+ cmd->t_task_lba, sectors,
+ transport_dev_end_lba(dev));
+ pr_err(" We should return CHECK_CONDITION"
+ " but we don't yet\n");
+ return 0;
+ }
+
+ return sectors;
}
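
A worked case, assuming get_blocks() reports the last addressable LBA: a 512-byte-block device with 2048 blocks returns get_blocks() == 2047, so transport_dev_end_lba() is 2048; a 4 KiB command (8 sectors) at t_task_lba 2040 just fits (2040 + 8 == 2048), while the same command at LBA 2041 overruns and takes the pr_err() path.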
/* transport_generic_cmd_sequencer():
@@ -3099,7 +2842,7 @@ static int transport_generic_cmd_sequencer(
struct se_cmd *cmd,
unsigned char *cdb)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
int ret = 0, sector_ret = 0, passthrough;
u32 sectors = 0, size = 0, pr_reg_type = 0;
@@ -3113,12 +2856,12 @@ static int transport_generic_cmd_sequencer(
&transport_nop_wait_for_tasks;
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;
- return -2;
+ return -EINVAL;
}
/*
* Check status of Asymmetric Logical Unit Assignment port
*/
- ret = T10_ALUA(su_dev)->alua_state_check(cmd, cdb, &alua_ascq);
+ ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);
if (ret != 0) {
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
/*
@@ -3128,22 +2871,22 @@ static int transport_generic_cmd_sequencer(
*/
if (ret > 0) {
#if 0
- printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+ pr_debug("[%s]: ALUA TG Port not available,"
" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
- CMD_TFO(cmd)->get_fabric_name(), alua_ascq);
+ cmd->se_tfo->get_fabric_name(), alua_ascq);
#endif
transport_set_sense_codes(cmd, 0x04, alua_ascq);
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_CHECK_CONDITION_NOT_READY;
- return -2;
+ return -EINVAL;
}
goto out_invalid_cdb_field;
}
/*
* Check status for SPC-3 Persistent Reservations
*/
- if (T10_PR_OPS(su_dev)->t10_reservation_check(cmd, &pr_reg_type) != 0) {
- if (T10_PR_OPS(su_dev)->t10_seq_non_holder(
+ if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
+ if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
cmd, cdb, pr_reg_type) != 0)
return transport_handle_reservation_conflict(cmd);
/*
@@ -3160,7 +2903,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_6;
- T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_10:
@@ -3169,7 +2912,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_12:
@@ -3178,7 +2921,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_12;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case READ_16:
@@ -3187,7 +2930,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_16;
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_6:
@@ -3196,7 +2939,7 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_6;
- T_TASK(cmd)->t_task_lba = transport_lba_21(cdb);
+ cmd->t_task_lba = transport_lba_21(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_10:
@@ -3205,8 +2948,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
@@ -3215,8 +2958,8 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_12;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
@@ -3225,22 +2968,22 @@ static int transport_generic_cmd_sequencer(
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_16;
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(T_TASK(cmd)->t_tasks_bidi))
+ !(cmd->t_tasks_bidi))
goto out_invalid_cdb_field;
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->transport_split_cdb = &split_cdb_XX_10;
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
- passthrough = (TRANSPORT(dev)->transport_type ==
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
* Skip the remaining assignments for TCM/PSCSI passthrough
@@ -3251,7 +2994,7 @@ static int transport_generic_cmd_sequencer(
* Setup BIDI XOR callback to be run during transport_generic_complete_ok()
*/
cmd->transport_complete_callback = &transport_xor_callback;
- T_TASK(cmd)->t_tasks_fua = (cdb[1] & 0x8);
+ cmd->t_tasks_fua = (cdb[1] & 0x8);
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
@@ -3259,7 +3002,7 @@ static int transport_generic_cmd_sequencer(
* Determine if this is TCM/PSCSI device and we should disable
* internal emulation for this CDB.
*/
- passthrough = (TRANSPORT(dev)->transport_type ==
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
switch (service_action) {
@@ -3273,7 +3016,7 @@ static int transport_generic_cmd_sequencer(
* XDWRITE_READ_32 logic.
*/
cmd->transport_split_cdb = &split_cdb_XX_32;
- T_TASK(cmd)->t_task_lba = transport_lba_64_ext(cdb);
+ cmd->t_task_lba = transport_lba_64_ext(cdb);
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
/*
@@ -3287,14 +3030,22 @@ static int transport_generic_cmd_sequencer(
* transport_generic_complete_ok()
*/
cmd->transport_complete_callback = &transport_xor_callback;
- T_TASK(cmd)->t_tasks_fua = (cdb[10] & 0x8);
+ cmd->t_tasks_fua = (cdb[10] & 0x8);
break;
case WRITE_SAME_32:
sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- T_TASK(cmd)->t_task_lba = get_unaligned_be64(&cdb[12]);
+
+ if (sectors)
+ size = transport_get_size(sectors, cdb, cmd);
+ else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+ " supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
/*
@@ -3304,7 +3055,7 @@ static int transport_generic_cmd_sequencer(
break;
if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
- printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
goto out_invalid_cdb_field;
@@ -3314,28 +3065,28 @@ static int transport_generic_cmd_sequencer(
* tpws with the UNMAP=1 bit set.
*/
if (!(cdb[10] & 0x08)) {
- printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+ pr_err("WRITE_SAME w/o UNMAP bit not"
" supported for Block Discard Emulation\n");
goto out_invalid_cdb_field;
}
break;
default:
- printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+ pr_err("VARIABLE_LENGTH_CMD service action"
" 0x%04x not supported\n", service_action);
goto out_unsupported_cdb;
}
break;
- case 0xa3:
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ case MAINTENANCE_IN:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_IN from SCC-2 */
/*
* Check for emulated MI_REPORT_TARGET_PGS.
*/
if (cdb[1] == MI_REPORT_TARGET_PGS) {
cmd->transport_emulate_cdb =
- (T10_ALUA(su_dev)->alua_type ==
+ (su_dev->t10_alua.alua_type ==
SPC3_ALUA_EMULATED) ?
- &core_emulate_report_target_port_groups :
+ core_emulate_report_target_port_groups :
NULL;
}
size = (cdb[6] << 24) | (cdb[7] << 16) |
@@ -3344,7 +3095,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_SEND_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SELECT:
size = cdb[4];
@@ -3356,7 +3107,7 @@ static int transport_generic_cmd_sequencer(
break;
case MODE_SENSE:
size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case MODE_SENSE_10:
case GPCMD_READ_BUFFER_CAPACITY:
@@ -3364,11 +3115,11 @@ static int transport_generic_cmd_sequencer(
case LOG_SELECT:
case LOG_SENSE:
size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BLOCK_LIMITS:
size = READ_BLOCK_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_GET_CONFIGURATION:
case GPCMD_READ_FORMAT_CAPACITIES:
@@ -3380,11 +3131,11 @@ static int transport_generic_cmd_sequencer(
case PERSISTENT_RESERVE_IN:
case PERSISTENT_RESERVE_OUT:
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type ==
+ (su_dev->t10_pr.res_type ==
SPC3_PERSISTENT_RESERVATIONS) ?
- &core_scsi3_emulate_pr : NULL;
+ core_scsi3_emulate_pr : NULL;
size = (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case GPCMD_MECHANISM_STATUS:
case GPCMD_READ_DVD_STRUCTURE:
@@ -3393,19 +3144,19 @@ static int transport_generic_cmd_sequencer(
break;
case READ_POSITION:
size = READ_POSITION_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
- case 0xa4:
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_ROM) {
+ case MAINTENANCE_OUT:
+ if (dev->transport->get_device_type(dev) != TYPE_ROM) {
/* MAINTENANCE_OUT from SCC-2
*
* Check for emulated MO_SET_TARGET_PGS.
*/
if (cdb[1] == MO_SET_TARGET_PGS) {
cmd->transport_emulate_cdb =
- (T10_ALUA(su_dev)->alua_type ==
+ (su_dev->t10_alua.alua_type ==
SPC3_ALUA_EMULATED) ?
- &core_emulate_set_target_port_groups :
+ core_emulate_set_target_port_groups :
NULL;
}
@@ -3415,7 +3166,7 @@ static int transport_generic_cmd_sequencer(
/* GPCMD_REPORT_KEY from multi media commands */
size = (cdb[8] << 8) + cdb[9];
}
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case INQUIRY:
size = (cdb[3] << 8) + cdb[4];
@@ -3423,23 +3174,23 @@ static int transport_generic_cmd_sequencer(
* Do implicit HEAD_OF_QUEUE processing for INQUIRY.
* See spc4r17 section 5.3
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_CAPACITY:
size = READ_CAP_LEN;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_MEDIA_SERIAL_NUMBER:
case SECURITY_PROTOCOL_IN:
case SECURITY_PROTOCOL_OUT:
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case SERVICE_ACTION_IN:
case ACCESS_CONTROL_IN:
@@ -3450,36 +3201,36 @@ static int transport_generic_cmd_sequencer(
case WRITE_ATTRIBUTE:
size = (cdb[10] << 24) | (cdb[11] << 16) |
(cdb[12] << 8) | cdb[13];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
size = (cdb[3] << 8) | cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
/* #warning FIXME: Figure out correct GPCMD_READ_CD blocksize. */
#if 0
case GPCMD_READ_CD:
sectors = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
size = (2336 * sectors);
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
#endif
case READ_TOC:
size = cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case REQUEST_SENSE:
size = cdb[4];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case READ_ELEMENT_STATUS:
size = 65536 * cdb[7] + 256 * cdb[8] + cdb[9];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_BUFFER:
size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case RESERVE:
case RESERVE_10:
@@ -3500,9 +3251,9 @@ static int transport_generic_cmd_sequencer(
* emulation disabled.
*/
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type !=
+ (su_dev->t10_pr.res_type !=
SPC_PASSTHROUGH) ?
- &core_scsi2_emulate_crh : NULL;
+ core_scsi2_emulate_crh : NULL;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case RELEASE:
@@ -3517,9 +3268,9 @@ static int transport_generic_cmd_sequencer(
size = cmd->data_length;
cmd->transport_emulate_cdb =
- (T10_RES(su_dev)->res_type !=
+ (su_dev->t10_pr.res_type !=
SPC_PASSTHROUGH) ?
- &core_scsi2_emulate_crh : NULL;
+ core_scsi2_emulate_crh : NULL;
cmd->se_cmd_flags |= SCF_SCSI_NON_DATA_CDB;
break;
case SYNCHRONIZE_CACHE:
@@ -3529,10 +3280,10 @@ static int transport_generic_cmd_sequencer(
*/
if (cdb[0] == SYNCHRONIZE_CACHE) {
sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
- T_TASK(cmd)->t_task_lba = transport_lba_32(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
} else {
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
- T_TASK(cmd)->t_task_lba = transport_lba_64(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
}
if (sector_ret)
goto out_unsupported_cdb;
@@ -3543,7 +3294,7 @@ static int transport_generic_cmd_sequencer(
/*
* For TCM/pSCSI passthrough, skip cmd->transport_emulate_cdb()
*/
- if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+ if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
break;
/*
* Set SCF_EMULATE_CDB_ASYNC to ensure asynchronous operation
@@ -3554,32 +3305,27 @@ static int transport_generic_cmd_sequencer(
* Check to ensure that LBA + Range does not exceed past end of
* device.
*/
- if (transport_get_sectors(cmd) < 0)
+ if (!transport_cmd_get_valid_sectors(cmd))
goto out_invalid_cdb_field;
break;
case UNMAP:
size = get_unaligned_be16(&cdb[7]);
- passthrough = (TRANSPORT(dev)->transport_type ==
- TRANSPORT_PLUGIN_PHBA_PDEV);
- /*
- * Determine if the received UNMAP used to for direct passthrough
- * into Linux/SCSI with struct request via TCM/pSCSI or we are
- * signaling the use of internal transport_generic_unmap() emulation
- * for UNMAP -> Linux/BLOCK disbard with TCM/IBLOCK and TCM/FILEIO
- * subsystem plugin backstores.
- */
- if (!(passthrough))
- cmd->se_cmd_flags |= SCF_EMULATE_SYNC_UNMAP;
-
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
case WRITE_SAME_16:
sectors = transport_get_sectors_16(cdb, cmd, &sector_ret);
if (sector_ret)
goto out_unsupported_cdb;
- size = transport_get_size(sectors, cdb, cmd);
- T_TASK(cmd)->t_task_lba = get_unaligned_be16(&cdb[2]);
- passthrough = (TRANSPORT(dev)->transport_type ==
+
+ if (sectors)
+ size = transport_get_size(sectors, cdb, cmd);
+ else {
+ pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+ goto out_invalid_cdb_field;
+ }
+
+ cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
+ passthrough = (dev->transport->transport_type ==
TRANSPORT_PLUGIN_PHBA_PDEV);
/*
* Determine if the received WRITE_SAME_16 is used for direct
@@ -3588,9 +3334,9 @@ static int transport_generic_cmd_sequencer(
* emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
* TCM/FILEIO subsystem plugin backstores.
*/
- if (!(passthrough)) {
+ if (!passthrough) {
if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
- printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+ pr_err("WRITE_SAME PBDATA and LBDATA"
" bits not supported for Block Discard"
" Emulation\n");
goto out_invalid_cdb_field;
@@ -3600,7 +3346,7 @@ static int transport_generic_cmd_sequencer(
* tpws with the UNMAP=1 bit set.
*/
if (!(cdb[1] & 0x08)) {
- printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+ pr_err("WRITE_SAME w/o UNMAP bit not "
" supported for Block Discard Emulation\n");
goto out_invalid_cdb_field;
}
@@ -3625,34 +3371,34 @@ static int transport_generic_cmd_sequencer(
break;
case REPORT_LUNS:
cmd->transport_emulate_cdb =
- &transport_core_report_lun_response;
+ transport_core_report_lun_response;
size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
/*
* Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
* See spc4r17 section 5.3
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
cmd->sam_task_attr = MSG_HEAD_TAG;
- cmd->se_cmd_flags |= SCF_SCSI_CONTROL_NONSG_IO_CDB;
+ cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
break;
default:
- printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+ pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
" 0x%02x, sending CHECK_CONDITION.\n",
- CMD_TFO(cmd)->get_fabric_name(), cdb[0]);
+ cmd->se_tfo->get_fabric_name(), cdb[0]);
cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
goto out_unsupported_cdb;
}
if (size != cmd->data_length) {
- printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+ pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
- " 0x%02x\n", CMD_TFO(cmd)->get_fabric_name(),
+ " 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cdb[0]);
cmd->cmd_spdtl = size;
if (cmd->data_direction == DMA_TO_DEVICE) {
- printk(KERN_ERR "Rejecting underflow/overflow"
+ pr_err("Rejecting underflow/overflow"
" WRITE data\n");
goto out_invalid_cdb_field;
}
@@ -3660,10 +3406,10 @@ static int transport_generic_cmd_sequencer(
* Reject READ_* or WRITE_* with overflow/underflow for
* type SCF_SCSI_DATA_SG_IO_CDB.
*/
- if (!(ret) && (DEV_ATTRIB(dev)->block_size != 512)) {
- printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+ if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
+ pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
" CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", TRANSPORT(dev)->name);
+ " plugin: %s\n", dev->transport->name);
/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
goto out_invalid_cdb_field;
}
@@ -3678,105 +3424,22 @@ static int transport_generic_cmd_sequencer(
cmd->data_length = size;
}
+ /* Let's limit control cdbs to a page, for simplicity's sake. */
+ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+ size > PAGE_SIZE)
+ goto out_invalid_cdb_field;
+
transport_set_supported_SAM_opcode(cmd);
return ret;
out_unsupported_cdb:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- return -2;
+ return -EINVAL;
out_invalid_cdb_field:
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- return -2;
-}
-
-static inline void transport_release_tasks(struct se_cmd *);
-
-/*
- * This function will copy a contiguous *src buffer into a destination
- * struct scatterlist array.
- */
-static void transport_memcpy_write_contig(
- struct se_cmd *cmd,
- struct scatterlist *sg_d,
- unsigned char *src)
-{
- u32 i = 0, length = 0, total_length = cmd->data_length;
- void *dst;
-
- while (total_length) {
- length = sg_d[i].length;
-
- if (length > total_length)
- length = total_length;
-
- dst = sg_virt(&sg_d[i]);
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- src += length;
- i++;
- }
-}
-
-/*
- * This function will copy a struct scatterlist array *sg_s into a destination
- * contiguous *dst buffer.
- */
-static void transport_memcpy_read_contig(
- struct se_cmd *cmd,
- unsigned char *dst,
- struct scatterlist *sg_s)
-{
- u32 i = 0, length = 0, total_length = cmd->data_length;
- void *src;
-
- while (total_length) {
- length = sg_s[i].length;
-
- if (length > total_length)
- length = total_length;
-
- src = sg_virt(&sg_s[i]);
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- dst += length;
- i++;
- }
-}
-
-static void transport_memcpy_se_mem_read_contig(
- struct se_cmd *cmd,
- unsigned char *dst,
- struct list_head *se_mem_list)
-{
- struct se_mem *se_mem;
- void *src;
- u32 length = 0, total_length = cmd->data_length;
-
- list_for_each_entry(se_mem, se_mem_list, se_list) {
- length = se_mem->se_len;
-
- if (length > total_length)
- length = total_length;
-
- src = page_address(se_mem->se_page) + se_mem->se_off;
-
- memcpy(dst, src, length);
-
- if (!(total_length -= length))
- return;
-
- dst += length;
- }
+ return -EINVAL;
}
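
Nothing is lost by deleting the transport_memcpy_*_contig() helpers above: for the two scatterlist variants, lib/scatterlist.c already exports equivalents (the se_mem variant disappears together with the se_mem lists themselves), and those are what the new XOR callback uses. A sketch of the mapping, with placeholder arguments:

    #include <linux/scatterlist.h>
    #include <linux/types.h>

    /*
     * Sketch only: sgl/nents/buf/len stand in for the arguments the removed
     * helpers took from struct se_cmd.
     */
    static void copy_contig(struct scatterlist *sgl, unsigned int nents,
                            unsigned char *buf, size_t len, bool to_sgl)
    {
            if (to_sgl)
                    sg_copy_from_buffer(sgl, nents, buf, len);      /* was transport_memcpy_write_contig() */
            else
                    sg_copy_to_buffer(sgl, nents, buf, len);        /* was transport_memcpy_read_contig() */
    }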
/*
@@ -3786,7 +3449,7 @@ static void transport_memcpy_se_mem_read_contig(
*/
static void transport_complete_task_attr(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_cmd *cmd_p, *cmd_tmp;
int new_active_tasks = 0;
@@ -3794,25 +3457,25 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
atomic_dec(&dev->simple_cmds);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+ pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
atomic_dec(&dev->dev_hoq_count);
smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+ pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_list);
+ list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
- DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+ pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
}
/*
@@ -3822,15 +3485,15 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
*/
spin_lock(&dev->delayed_cmd_lock);
list_for_each_entry_safe(cmd_p, cmd_tmp,
- &dev->delayed_cmd_list, se_delayed_list) {
+ &dev->delayed_cmd_list, se_delayed_node) {
- list_del(&cmd_p->se_delayed_list);
+ list_del(&cmd_p->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
- DEBUG_STA("Calling add_tasks() for"
+ pr_debug("Calling add_tasks() for"
" cmd_p: 0x%02x Task Attr: 0x%02x"
" Dormant -> Active, se_ordered_id: %u\n",
- T_TASK(cmd_p)->t_task_cdb[0],
+ cmd_p->t_task_cdb[0],
cmd_p->sam_task_attr, cmd_p->se_ordered_id);
transport_add_tasks_from_cmd(cmd_p);
@@ -3846,20 +3509,79 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
* to do the processing of the Active tasks.
*/
if (new_active_tasks != 0)
- wake_up_interruptible(&dev->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
+}
+
+static int transport_complete_qf(struct se_cmd *cmd)
+{
+ int ret = 0;
+
+ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
+ return cmd->se_tfo->queue_status(cmd);
+
+ switch (cmd->data_direction) {
+ case DMA_FROM_DEVICE:
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ break;
+ case DMA_TO_DEVICE:
+ if (cmd->t_bidi_data_sg) {
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret < 0)
+ return ret;
+ }
+ /* Fall through for DMA_TO_DEVICE */
+ case DMA_NONE:
+ ret = cmd->se_tfo->queue_status(cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void transport_handle_queue_full(
+ struct se_cmd *cmd,
+ struct se_device *dev,
+ int (*qf_callback)(struct se_cmd *))
+{
+ spin_lock_irq(&dev->qf_cmd_lock);
+ cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL;
+ cmd->transport_qf_callback = qf_callback;
+ list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
+ atomic_inc(&dev->dev_qf_count);
+ smp_mb__after_atomic_inc();
+ spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
+
+ schedule_work(&cmd->se_dev->qf_work_queue);
}
static void transport_generic_complete_ok(struct se_cmd *cmd)
{
- int reason = 0;
+ int reason = 0, ret;
/*
* Check if we need to move delayed/dormant tasks from cmds on the
* delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
* Attribute.
*/
- if (SE_DEV(cmd)->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
+ if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
/*
+ * Check to schedule QUEUE_FULL work, or execute an existing
+ * cmd->transport_qf_callback()
+ */
+ if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
+ schedule_work(&cmd->se_dev->qf_work_queue);
+
+ if (cmd->transport_qf_callback) {
+ ret = cmd->transport_qf_callback(cmd);
+ if (ret < 0)
+ goto queue_full;
+
+ cmd->transport_qf_callback = NULL;
+ goto done;
+ }
+ /*
* Check if we need to retrieve a sense buffer from
* the struct se_cmd in question.
*/
@@ -3872,8 +3594,11 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
* a non GOOD status.
*/
if (cmd->scsi_status) {
- transport_send_check_condition_and_sense(
+ ret = transport_send_check_condition_and_sense(
cmd, reason, 1);
+ if (ret == -EAGAIN)
+ goto queue_full;
+
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -3889,53 +3614,57 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
- /*
- * If enabled by TCM fabirc module pre-registered SGL
- * memory, perform the memcpy() from the TCM internal
- * contigious buffer back to the original SGL.
- */
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
- transport_memcpy_write_contig(cmd,
- T_TASK(cmd)->t_task_pt_sgl,
- T_TASK(cmd)->t_task_buf);
- CMD_TFO(cmd)->queue_data_in(cmd);
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
case DMA_TO_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.rx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.rx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
/*
* Check if we need to send READ payload for BIDI-COMMAND
*/
- if (T_TASK(cmd)->t_mem_bidi_list != NULL) {
+ if (cmd->t_bidi_data_sg) {
spin_lock(&cmd->se_lun->lun_sep_lock);
- if (SE_LUN(cmd)->lun_sep) {
- SE_LUN(cmd)->lun_sep->sep_stats.tx_data_octets +=
+ if (cmd->se_lun->lun_sep) {
+ cmd->se_lun->lun_sep->sep_stats.tx_data_octets +=
cmd->data_length;
}
spin_unlock(&cmd->se_lun->lun_sep_lock);
- CMD_TFO(cmd)->queue_data_in(cmd);
+ ret = cmd->se_tfo->queue_data_in(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
}
/* Fall through for DMA_TO_DEVICE */
case DMA_NONE:
- CMD_TFO(cmd)->queue_status(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
break;
default:
break;
}
+done:
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
+ return;
+
+queue_full:
+ pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+ " data_direction: %d\n", cmd, cmd->data_direction);
+ transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
}
static void transport_free_dev_tasks(struct se_cmd *cmd)
@@ -3943,9 +3672,9 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
struct se_task *task, *task_tmp;
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry_safe(task, task_tmp,
- &T_TASK(cmd)->t_task_list, t_list) {
+ &cmd->t_task_list, t_list) {
if (atomic_read(&task->task_active))
continue;
@@ -3954,75 +3683,40 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
list_del(&task->t_list);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (task->se_dev)
- TRANSPORT(task->se_dev)->free_task(task);
+ task->se_dev->transport->free_task(task);
else
- printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+ pr_err("task[%u] - task->se_dev is NULL\n",
task->task_no);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
-static inline void transport_free_pages(struct se_cmd *cmd)
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
{
- struct se_mem *se_mem, *se_mem_tmp;
- int free_page = 1;
-
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
- free_page = 0;
- if (cmd->se_dev->transport->do_se_mem_map)
- free_page = 0;
+ struct scatterlist *sg;
+ int count;
- if (T_TASK(cmd)->t_task_buf) {
- kfree(T_TASK(cmd)->t_task_buf);
- T_TASK(cmd)->t_task_buf = NULL;
- return;
- }
+ for_each_sg(sgl, sg, nents, count)
+ __free_page(sg_page(sg));
- /*
- * Caller will handle releasing of struct se_mem.
- */
- if (cmd->se_cmd_flags & SCF_CMD_PASSTHROUGH_NOALLOC)
- return;
+ kfree(sgl);
+}
- if (!(T_TASK(cmd)->t_tasks_se_num))
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
+ if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
return;
- list_for_each_entry_safe(se_mem, se_mem_tmp,
- T_TASK(cmd)->t_mem_list, se_list) {
- /*
- * We only release call __free_page(struct se_mem->se_page) when
- * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
- */
- if (free_page)
- __free_page(se_mem->se_page);
-
- list_del(&se_mem->se_list);
- kmem_cache_free(se_mem_cache, se_mem);
- }
-
- if (T_TASK(cmd)->t_mem_bidi_list && T_TASK(cmd)->t_tasks_se_bidi_num) {
- list_for_each_entry_safe(se_mem, se_mem_tmp,
- T_TASK(cmd)->t_mem_bidi_list, se_list) {
- /*
- * We only release call __free_page(struct se_mem->se_page) when
- * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
- */
- if (free_page)
- __free_page(se_mem->se_page);
-
- list_del(&se_mem->se_list);
- kmem_cache_free(se_mem_cache, se_mem);
- }
- }
+ transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+ cmd->t_data_sg = NULL;
+ cmd->t_data_nents = 0;
- kfree(T_TASK(cmd)->t_mem_bidi_list);
- T_TASK(cmd)->t_mem_bidi_list = NULL;
- kfree(T_TASK(cmd)->t_mem_list);
- T_TASK(cmd)->t_mem_list = NULL;
- T_TASK(cmd)->t_tasks_se_num = 0;
+ transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
}
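
The new transport_free_sgl() assumes each entry owns exactly one page, so teardown is a for_each_sg() walk freeing every page followed by kfree() of the array itself. For reference, the same iteration idiom in isolation (sgl_total_len is an invented helper, shown only to illustrate the walk):

	#include <linux/scatterlist.h>

	/* Sum the bytes described by an SGL -- the same for_each_sg() walk
	 * that transport_free_sgl() uses to visit (and free) every entry. */
	static u32 sgl_total_len(struct scatterlist *sgl, int nents)
	{
		struct scatterlist *sg;
		u32 total = 0;
		int i;

		for_each_sg(sgl, sg, nents, i)
			total += sg->length;

		return total;
	}
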
static inline void transport_release_tasks(struct se_cmd *cmd)
@@ -4034,23 +3728,23 @@ static inline int transport_dec_and_check(struct se_cmd *cmd)
{
unsigned long flags;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_fe_count))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_fe_count)) {
+ if (!atomic_dec_and_test(&cmd->t_fe_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return 1;
}
}
- if (atomic_read(&T_TASK(cmd)->t_se_count)) {
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_se_count))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ if (atomic_read(&cmd->t_se_count)) {
+ if (!atomic_dec_and_test(&cmd->t_se_count)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
return 1;
}
}
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
@@ -4062,68 +3756,57 @@ static void transport_release_fe_cmd(struct se_cmd *cmd)
if (transport_dec_and_check(cmd))
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto free_pages;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_release_tasks(cmd);
free_pages:
transport_free_pages(cmd);
transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_direct(cmd);
+ cmd->se_tfo->release_cmd(cmd);
}
-static int transport_generic_remove(
- struct se_cmd *cmd,
- int release_to_pool,
- int session_reinstatement)
+static int
+transport_generic_remove(struct se_cmd *cmd, int session_reinstatement)
{
unsigned long flags;
- if (!(T_TASK(cmd)))
- goto release_cmd;
-
if (transport_dec_and_check(cmd)) {
if (session_reinstatement) {
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
flags);
}
return 1;
}
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
goto free_pages;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_release_tasks(cmd);
+
free_pages:
transport_free_pages(cmd);
-
-release_cmd:
- if (release_to_pool) {
- transport_release_cmd_to_pool(cmd);
- } else {
- transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_direct(cmd);
- }
-
+ transport_release_cmd(cmd);
return 0;
}
/*
- * transport_generic_map_mem_to_cmd - Perform SGL -> struct se_mem map
+ * transport_generic_map_mem_to_cmd - Use fabric-alloced pages instead of
+ * allocating in the core.
* @cmd: Associated se_cmd descriptor
* @mem: SGL style memory for TCM WRITE / READ
* @sg_mem_num: Number of SGL elements
@@ -4135,614 +3818,163 @@ release_cmd:
*/
int transport_generic_map_mem_to_cmd(
struct se_cmd *cmd,
- struct scatterlist *mem,
- u32 sg_mem_num,
- struct scatterlist *mem_bidi_in,
- u32 sg_mem_bidi_num)
+ struct scatterlist *sgl,
+ u32 sgl_count,
+ struct scatterlist *sgl_bidi,
+ u32 sgl_bidi_count)
{
- u32 se_mem_cnt_out = 0;
- int ret;
-
- if (!(mem) || !(sg_mem_num))
+ if (!sgl || !sgl_count)
return 0;
- /*
- * Passed *mem will contain a list_head containing preformatted
- * struct se_mem elements...
- */
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM)) {
- if ((mem_bidi_in) || (sg_mem_bidi_num)) {
- printk(KERN_ERR "SCF_CMD_PASSTHROUGH_NOALLOC not supported"
- " with BIDI-COMMAND\n");
- return -ENOSYS;
- }
- T_TASK(cmd)->t_mem_list = (struct list_head *)mem;
- T_TASK(cmd)->t_tasks_se_num = sg_mem_num;
- cmd->se_cmd_flags |= SCF_CMD_PASSTHROUGH_NOALLOC;
- return 0;
- }
- /*
- * Otherwise, assume the caller is passing a struct scatterlist
- * array from include/linux/scatterlist.h
- */
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
- /*
- * For CDB using TCM struct se_mem linked list scatterlist memory
- * processed into a TCM struct se_subsystem_dev, we do the mapping
- * from the passed physical memory to struct se_mem->se_page here.
- */
- T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_list))
- return -ENOMEM;
- ret = transport_map_sg_to_mem(cmd,
- T_TASK(cmd)->t_mem_list, mem, &se_mem_cnt_out);
- if (ret < 0)
- return -ENOMEM;
+ cmd->t_data_sg = sgl;
+ cmd->t_data_nents = sgl_count;
- T_TASK(cmd)->t_tasks_se_num = se_mem_cnt_out;
- /*
- * Setup BIDI READ list of struct se_mem elements
- */
- if ((mem_bidi_in) && (sg_mem_bidi_num)) {
- T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_bidi_list)) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
- se_mem_cnt_out = 0;
-
- ret = transport_map_sg_to_mem(cmd,
- T_TASK(cmd)->t_mem_bidi_list, mem_bidi_in,
- &se_mem_cnt_out);
- if (ret < 0) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
-
- T_TASK(cmd)->t_tasks_se_bidi_num = se_mem_cnt_out;
+ if (sgl_bidi && sgl_bidi_count) {
+ cmd->t_bidi_data_sg = sgl_bidi;
+ cmd->t_bidi_data_nents = sgl_bidi_count;
}
cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-
- } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
- if (mem_bidi_in || sg_mem_bidi_num) {
- printk(KERN_ERR "BIDI-Commands not supported using "
- "SCF_SCSI_CONTROL_NONSG_IO_CDB\n");
- return -ENOSYS;
- }
- /*
- * For incoming CDBs using a contiguous buffer internall with TCM,
- * save the passed struct scatterlist memory. After TCM storage object
- * processing has completed for this struct se_cmd, TCM core will call
- * transport_memcpy_[write,read]_contig() as necessary from
- * transport_generic_complete_ok() and transport_write_pending() in order
- * to copy the TCM buffer to/from the original passed *mem in SGL ->
- * struct scatterlist format.
- */
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_CONTIG_TO_SG;
- T_TASK(cmd)->t_task_pt_sgl = mem;
}
return 0;
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
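
For reference, a hypothetical fabric module that already owns SGL memory would hand it to the core roughly as follows. This is a sketch, not part of the patch: my_fabric_submit, my_sg and my_nents are invented names, and error handling is abbreviated.

	/* Fabric already built an SGL for the payload; let the core use it
	 * directly instead of allocating its own pages. */
	static int my_fabric_submit(struct se_cmd *se_cmd,
				    struct scatterlist *my_sg, u32 my_nents)
	{
		int ret;

		ret = transport_generic_map_mem_to_cmd(se_cmd, my_sg, my_nents,
						       NULL, 0);	/* no BIDI */
		if (ret < 0)
			return ret;

		/* The core now skips transport_generic_get_mem() because
		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is set on the command. */
		return transport_generic_new_cmd(se_cmd);
	}
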
-
-static inline long long transport_dev_end_lba(struct se_device *dev)
-{
- return dev->transport->get_blocks(dev) + 1;
-}
-
-static int transport_get_sectors(struct se_cmd *cmd)
-{
- struct se_device *dev = SE_DEV(cmd);
-
- T_TASK(cmd)->t_tasks_sectors =
- (cmd->data_length / DEV_ATTRIB(dev)->block_size);
- if (!(T_TASK(cmd)->t_tasks_sectors))
- T_TASK(cmd)->t_tasks_sectors = 1;
-
- if (TRANSPORT(dev)->get_device_type(dev) != TYPE_DISK)
- return 0;
-
- if ((T_TASK(cmd)->t_task_lba + T_TASK(cmd)->t_tasks_sectors) >
- transport_dev_end_lba(dev)) {
- printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
- " transport_dev_end_lba(): %llu\n",
- T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
- transport_dev_end_lba(dev));
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- return PYX_TRANSPORT_REQ_TOO_MANY_SECTORS;
- }
-
- return 0;
-}
-
static int transport_new_cmd_obj(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
- u32 task_cdbs = 0, rc;
-
- if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
- task_cdbs++;
- T_TASK(cmd)->t_task_cdbs++;
- } else {
- int set_counts = 1;
+ struct se_device *dev = cmd->se_dev;
+	int task_cdbs;
+	int rc;
+ int set_counts = 1;
- /*
- * Setup any BIDI READ tasks and memory from
- * T_TASK(cmd)->t_mem_bidi_list so the READ struct se_tasks
- * are queued first for the non pSCSI passthrough case.
- */
- if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
- (TRANSPORT(dev)->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
- rc = transport_generic_get_cdb_count(cmd,
- T_TASK(cmd)->t_task_lba,
- T_TASK(cmd)->t_tasks_sectors,
- DMA_FROM_DEVICE, T_TASK(cmd)->t_mem_bidi_list,
- set_counts);
- if (!(rc)) {
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- }
- set_counts = 0;
- }
- /*
- * Setup the tasks and memory from T_TASK(cmd)->t_mem_list
- * Note for BIDI transfers this will contain the WRITE payload
- */
- task_cdbs = transport_generic_get_cdb_count(cmd,
- T_TASK(cmd)->t_task_lba,
- T_TASK(cmd)->t_tasks_sectors,
- cmd->data_direction, T_TASK(cmd)->t_mem_list,
- set_counts);
- if (!(task_cdbs)) {
+ /*
+ * Setup any BIDI READ tasks and memory from
+ * cmd->t_mem_bidi_list so the READ struct se_tasks
+ * are queued first for the non pSCSI passthrough case.
+ */
+ if (cmd->t_bidi_data_sg &&
+ (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) {
+ rc = transport_allocate_tasks(cmd,
+ cmd->t_task_lba,
+ DMA_FROM_DEVICE,
+ cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ if (rc <= 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- T_TASK(cmd)->t_task_cdbs += task_cdbs;
-
-#if 0
- printk(KERN_INFO "data_length: %u, LBA: %llu t_tasks_sectors:"
- " %u, t_task_cdbs: %u\n", obj_ptr, cmd->data_length,
- T_TASK(cmd)->t_task_lba, T_TASK(cmd)->t_tasks_sectors,
- T_TASK(cmd)->t_task_cdbs);
-#endif
- }
-
- atomic_set(&T_TASK(cmd)->t_task_cdbs_left, task_cdbs);
- atomic_set(&T_TASK(cmd)->t_task_cdbs_ex_left, task_cdbs);
- atomic_set(&T_TASK(cmd)->t_task_cdbs_timeout_left, task_cdbs);
- return 0;
-}
-
-static struct list_head *transport_init_se_mem_list(void)
-{
- struct list_head *se_mem_list;
-
- se_mem_list = kzalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!(se_mem_list)) {
- printk(KERN_ERR "Unable to allocate memory for se_mem_list\n");
- return NULL;
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
+ set_counts = 0;
}
- INIT_LIST_HEAD(se_mem_list);
-
- return se_mem_list;
-}
-
-static int
-transport_generic_get_mem(struct se_cmd *cmd, u32 length, u32 dma_size)
-{
- unsigned char *buf;
- struct se_mem *se_mem;
-
- T_TASK(cmd)->t_mem_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_list))
- return -ENOMEM;
-
/*
- * If the device uses memory mapping this is enough.
+ * Setup the tasks and memory from cmd->t_mem_list
+ * Note for BIDI transfers this will contain the WRITE payload
*/
- if (cmd->se_dev->transport->do_se_mem_map)
- return 0;
-
- /*
- * Setup BIDI-COMMAND READ list of struct se_mem elements
- */
- if (T_TASK(cmd)->t_tasks_bidi) {
- T_TASK(cmd)->t_mem_bidi_list = transport_init_se_mem_list();
- if (!(T_TASK(cmd)->t_mem_bidi_list)) {
- kfree(T_TASK(cmd)->t_mem_list);
- return -ENOMEM;
- }
+ task_cdbs = transport_allocate_tasks(cmd,
+ cmd->t_task_lba,
+ cmd->data_direction,
+ cmd->t_data_sg,
+ cmd->t_data_nents);
+ if (task_cdbs <= 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return PYX_TRANSPORT_LU_COMM_FAILURE;
}
- while (length) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- goto out;
- }
-
-/* #warning FIXME Allocate contigous pages for struct se_mem elements */
- se_mem->se_page = alloc_pages(GFP_KERNEL, 0);
- if (!(se_mem->se_page)) {
- printk(KERN_ERR "alloc_pages() failed\n");
- goto out;
- }
-
- buf = kmap_atomic(se_mem->se_page, KM_IRQ0);
- if (!(buf)) {
- printk(KERN_ERR "kmap_atomic() failed\n");
- goto out;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
- se_mem->se_len = (length > dma_size) ? dma_size : length;
- memset(buf, 0, se_mem->se_len);
- kunmap_atomic(buf, KM_IRQ0);
-
- list_add_tail(&se_mem->se_list, T_TASK(cmd)->t_mem_list);
- T_TASK(cmd)->t_tasks_se_num++;
-
- DEBUG_MEM("Allocated struct se_mem page(%p) Length(%u)"
- " Offset(%u)\n", se_mem->se_page, se_mem->se_len,
- se_mem->se_off);
-
- length -= se_mem->se_len;
+ if (set_counts) {
+ atomic_inc(&cmd->t_fe_count);
+ atomic_inc(&cmd->t_se_count);
}
- DEBUG_MEM("Allocated total struct se_mem elements(%u)\n",
- T_TASK(cmd)->t_tasks_se_num);
+ cmd->t_task_list_num = task_cdbs;
+ atomic_set(&cmd->t_task_cdbs_left, task_cdbs);
+ atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs);
+ atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs);
return 0;
-out:
- if (se_mem)
- __free_pages(se_mem->se_page, 0);
- kmem_cache_free(se_mem_cache, se_mem);
- return -1;
}
-u32 transport_calc_sg_num(
- struct se_task *task,
- struct se_mem *in_se_mem,
- u32 task_offset)
+void *transport_kmap_first_data_page(struct se_cmd *cmd)
{
- struct se_cmd *se_cmd = task->task_se_cmd;
- struct se_device *se_dev = SE_DEV(se_cmd);
- struct se_mem *se_mem = in_se_mem;
- struct target_core_fabric_ops *tfo = CMD_TFO(se_cmd);
- u32 sg_length, task_size = task->task_size, task_sg_num_padded;
-
- while (task_size != 0) {
- DEBUG_SC("se_mem->se_page(%p) se_mem->se_len(%u)"
- " se_mem->se_off(%u) task_offset(%u)\n",
- se_mem->se_page, se_mem->se_len,
- se_mem->se_off, task_offset);
-
- if (task_offset == 0) {
- if (task_size >= se_mem->se_len) {
- sg_length = se_mem->se_len;
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list)))
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- } else {
- sg_length = task_size;
- task_size -= sg_length;
- goto next;
- }
+ struct scatterlist *sg = cmd->t_data_sg;
- DEBUG_SC("sg_length(%u) task_size(%u)\n",
- sg_length, task_size);
- } else {
- if ((se_mem->se_len - task_offset) > task_size) {
- sg_length = task_size;
- task_size -= sg_length;
- goto next;
- } else {
- sg_length = (se_mem->se_len - task_offset);
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list)))
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- }
-
- DEBUG_SC("sg_length(%u) task_size(%u)\n",
- sg_length, task_size);
-
- task_offset = 0;
- }
- task_size -= sg_length;
-next:
- DEBUG_SC("task[%u] - Reducing task_size to(%u)\n",
- task->task_no, task_size);
-
- task->task_sg_num++;
- }
- /*
- * Check if the fabric module driver is requesting that all
- * struct se_task->task_sg[] be chained together.. If so,
- * then allocate an extra padding SG entry for linking and
- * marking the end of the chained SGL.
- */
- if (tfo->task_sg_chaining) {
- task_sg_num_padded = (task->task_sg_num + 1);
- task->task_padded_sg = 1;
- } else
- task_sg_num_padded = task->task_sg_num;
-
- task->task_sg = kzalloc(task_sg_num_padded *
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!(task->task_sg)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " task->task_sg\n");
- return 0;
- }
- sg_init_table(&task->task_sg[0], task_sg_num_padded);
+ BUG_ON(!sg);
/*
- * Setup task->task_sg_bidi for SCSI READ payload for
- * TCM/pSCSI passthrough if present for BIDI-COMMAND
+ * We need to take into account a possible offset here for fabrics like
+	 * tcm_loop that may be using a contig buffer from the SCSI midlayer for
+ * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
*/
- if ((T_TASK(se_cmd)->t_mem_bidi_list != NULL) &&
- (TRANSPORT(se_dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)) {
- task->task_sg_bidi = kzalloc(task_sg_num_padded *
- sizeof(struct scatterlist), GFP_KERNEL);
- if (!(task->task_sg_bidi)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " task->task_sg_bidi\n");
- return 0;
- }
- sg_init_table(&task->task_sg_bidi[0], task_sg_num_padded);
- }
- /*
- * For the chaining case, setup the proper end of SGL for the
- * initial submission struct task into struct se_subsystem_api.
- * This will be cleared later by transport_do_task_sg_chain()
- */
- if (task->task_padded_sg) {
- sg_mark_end(&task->task_sg[task->task_sg_num - 1]);
- /*
- * Added the 'if' check before marking end of bi-directional
- * scatterlist (which gets created only in case of request
- * (RD + WR).
- */
- if (task->task_sg_bidi)
- sg_mark_end(&task->task_sg_bidi[task->task_sg_num - 1]);
- }
-
- DEBUG_SC("Successfully allocated task->task_sg_num(%u),"
- " task_sg_num_padded(%u)\n", task->task_sg_num,
- task_sg_num_padded);
-
- return task->task_sg_num;
+ return kmap(sg_page(sg)) + sg->offset;
}
+EXPORT_SYMBOL(transport_kmap_first_data_page);
-static inline int transport_set_tasks_sectors_disk(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
+void transport_kunmap_first_data_page(struct se_cmd *cmd)
{
- if ((lba + sectors) > transport_dev_end_lba(dev)) {
- task->task_sectors = ((transport_dev_end_lba(dev) - lba) + 1);
-
- if (task->task_sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- }
- } else {
- if (sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- } else
- task->task_sectors = sectors;
- }
-
- return 0;
+ kunmap(sg_page(cmd->t_data_sg));
}
+EXPORT_SYMBOL(transport_kunmap_first_data_page);
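
A typical consumer of the kmap/kunmap pair above maps the first data page to fill in a small response payload, then unmaps it. A sketch under the assumption that the payload fits in the first SGL entry (fill_small_payload is an invented helper):

	#include <linux/string.h>

	/* Copy a small payload into the command's first data page; only
	 * valid when the payload fits within that first SGL entry. */
	static void fill_small_payload(struct se_cmd *cmd,
				       const unsigned char *buf, u32 len)
	{
		unsigned char *dst = transport_kmap_first_data_page(cmd);

		memcpy(dst, buf, min_t(u32, len, cmd->data_length));
		transport_kunmap_first_data_page(cmd);
	}
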
-static inline int transport_set_tasks_sectors_non_disk(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
+static int
+transport_generic_get_mem(struct se_cmd *cmd)
{
- if (sectors > DEV_ATTRIB(dev)->max_sectors) {
- task->task_sectors = DEV_ATTRIB(dev)->max_sectors;
- *max_sectors_set = 1;
- } else
- task->task_sectors = sectors;
+ u32 length = cmd->data_length;
+ unsigned int nents;
+ struct page *page;
+ int i = 0;
- return 0;
-}
+ nents = DIV_ROUND_UP(length, PAGE_SIZE);
+ cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
+ if (!cmd->t_data_sg)
+ return -ENOMEM;
-static inline int transport_set_tasks_sectors(
- struct se_task *task,
- struct se_device *dev,
- unsigned long long lba,
- u32 sectors,
- int *max_sectors_set)
-{
- return (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) ?
- transport_set_tasks_sectors_disk(task, dev, lba, sectors,
- max_sectors_set) :
- transport_set_tasks_sectors_non_disk(task, dev, lba, sectors,
- max_sectors_set);
-}
+ cmd->t_data_nents = nents;
+ sg_init_table(cmd->t_data_sg, nents);
-static int transport_map_sg_to_mem(
- struct se_cmd *cmd,
- struct list_head *se_mem_list,
- void *in_mem,
- u32 *se_mem_cnt)
-{
- struct se_mem *se_mem;
- struct scatterlist *sg;
- u32 sg_count = 1, cmd_size = cmd->data_length;
+ while (length) {
+ u32 page_len = min_t(u32, length, PAGE_SIZE);
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto out;
- if (!in_mem) {
- printk(KERN_ERR "No source scatterlist\n");
- return -1;
+ sg_set_page(&cmd->t_data_sg[i], page, page_len, 0);
+ length -= page_len;
+ i++;
}
- sg = (struct scatterlist *)in_mem;
-
- while (cmd_size) {
- se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
- if (!(se_mem)) {
- printk(KERN_ERR "Unable to allocate struct se_mem\n");
- return -1;
- }
- INIT_LIST_HEAD(&se_mem->se_list);
- DEBUG_MEM("sg_to_mem: Starting loop with cmd_size: %u"
- " sg_page: %p offset: %d length: %d\n", cmd_size,
- sg_page(sg), sg->offset, sg->length);
-
- se_mem->se_page = sg_page(sg);
- se_mem->se_off = sg->offset;
-
- if (cmd_size > sg->length) {
- se_mem->se_len = sg->length;
- sg = sg_next(sg);
- sg_count++;
- } else
- se_mem->se_len = cmd_size;
-
- cmd_size -= se_mem->se_len;
-
- DEBUG_MEM("sg_to_mem: *se_mem_cnt: %u cmd_size: %u\n",
- *se_mem_cnt, cmd_size);
- DEBUG_MEM("sg_to_mem: Final se_page: %p se_off: %d se_len: %d\n",
- se_mem->se_page, se_mem->se_off, se_mem->se_len);
+ return 0;
- list_add_tail(&se_mem->se_list, se_mem_list);
- (*se_mem_cnt)++;
+out:
+	/* Free only the pages successfully allocated so far; entry i was
+	 * never set, so start the unwind at i - 1. */
+	while (i > 0) {
+		i--;
+		__free_page(sg_page(&cmd->t_data_sg[i]));
+	}
-
- DEBUG_MEM("task[0] - Mapped(%u) struct scatterlist segments to(%u)"
- " struct se_mem\n", sg_count, *se_mem_cnt);
-
- if (sg_count != *se_mem_cnt)
- BUG();
-
- return 0;
+ kfree(cmd->t_data_sg);
+ cmd->t_data_sg = NULL;
+ return -ENOMEM;
}
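
As a concrete check of the sizing arithmetic above (a hedged example; the numbers assume 4 KiB pages):

	static inline void sgl_sizing_example(void)
	{
		/* data_length = 10240 on 4 KiB pages:
		 *   nents = DIV_ROUND_UP(10240, 4096) = 3
		 * entry 0: 4096 bytes, entry 1: 4096 bytes, entry 2: 2048 bytes
		 */
		BUILD_BUG_ON(DIV_ROUND_UP(10240, 4096) != 3);
	}
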
-/* transport_map_mem_to_sg():
- *
- *
- */
-int transport_map_mem_to_sg(
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset)
+/* Reduce sectors if they are too long for the device */
+static inline sector_t transport_limit_task_sectors(
+ struct se_device *dev,
+ unsigned long long lba,
+ sector_t sectors)
{
- struct se_cmd *se_cmd = task->task_se_cmd;
- struct se_mem *se_mem = in_se_mem;
- struct scatterlist *sg = (struct scatterlist *)in_mem;
- u32 task_size = task->task_size, sg_no = 0;
+ sectors = min_t(sector_t, sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
- if (!sg) {
- printk(KERN_ERR "Unable to locate valid struct"
- " scatterlist pointer\n");
- return -1;
- }
-
- while (task_size != 0) {
- /*
- * Setup the contigious array of scatterlists for
- * this struct se_task.
- */
- sg_assign_page(sg, se_mem->se_page);
-
- if (*task_offset == 0) {
- sg->offset = se_mem->se_off;
-
- if (task_size >= se_mem->se_len) {
- sg->length = se_mem->se_len;
+ if (dev->transport->get_device_type(dev) == TYPE_DISK)
+ if ((lba + sectors) > transport_dev_end_lba(dev))
+ sectors = ((transport_dev_end_lba(dev) - lba) + 1);
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list))) {
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- (*se_mem_cnt)++;
- }
- } else {
- sg->length = task_size;
- /*
- * Determine if we need to calculate an offset
- * into the struct se_mem on the next go around..
- */
- task_size -= sg->length;
- if (!(task_size))
- *task_offset = sg->length;
-
- goto next;
- }
-
- } else {
- sg->offset = (*task_offset + se_mem->se_off);
-
- if ((se_mem->se_len - *task_offset) > task_size) {
- sg->length = task_size;
- /*
- * Determine if we need to calculate an offset
- * into the struct se_mem on the next go around..
- */
- task_size -= sg->length;
- if (!(task_size))
- *task_offset += sg->length;
-
- goto next;
- } else {
- sg->length = (se_mem->se_len - *task_offset);
-
- if (!(list_is_last(&se_mem->se_list,
- T_TASK(se_cmd)->t_mem_list))) {
- se_mem = list_entry(se_mem->se_list.next,
- struct se_mem, se_list);
- (*se_mem_cnt)++;
- }
- }
-
- *task_offset = 0;
- }
- task_size -= sg->length;
-next:
- DEBUG_MEM("task[%u] mem_to_sg - sg[%u](%p)(%u)(%u) - Reducing"
- " task_size to(%u), task_offset: %u\n", task->task_no, sg_no,
- sg_page(sg), sg->length, sg->offset, task_size, *task_offset);
-
- sg_no++;
- if (!(task_size))
- break;
-
- sg = sg_next(sg);
-
- if (task_size > se_cmd->data_length)
- BUG();
- }
- *out_se_mem = se_mem;
-
- DEBUG_MEM("task[%u] - Mapped(%u) struct se_mem segments to total(%u)"
- " SGs\n", task->task_no, *se_mem_cnt, sg_no);
-
- return 0;
+ return sectors;
}
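
Worked numbers for the two clamps above (a hedged example): take max_sectors = 1024, transport_dev_end_lba() = 10000, and a TYPE_DISK request for 4096 sectors at lba = 9500:

	sectors = min_t(sector_t, 4096, 1024)          -> 1024
	lba + sectors = 9500 + 1024 = 10524 > 10000    -> past end of device
	sectors = (10000 - 9500) + 1                   -> 501
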
+
/*
* This function can be used by HW target mode drivers to create a linked
* scatterlist from all contiguously allocated struct se_task->task_sg[].
@@ -4751,334 +3983,236 @@ next:
*/
void transport_do_task_sg_chain(struct se_cmd *cmd)
{
- struct scatterlist *sg_head = NULL, *sg_link = NULL, *sg_first = NULL;
- struct scatterlist *sg_head_cur = NULL, *sg_link_cur = NULL;
- struct scatterlist *sg, *sg_end = NULL, *sg_end_cur = NULL;
+ struct scatterlist *sg_first = NULL;
+ struct scatterlist *sg_prev = NULL;
+ int sg_prev_nents = 0;
+ struct scatterlist *sg;
struct se_task *task;
- struct target_core_fabric_ops *tfo = CMD_TFO(cmd);
- u32 task_sg_num = 0, sg_count = 0;
+ u32 chained_nents = 0;
int i;
- if (tfo->task_sg_chaining == 0) {
- printk(KERN_ERR "task_sg_chaining is diabled for fabric module:"
- " %s\n", tfo->get_fabric_name());
- dump_stack();
- return;
- }
+ BUG_ON(!cmd->se_tfo->task_sg_chaining);
+
/*
* Walk the struct se_task list and setup scatterlist chains
- * for each contiguosly allocated struct se_task->task_sg[].
+ * for each contiguously allocated struct se_task->task_sg[].
*/
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
- if (!(task->task_sg) || !(task->task_padded_sg))
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (!task->task_sg)
continue;
- if (sg_head && sg_link) {
- sg_head_cur = &task->task_sg[0];
- sg_link_cur = &task->task_sg[task->task_sg_num];
- /*
- * Either add chain or mark end of scatterlist
- */
- if (!(list_is_last(&task->t_list,
- &T_TASK(cmd)->t_task_list))) {
- /*
- * Clear existing SGL termination bit set in
- * transport_calc_sg_num(), see sg_mark_end()
- */
- sg_end_cur = &task->task_sg[task->task_sg_num - 1];
- sg_end_cur->page_link &= ~0x02;
-
- sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += task->task_sg_num;
- task_sg_num = (task->task_sg_num + 1);
- } else {
- sg_chain(sg_head, task_sg_num, sg_head_cur);
- sg_count += task->task_sg_num;
- task_sg_num = task->task_sg_num;
- }
+ BUG_ON(!task->task_padded_sg);
- sg_head = sg_head_cur;
- sg_link = sg_link_cur;
- continue;
- }
- sg_head = sg_first = &task->task_sg[0];
- sg_link = &task->task_sg[task->task_sg_num];
- /*
- * Check for single task..
- */
- if (!(list_is_last(&task->t_list, &T_TASK(cmd)->t_task_list))) {
- /*
- * Clear existing SGL termination bit set in
- * transport_calc_sg_num(), see sg_mark_end()
- */
- sg_end = &task->task_sg[task->task_sg_num - 1];
- sg_end->page_link &= ~0x02;
- sg_count += task->task_sg_num;
- task_sg_num = (task->task_sg_num + 1);
+ if (!sg_first) {
+ sg_first = task->task_sg;
+ chained_nents = task->task_sg_nents;
} else {
- sg_count += task->task_sg_num;
- task_sg_num = task->task_sg_num;
+ sg_chain(sg_prev, sg_prev_nents, task->task_sg);
+ chained_nents += task->task_sg_nents;
}
+
+ sg_prev = task->task_sg;
+ sg_prev_nents = task->task_sg_nents;
}
/*
* Setup the starting pointer and total t_tasks_sg_linked_no including
* padding SGs for linking and to mark the end.
*/
- T_TASK(cmd)->t_tasks_sg_chained = sg_first;
- T_TASK(cmd)->t_tasks_sg_chained_no = sg_count;
+ cmd->t_tasks_sg_chained = sg_first;
+ cmd->t_tasks_sg_chained_no = chained_nents;
- DEBUG_CMD_M("Setup cmd: %p T_TASK(cmd)->t_tasks_sg_chained: %p and"
- " t_tasks_sg_chained_no: %u\n", cmd, T_TASK(cmd)->t_tasks_sg_chained,
- T_TASK(cmd)->t_tasks_sg_chained_no);
+ pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+ " t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
+ cmd->t_tasks_sg_chained_no);
- for_each_sg(T_TASK(cmd)->t_tasks_sg_chained, sg,
- T_TASK(cmd)->t_tasks_sg_chained_no, i) {
+ for_each_sg(cmd->t_tasks_sg_chained, sg,
+ cmd->t_tasks_sg_chained_no, i) {
- DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d, magic: 0x%08x\n",
- i, sg, sg_page(sg), sg->length, sg->offset, sg->sg_magic);
+ pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
+ i, sg, sg_page(sg), sg->length, sg->offset);
if (sg_is_chain(sg))
- DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+ pr_debug("SG: %p sg_is_chain=1\n", sg);
if (sg_is_last(sg))
- DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+ pr_debug("SG: %p sg_is_last=1\n", sg);
}
}
EXPORT_SYMBOL(transport_do_task_sg_chain);
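
The chaining done above relies on the scatterlist chain entry: each per-task table is over-allocated by one padding slot, and sg_chain() turns that last slot into a link to the next table. A minimal two-table sketch (chain_two_tables and the sizes are hypothetical, not from this patch):

	#include <linux/scatterlist.h>

	/* Chain table @a (na real entries + 1 padding slot) to table @b. */
	static void chain_two_tables(struct scatterlist *a, unsigned int na,
				     struct scatterlist *b, unsigned int nb)
	{
		sg_init_table(a, na + 1);	/* last slot reserved for the link */
		sg_init_table(b, nb);

		/* ... caller fills a[0..na-1] and b[0..nb-1] with pages ... */

		sg_chain(a, na + 1, b);		/* a's padding entry now points at b */
	}
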
-static int transport_do_se_mem_map(
- struct se_device *dev,
- struct se_task *task,
- struct list_head *se_mem_list,
- void *in_mem,
- struct se_mem *in_se_mem,
- struct se_mem **out_se_mem,
- u32 *se_mem_cnt,
- u32 *task_offset_in)
-{
- u32 task_offset = *task_offset_in;
- int ret = 0;
- /*
- * se_subsystem_api_t->do_se_mem_map is used when internal allocation
- * has been done by the transport plugin.
- */
- if (TRANSPORT(dev)->do_se_mem_map) {
- ret = TRANSPORT(dev)->do_se_mem_map(task, se_mem_list,
- in_mem, in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
- if (ret == 0)
- T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
-
- return ret;
- }
-
- BUG_ON(list_empty(se_mem_list));
- /*
- * This is the normal path for all normal non BIDI and BIDI-COMMAND
- * WRITE payloads.. If we need to do BIDI READ passthrough for
- * TCM/pSCSI the first call to transport_do_se_mem_map ->
- * transport_calc_sg_num() -> transport_map_mem_to_sg() will do the
- * allocation for task->task_sg_bidi, and the subsequent call to
- * transport_do_se_mem_map() from transport_generic_get_cdb_count()
- */
- if (!(task->task_sg_bidi)) {
- /*
- * Assume default that transport plugin speaks preallocated
- * scatterlists.
- */
- if (!(transport_calc_sg_num(task, in_se_mem, task_offset)))
- return -1;
- /*
- * struct se_task->task_sg now contains the struct scatterlist array.
- */
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
- in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
- }
- /*
- * Handle the se_mem_list -> struct task->task_sg_bidi
- * memory map for the extra BIDI READ payload
- */
- return transport_map_mem_to_sg(task, se_mem_list, task->task_sg_bidi,
- in_se_mem, out_se_mem, se_mem_cnt,
- task_offset_in);
-}
-
-static u32 transport_generic_get_cdb_count(
+/*
+ * Break up cmd into chunks transport can handle
+ */
+static int transport_allocate_data_tasks(
struct se_cmd *cmd,
unsigned long long lba,
- u32 sectors,
enum dma_data_direction data_direction,
- struct list_head *mem_list,
- int set_counts)
+ struct scatterlist *sgl,
+ unsigned int sgl_nents)
{
unsigned char *cdb = NULL;
struct se_task *task;
- struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
- struct se_mem *se_mem_bidi = NULL, *se_mem_bidi_lout = NULL;
- struct se_device *dev = SE_DEV(cmd);
- int max_sectors_set = 0, ret;
- u32 task_offset_in = 0, se_mem_cnt = 0, se_mem_bidi_cnt = 0, task_cdbs = 0;
-
- if (!mem_list) {
- printk(KERN_ERR "mem_list is NULL in transport_generic_get"
- "_cdb_count()\n");
- return 0;
- }
- /*
- * While using RAMDISK_DR backstores is the only case where
- * mem_list will ever be empty at this point.
- */
- if (!(list_empty(mem_list)))
- se_mem = list_entry(mem_list->next, struct se_mem, se_list);
- /*
- * Check for extra se_mem_bidi mapping for BIDI-COMMANDs to
- * struct se_task->task_sg_bidi for TCM/pSCSI passthrough operation
- */
- if ((T_TASK(cmd)->t_mem_bidi_list != NULL) &&
- !(list_empty(T_TASK(cmd)->t_mem_bidi_list)) &&
- (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV))
- se_mem_bidi = list_entry(T_TASK(cmd)->t_mem_bidi_list->next,
- struct se_mem, se_list);
-
- while (sectors) {
- DEBUG_VOL("ITT[0x%08x] LBA(%llu) SectorsLeft(%u) EOBJ(%llu)\n",
- CMD_TFO(cmd)->get_task_tag(cmd), lba, sectors,
- transport_dev_end_lba(dev));
+ struct se_device *dev = cmd->se_dev;
+ unsigned long flags;
+ sector_t sectors;
+ int task_count, i, ret;
+ sector_t dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors;
+ u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size;
+ struct scatterlist *sg;
+ struct scatterlist *cmd_sg;
- task = transport_generic_get_task(cmd, data_direction);
- if (!(task))
- goto out;
+ WARN_ON(cmd->data_length % sector_size);
+ sectors = DIV_ROUND_UP(cmd->data_length, sector_size);
+ task_count = DIV_ROUND_UP(sectors, dev_max_sectors);
- transport_set_tasks_sectors(task, dev, lba, sectors,
- &max_sectors_set);
+ cmd_sg = sgl;
+ for (i = 0; i < task_count; i++) {
+ unsigned int task_size;
+ int count;
+
+ task = transport_generic_get_task(cmd, data_direction);
+ if (!task)
+ return -ENOMEM;
task->task_lba = lba;
- lba += task->task_sectors;
- sectors -= task->task_sectors;
- task->task_size = (task->task_sectors *
- DEV_ATTRIB(dev)->block_size);
-
- cdb = TRANSPORT(dev)->get_cdb(task);
- if ((cdb)) {
- memcpy(cdb, T_TASK(cmd)->t_task_cdb,
- scsi_command_size(T_TASK(cmd)->t_task_cdb));
- cmd->transport_split_cdb(task->task_lba,
- &task->task_sectors, cdb);
- }
+ task->task_sectors = min(sectors, dev_max_sectors);
+ task->task_size = task->task_sectors * sector_size;
- /*
- * Perform the SE OBJ plugin and/or Transport plugin specific
- * mapping for T_TASK(cmd)->t_mem_list. And setup the
- * task->task_sg and if necessary task->task_sg_bidi
- */
- ret = transport_do_se_mem_map(dev, task, mem_list,
- NULL, se_mem, &se_mem_lout, &se_mem_cnt,
- &task_offset_in);
- if (ret < 0)
- goto out;
+ cdb = dev->transport->get_cdb(task);
+ BUG_ON(!cdb);
+
+ memcpy(cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
+
+ /* Update new cdb with updated lba/sectors */
+ cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
- se_mem = se_mem_lout;
/*
- * Setup the T_TASK(cmd)->t_mem_bidi_list -> task->task_sg_bidi
- * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI
- *
- * Note that the first call to transport_do_se_mem_map() above will
- * allocate struct se_task->task_sg_bidi in transport_do_se_mem_map()
- * -> transport_calc_sg_num(), and the second here will do the
- * mapping for SCSI READ for BIDI-COMMAND passthrough with TCM/pSCSI.
+ * Check if the fabric module driver is requesting that all
+ * struct se_task->task_sg[] be chained together.. If so,
+ * then allocate an extra padding SG entry for linking and
+ * marking the end of the chained SGL.
+ * Possibly over-allocate task sgl size by using cmd sgl size.
+ * It's so much easier and only a waste when task_count > 1.
+ * That is extremely rare.
*/
- if (task->task_sg_bidi != NULL) {
- ret = transport_do_se_mem_map(dev, task,
- T_TASK(cmd)->t_mem_bidi_list, NULL,
- se_mem_bidi, &se_mem_bidi_lout, &se_mem_bidi_cnt,
- &task_offset_in);
- if (ret < 0)
- goto out;
+ task->task_sg_nents = sgl_nents;
+ if (cmd->se_tfo->task_sg_chaining) {
+ task->task_sg_nents++;
+ task->task_padded_sg = 1;
+ }
- se_mem_bidi = se_mem_bidi_lout;
+ task->task_sg = kmalloc(sizeof(struct scatterlist) *
+ task->task_sg_nents, GFP_KERNEL);
+ if (!task->task_sg) {
+ cmd->se_dev->transport->free_task(task);
+ return -ENOMEM;
}
- task_cdbs++;
- DEBUG_VOL("Incremented task_cdbs(%u) task->task_sg_num(%u)\n",
- task_cdbs, task->task_sg_num);
+ sg_init_table(task->task_sg, task->task_sg_nents);
- if (max_sectors_set) {
- max_sectors_set = 0;
- continue;
+ task_size = task->task_size;
+
+ /* Build new sgl, only up to task_size */
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
+ if (cmd_sg->length > task_size)
+ break;
+
+ *sg = *cmd_sg;
+ task_size -= cmd_sg->length;
+ cmd_sg = sg_next(cmd_sg);
}
- if (!sectors)
- break;
- }
+ lba += task->task_sectors;
+ sectors -= task->task_sectors;
- if (set_counts) {
- atomic_inc(&T_TASK(cmd)->t_fe_count);
- atomic_inc(&T_TASK(cmd)->t_se_count);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
+ /*
+ * Now perform the memory map of task->task_sg[] into backend
+ * subsystem memory..
+ */
+ list_for_each_entry(task, &cmd->t_task_list, t_list) {
+ if (atomic_read(&task->task_sent))
+ continue;
+ if (!dev->transport->map_data_SG)
+ continue;
- DEBUG_VOL("ITT[0x%08x] total %s cdbs(%u)\n",
- CMD_TFO(cmd)->get_task_tag(cmd), (data_direction == DMA_TO_DEVICE)
- ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE", task_cdbs);
+ ret = dev->transport->map_data_SG(task);
+ if (ret < 0)
+ return 0;
+ }
- return task_cdbs;
-out:
- return 0;
+ return task_count;
}
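
To make the split arithmetic concrete (a hedged example assuming block_size = 512 and dev_max_sectors = 1024), a 1 MiB WRITE is carved up as:

	sectors    = DIV_ROUND_UP(1048576, 512)   = 2048
	task_count = DIV_ROUND_UP(2048, 1024)     = 2

so the command becomes two struct se_tasks of 1024 sectors (524288 bytes) each, with each task's SGL copied from its own slice of the command SGL.
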
static int
-transport_map_control_cmd_to_task(struct se_cmd *cmd)
+transport_allocate_control_task(struct se_cmd *cmd)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
unsigned char *cdb;
struct se_task *task;
- int ret;
+ unsigned long flags;
+ int ret = 0;
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM;
- cdb = TRANSPORT(dev)->get_cdb(task);
- if (cdb)
- memcpy(cdb, cmd->t_task->t_task_cdb,
- scsi_command_size(cmd->t_task->t_task_cdb));
+ cdb = dev->transport->get_cdb(task);
+ BUG_ON(!cdb);
+ memcpy(cdb, cmd->t_task_cdb,
+ scsi_command_size(cmd->t_task_cdb));
+ task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents,
+ GFP_KERNEL);
+ if (!task->task_sg) {
+ cmd->se_dev->transport->free_task(task);
+ return -ENOMEM;
+ }
+
+ memcpy(task->task_sg, cmd->t_data_sg,
+ sizeof(struct scatterlist) * cmd->t_data_nents);
task->task_size = cmd->data_length;
- task->task_sg_num =
- (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) ? 1 : 0;
+ task->task_sg_nents = cmd->t_data_nents;
- atomic_inc(&cmd->t_task->t_fe_count);
- atomic_inc(&cmd->t_task->t_se_count);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ list_add_tail(&task->t_list, &cmd->t_task_list);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
- struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
- u32 se_mem_cnt = 0, task_offset = 0;
-
- if (!list_empty(T_TASK(cmd)->t_mem_list))
- se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
- struct se_mem, se_list);
-
- ret = transport_do_se_mem_map(dev, task,
- cmd->t_task->t_mem_list, NULL, se_mem,
- &se_mem_lout, &se_mem_cnt, &task_offset);
- if (ret < 0)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
-
- if (dev->transport->map_task_SG)
- return dev->transport->map_task_SG(task);
- return 0;
- } else if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_NONSG_IO_CDB) {
- if (dev->transport->map_task_non_SG)
- return dev->transport->map_task_non_SG(task);
- return 0;
+ if (dev->transport->map_control_SG)
+ ret = dev->transport->map_control_SG(task);
} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
if (dev->transport->cdb_none)
- return dev->transport->cdb_none(task);
- return 0;
+ ret = dev->transport->cdb_none(task);
} else {
+ pr_err("target: Unknown control cmd type!\n");
BUG();
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
}
+
+ /* Success! Return number of tasks allocated */
+ if (ret == 0)
+ return 1;
+ return ret;
+}
+
+static int transport_allocate_tasks(
+ struct se_cmd *cmd,
+ unsigned long long lba,
+ enum dma_data_direction data_direction,
+ struct scatterlist *sgl,
+ unsigned int sgl_nents)
+{
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
+ return transport_allocate_data_tasks(cmd, lba, data_direction,
+ sgl, sgl_nents);
+ else
+ return transport_allocate_control_task(cmd);
+
}
+
/* transport_generic_new_cmd(): Called from transport_processing_thread()
*
* Allocate storage transport resources from a set of values predefined
@@ -5088,64 +4222,33 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
/*
* Generate struct se_task(s) and/or their payloads for this CDB.
*/
-static int transport_generic_new_cmd(struct se_cmd *cmd)
+int transport_generic_new_cmd(struct se_cmd *cmd)
{
- struct se_portal_group *se_tpg;
- struct se_task *task;
- struct se_device *dev = SE_DEV(cmd);
int ret = 0;
/*
	 * Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
- * to setup beforehand the linked list of physical memory at
- * T_TASK(cmd)->t_mem_list of struct se_mem->se_page
+ * beforehand.
*/
- if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)) {
- ret = transport_allocate_resources(cmd);
+ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
+ cmd->data_length) {
+ ret = transport_generic_get_mem(cmd);
if (ret < 0)
return ret;
}
-
- ret = transport_get_sectors(cmd);
- if (ret < 0)
- return ret;
-
+ /*
+ * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for
+ * control or data CDB types, and perform the map to backend subsystem
+ * code from SGL memory allocated here by transport_generic_get_mem(), or
+	 * via pre-existing SGL memory setup explicitly by fabric module code with
+ * transport_generic_map_mem_to_cmd().
+ */
ret = transport_new_cmd_obj(cmd);
if (ret < 0)
return ret;
-
/*
- * Determine if the calling TCM fabric module is talking to
- * Linux/NET via kernel sockets and needs to allocate a
- * struct iovec array to complete the struct se_cmd
- */
- se_tpg = SE_LUN(cmd)->lun_sep->sep_tpg;
- if (TPG_TFO(se_tpg)->alloc_cmd_iovecs != NULL) {
- ret = TPG_TFO(se_tpg)->alloc_cmd_iovecs(cmd);
- if (ret < 0)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
- }
-
- if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
- list_for_each_entry(task, &T_TASK(cmd)->t_task_list, t_list) {
- if (atomic_read(&task->task_sent))
- continue;
- if (!dev->transport->map_task_SG)
- continue;
-
- ret = dev->transport->map_task_SG(task);
- if (ret < 0)
- return ret;
- }
- } else {
- ret = transport_map_control_cmd_to_task(cmd);
- if (ret < 0)
- return ret;
- }
-
- /*
- * For WRITEs, let the iSCSI Target RX Thread know its buffer is ready..
+ * For WRITEs, let the fabric know its buffer is ready..
* This WRITE struct se_cmd (and all of its associated struct se_task's)
* will be added to the struct se_device execution queue after its WRITE
* data has arrived. (ie: It gets handled by the transport processing
@@ -5162,6 +4265,7 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
transport_execute_tasks(cmd);
return 0;
}
+EXPORT_SYMBOL(transport_generic_new_cmd);
/* transport_generic_process_write():
*
@@ -5169,68 +4273,15 @@ static int transport_generic_new_cmd(struct se_cmd *cmd)
*/
void transport_generic_process_write(struct se_cmd *cmd)
{
-#if 0
- /*
- * Copy SCSI Presented DTL sector(s) from received buffers allocated to
- * original EDTL
- */
- if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
- if (!T_TASK(cmd)->t_tasks_se_num) {
- unsigned char *dst, *buf =
- (unsigned char *)T_TASK(cmd)->t_task_buf;
-
- dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
- if (!(dst)) {
- printk(KERN_ERR "Unable to allocate memory for"
- " WRITE underflow\n");
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- return;
- }
- memcpy(dst, buf, cmd->cmd_spdtl);
-
- kfree(T_TASK(cmd)->t_task_buf);
- T_TASK(cmd)->t_task_buf = dst;
- } else {
- struct scatterlist *sg =
- (struct scatterlist *sg)T_TASK(cmd)->t_task_buf;
- struct scatterlist *orig_sg;
-
- orig_sg = kzalloc(sizeof(struct scatterlist) *
- T_TASK(cmd)->t_tasks_se_num,
- GFP_KERNEL))) {
- if (!(orig_sg)) {
- printk(KERN_ERR "Unable to allocate memory"
- " for WRITE underflow\n");
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- return;
- }
-
- memcpy(orig_sg, T_TASK(cmd)->t_task_buf,
- sizeof(struct scatterlist) *
- T_TASK(cmd)->t_tasks_se_num);
-
- cmd->data_length = cmd->cmd_spdtl;
- /*
- * FIXME, clear out original struct se_task and state
- * information.
- */
- if (transport_generic_new_cmd(cmd) < 0) {
- transport_generic_request_failure(cmd, NULL,
- PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
- kfree(orig_sg);
- return;
- }
-
- transport_memcpy_write_sg(cmd, orig_sg);
- }
- }
-#endif
transport_execute_tasks(cmd);
}
EXPORT_SYMBOL(transport_generic_process_write);
+static int transport_write_pending_qf(struct se_cmd *cmd)
+{
+ return cmd->se_tfo->write_pending(cmd);
+}
+
/* transport_generic_write_pending():
*
*
@@ -5240,24 +4291,26 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
unsigned long flags;
int ret;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
cmd->t_state = TRANSPORT_WRITE_PENDING;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- /*
- * For the TCM control CDBs using a contiguous buffer, do the memcpy
- * from the passed Linux/SCSI struct scatterlist located at
- * T_TASK(se_cmd)->t_task_pt_buf to the contiguous buffer at
- * T_TASK(se_cmd)->t_task_buf.
- */
- if (cmd->se_cmd_flags & SCF_PASSTHROUGH_CONTIG_TO_SG)
- transport_memcpy_read_contig(cmd,
- T_TASK(cmd)->t_task_buf,
- T_TASK(cmd)->t_task_pt_sgl);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (cmd->transport_qf_callback) {
+ ret = cmd->transport_qf_callback(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ else if (ret < 0)
+ return ret;
+
+ cmd->transport_qf_callback = NULL;
+ return 0;
+ }
+
/*
* Clear the se_cmd for WRITE_PENDING status in order to set
- * T_TASK(cmd)->t_transport_active=0 so that transport_generic_handle_data
+ * cmd->t_transport_active=0 so that transport_generic_handle_data
* can be called from HW target mode interrupt code. This is safe
- * to be called with transport_off=1 before the CMD_TFO(cmd)->write_pending
+ * to be called with transport_off=1 before the cmd->se_tfo->write_pending
* because the se_cmd->se_lun pointer is not being cleared.
*/
transport_cmd_check_stop(cmd, 1, 0);
@@ -5266,26 +4319,30 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
* Call the fabric write_pending function here to let the
* frontend know that WRITE buffers are ready.
*/
- ret = CMD_TFO(cmd)->write_pending(cmd);
- if (ret < 0)
+ ret = cmd->se_tfo->write_pending(cmd);
+ if (ret == -EAGAIN)
+ goto queue_full;
+ else if (ret < 0)
return ret;
return PYX_TRANSPORT_WRITE_PENDING;
+
+queue_full:
+ pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+ cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
+ transport_handle_queue_full(cmd, cmd->se_dev,
+ transport_write_pending_qf);
+ return ret;
}
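
The -EAGAIN contract here means a fabric's ->write_pending() can refuse transiently and be retried automatically. A hypothetical fabric-side implementation (a sketch only; my_hw_queue_space and my_fabric_post_buffers are invented helpers):

	/* Called by the core when WRITE buffers should be posted to hardware.
	 * Returning -EAGAIN (rather than a hard error) makes the core park the
	 * command on qf_cmd_list and retry via transport_write_pending_qf(). */
	static int my_fabric_write_pending(struct se_cmd *se_cmd)
	{
		if (!my_hw_queue_space(se_cmd->se_sess))
			return -EAGAIN;		/* transient: queue full, retry later */

		return my_fabric_post_buffers(se_cmd);
	}
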
-/* transport_release_cmd_to_pool():
- *
- *
- */
-void transport_release_cmd_to_pool(struct se_cmd *cmd)
+void transport_release_cmd(struct se_cmd *cmd)
{
- BUG_ON(!T_TASK(cmd));
- BUG_ON(!CMD_TFO(cmd));
+ BUG_ON(!cmd->se_tfo);
transport_free_se_cmd(cmd);
- CMD_TFO(cmd)->release_cmd_to_pool(cmd);
+ cmd->se_tfo->release_cmd(cmd);
}
-EXPORT_SYMBOL(transport_release_cmd_to_pool);
+EXPORT_SYMBOL(transport_release_cmd);
/* transport_generic_free_cmd():
*
@@ -5294,19 +4351,18 @@ EXPORT_SYMBOL(transport_release_cmd_to_pool);
void transport_generic_free_cmd(
struct se_cmd *cmd,
int wait_for_tasks,
- int release_to_pool,
int session_reinstatement)
{
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) || !T_TASK(cmd))
- transport_release_cmd_to_pool(cmd);
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD))
+ transport_release_cmd(cmd);
else {
core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
- if (SE_LUN(cmd)) {
+ if (cmd->se_lun) {
#if 0
- printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
- " SE_LUN(cmd)\n", cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("cmd: %p ITT: 0x%08x contains"
+ " cmd->se_lun\n", cmd,
+ cmd->se_tfo->get_task_tag(cmd));
#endif
transport_lun_remove_cmd(cmd);
}
@@ -5316,8 +4372,7 @@ void transport_generic_free_cmd(
transport_free_dev_tasks(cmd);
- transport_generic_remove(cmd, release_to_pool,
- session_reinstatement);
+ transport_generic_remove(cmd, session_reinstatement);
}
}
EXPORT_SYMBOL(transport_generic_free_cmd);
@@ -5343,43 +4398,36 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
* If the frontend has already requested this struct se_cmd to
* be stopped, we can safely ignore this struct se_cmd.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- if (atomic_read(&T_TASK(cmd)->t_transport_stop)) {
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
- DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
- " TRUE, skipping\n", CMD_TFO(cmd)->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (atomic_read(&cmd->t_transport_stop)) {
+ atomic_set(&cmd->transport_lun_stop, 0);
+ pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
+ " TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
transport_cmd_check_stop(cmd, 1, 0);
- return -1;
+ return -EPERM;
}
- atomic_set(&T_TASK(cmd)->transport_lun_fe_stop, 1);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ atomic_set(&cmd->transport_lun_fe_stop, 1);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
ret = transport_stop_tasks_for_cmd(cmd);
- DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
- " %d\n", cmd, T_TASK(cmd)->t_task_cdbs, ret);
+ pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+ " %d\n", cmd, cmd->t_task_list_num, ret);
if (!ret) {
- DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
- wait_for_completion(&T_TASK(cmd)->transport_lun_stop_comp);
- DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+ cmd->se_tfo->get_task_tag(cmd));
+ wait_for_completion(&cmd->transport_lun_stop_comp);
+ pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+ cmd->se_tfo->get_task_tag(cmd));
}
- transport_remove_cmd_from_queue(cmd, SE_DEV(cmd)->dev_queue_obj);
+ transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
return 0;
}
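The stop handshake in transport_lun_wait_for_tasks() follows a pattern that recurs throughout this file: raise an atomic flag under t_state_lock, kick the processing thread, then block on a completion that the other side fires once it has quiesced. A condensed sketch using the field names from the diff (the two helper functions are illustrative only):

	/* Requester: ask the command to stop, then wait for it. */
	static void example_stop_and_wait(struct se_cmd *cmd)
	{
		atomic_set(&cmd->transport_lun_fe_stop, 1);
		wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
		wait_for_completion(&cmd->transport_lun_stop_comp);
	}

	/* Processing side: release the waiter once tasks are stopped. */
	static void example_quiesced(struct se_cmd *cmd)
	{
		complete(&cmd->transport_lun_stop_comp);
	}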
-/* #define DEBUG_CLEAR_LUN */
-#ifdef DEBUG_CLEAR_LUN
-#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CLEAR_L(x...)
-#endif
-
static void __transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct se_cmd *cmd = NULL;
@@ -5389,66 +4437,59 @@ static void __transport_clear_lun_from_sessions(struct se_lun *lun)
* Initiator Port.
*/
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
- while (!list_empty_careful(&lun->lun_cmd_list)) {
- cmd = list_entry(lun->lun_cmd_list.next,
- struct se_cmd, se_lun_list);
- list_del(&cmd->se_lun_list);
-
- if (!(T_TASK(cmd))) {
- printk(KERN_ERR "ITT: 0x%08x, T_TASK(cmd) = NULL"
- "[i,t]_state: %u/%u\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
- BUG();
- }
- atomic_set(&T_TASK(cmd)->transport_lun_active, 0);
+ while (!list_empty(&lun->lun_cmd_list)) {
+ cmd = list_first_entry(&lun->lun_cmd_list,
+ struct se_cmd, se_lun_node);
+ list_del(&cmd->se_lun_node);
+
+ atomic_set(&cmd->transport_lun_active, 0);
/*
* This will notify iscsi_target_transport.c:
* transport_cmd_check_stop() that a LUN shutdown is in
* progress for the iscsi_cmd_t.
*/
- spin_lock(&T_TASK(cmd)->t_state_lock);
- DEBUG_CLEAR_L("SE_LUN[%d] - Setting T_TASK(cmd)->transport"
+ spin_lock(&cmd->t_state_lock);
+ pr_debug("SE_LUN[%d] - Setting cmd->transport"
"_lun_stop for ITT: 0x%08x\n",
- SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 1);
- spin_unlock(&T_TASK(cmd)->t_state_lock);
+ cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
+ atomic_set(&cmd->transport_lun_stop, 1);
+ spin_unlock(&cmd->t_state_lock);
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
- if (!(SE_LUN(cmd))) {
- printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state);
+ if (!cmd->se_lun) {
+ pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
BUG();
}
/*
* If the Storage engine still owns the iscsi_cmd_t, determine
* and/or stop its context.
*/
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
- "_lun_wait_for_tasks()\n", SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
+ "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
- if (transport_lun_wait_for_tasks(cmd, SE_LUN(cmd)) < 0) {
+ if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
"_wait_for_tasks(): SUCCESS\n",
- SE_LUN(cmd)->unpacked_lun,
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_lun->unpacked_lun,
+ cmd->se_tfo->get_task_tag(cmd));
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
- if (!(atomic_read(&T_TASK(cmd)->transport_dev_active))) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+ if (!atomic_read(&cmd->transport_dev_active)) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
goto check_cond;
}
- atomic_set(&T_TASK(cmd)->transport_dev_active, 0);
+ atomic_set(&cmd->transport_dev_active, 0);
transport_all_task_dev_remove_state(cmd);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
transport_free_dev_tasks(cmd);
/*
@@ -5465,24 +4506,24 @@ check_cond:
* be released, notify the waiting thread now that LU has
* finished accessing it.
*/
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, cmd_flags);
- if (atomic_read(&T_TASK(cmd)->transport_lun_fe_stop)) {
- DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+ spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
+ if (atomic_read(&cmd->transport_lun_fe_stop)) {
+ pr_debug("SE_LUN[%d] - Detected FE stop for"
" struct se_cmd: %p ITT: 0x%08x\n",
lun->unpacked_lun,
- cmd, CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd, cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
+ spin_unlock_irqrestore(&cmd->t_state_lock,
cmd_flags);
transport_cmd_check_stop(cmd, 1, 0);
- complete(&T_TASK(cmd)->transport_lun_fe_stop_comp);
+ complete(&cmd->transport_lun_fe_stop_comp);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
continue;
}
- DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
- lun->unpacked_lun, CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+ lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, cmd_flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
}
spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
@@ -5502,11 +4543,11 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)
{
struct task_struct *kt;
- kt = kthread_run(transport_clear_lun_thread, (void *)lun,
+ kt = kthread_run(transport_clear_lun_thread, lun,
"tcm_cl_%u", lun->unpacked_lun);
if (IS_ERR(kt)) {
- printk(KERN_ERR "Unable to start clear_lun thread\n");
- return -1;
+ pr_err("Unable to start clear_lun thread\n");
+ return PTR_ERR(kt);
}
wait_for_completion(&lun->lun_shutdown_comp);
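kthread_run() encodes failure as an ERR_PTR pointer, so the rewritten error path preserves the real reason (typically -ENOMEM) instead of collapsing it to -1. A hedged sketch of the idiom (the wrapper is hypothetical; transport_clear_lun_thread is the thread function from this hunk):

	#include <linux/kthread.h>
	#include <linux/err.h>

	static int example_start_clear_lun(struct se_lun *lun)
	{
		struct task_struct *kt;

		kt = kthread_run(transport_clear_lun_thread, lun,
				"tcm_cl_%u", lun->unpacked_lun);
		if (IS_ERR(kt))
			return PTR_ERR(kt);	/* real errno, not -1 */
		return 0;
	}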
@@ -5528,20 +4569,20 @@ static void transport_generic_wait_for_tasks(
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req))
return;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* If we are already stopped due to an external event (ie: LUN shutdown)
* sleep until the connection can have the passed struct se_cmd back.
- * The T_TASK(cmd)->transport_lun_stopped_sem will be upped by
+ * The cmd->transport_lun_stopped_sem will be upped by
* transport_clear_lun_from_sessions() once the ConfigFS context caller
* has completed its operation on the struct se_cmd.
*/
- if (atomic_read(&T_TASK(cmd)->transport_lun_stop)) {
+ if (atomic_read(&cmd->transport_lun_stop)) {
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
- " wait_for_completion(&T_TASK(cmd)transport_lun_fe"
+ pr_debug("wait_for_tasks: Stopping"
+ " wait_for_completion(&cmd->t_tasktransport_lun_fe"
"_stop_comp); for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
/*
* There is a special case for WRITES where a FE exception +
* LUN shutdown means ConfigFS context is still sleeping on
@@ -5549,10 +4590,10 @@ static void transport_generic_wait_for_tasks(
* We go ahead and up transport_lun_stop_comp just to be sure
* here.
*/
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
- complete(&T_TASK(cmd)->transport_lun_stop_comp);
- wait_for_completion(&T_TASK(cmd)->transport_lun_fe_stop_comp);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ complete(&cmd->transport_lun_stop_comp);
+ wait_for_completion(&cmd->transport_lun_fe_stop_comp);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
transport_all_task_dev_remove_state(cmd);
/*
@@ -5560,44 +4601,44 @@ static void transport_generic_wait_for_tasks(
* struct se_cmd, now owns the structure and can be released through
* normal means below.
*/
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
- " wait_for_completion(&T_TASK(cmd)transport_lun_fe_"
+ pr_debug("wait_for_tasks: Stopped"
+ " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
"stop_comp); for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->se_tfo->get_task_tag(cmd));
- atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
+ atomic_set(&cmd->transport_lun_stop, 0);
}
- if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
- atomic_read(&T_TASK(cmd)->t_transport_aborted))
+ if (!atomic_read(&cmd->t_transport_active) ||
+ atomic_read(&cmd->t_transport_aborted))
goto remove;
- atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
+ atomic_set(&cmd->t_transport_stop, 1);
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
- " = TRUE\n", cmd, CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
+ " = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state);
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wake_up_interruptible(&SE_DEV(cmd)->dev_queue_obj->thread_wq);
+ wake_up_interruptible(&cmd->se_dev->dev_queue_obj.thread_wq);
- wait_for_completion(&T_TASK(cmd)->t_transport_stop_comp);
+ wait_for_completion(&cmd->t_transport_stop_comp);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_set(&T_TASK(cmd)->t_transport_active, 0);
- atomic_set(&T_TASK(cmd)->t_transport_stop, 0);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_set(&cmd->t_transport_active, 0);
+ atomic_set(&cmd->t_transport_stop, 0);
- DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
- "&T_TASK(cmd)->t_transport_stop_comp) for ITT: 0x%08x\n",
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("wait_for_tasks: Stopped wait_for_compltion("
+ "&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
+ cmd->se_tfo->get_task_tag(cmd));
remove:
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!remove_cmd)
return;
- transport_generic_free_cmd(cmd, 0, 0, session_reinstatement);
+ transport_generic_free_cmd(cmd, 0, session_reinstatement);
}
static int transport_get_sense_codes(
@@ -5632,13 +4673,13 @@ int transport_send_check_condition_and_sense(
int offset;
u8 asc = 0, ascq = 0;
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return 0;
}
cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
- spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
if (!reason && from_transport)
goto after_reason;
@@ -5651,7 +4692,7 @@ int transport_send_check_condition_and_sense(
* TRANSPORT_SENSE_BUFFER is now set to SCSI_SENSE_BUFFERSIZE
* from include/scsi/scsi_cmnd.h
*/
- offset = CMD_TFO(cmd)->set_fabric_sense_len(cmd,
+ offset = cmd->se_tfo->set_fabric_sense_len(cmd,
TRANSPORT_SENSE_BUFFER);
/*
* Actual SENSE DATA, see SPC-3 7.23.2 SPC_SENSE_KEY_OFFSET uses
@@ -5788,8 +4829,7 @@ int transport_send_check_condition_and_sense(
cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset;
after_reason:
- CMD_TFO(cmd)->queue_status(cmd);
- return 0;
+ return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
@@ -5797,18 +4837,18 @@ int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
{
int ret = 0;
- if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
- if (!(send_status) ||
+ if (atomic_read(&cmd->t_transport_aborted) != 0) {
+ if (!send_status ||
(cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
return 1;
#if 0
- printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
" status for CDB: 0x%02x ITT: 0x%08x\n",
- T_TASK(cmd)->t_task_cdb[0],
- CMD_TFO(cmd)->get_task_tag(cmd));
+ cmd->t_task_cdb[0],
+ cmd->se_tfo->get_task_tag(cmd));
#endif
cmd->se_cmd_flags |= SCF_SENT_DELAYED_TAS;
- CMD_TFO(cmd)->queue_status(cmd);
+ cmd->se_tfo->queue_status(cmd);
ret = 1;
}
return ret;
@@ -5824,8 +4864,8 @@ void transport_send_task_abort(struct se_cmd *cmd)
* queued back to fabric module by transport_check_aborted_status().
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
- if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
- atomic_inc(&T_TASK(cmd)->t_transport_aborted);
+ if (cmd->se_tfo->write_pending_status(cmd) != 0) {
+ atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_new_cmd_failure(cmd);
@@ -5834,11 +4874,11 @@ void transport_send_task_abort(struct se_cmd *cmd)
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
#if 0
- printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
- " ITT: 0x%08x\n", T_TASK(cmd)->t_task_cdb[0],
- CMD_TFO(cmd)->get_task_tag(cmd));
+ pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+ " ITT: 0x%08x\n", cmd->t_task_cdb[0],
+ cmd->se_tfo->get_task_tag(cmd));
#endif
- CMD_TFO(cmd)->queue_status(cmd);
+ cmd->se_tfo->queue_status(cmd);
}
/* transport_generic_do_tmr():
@@ -5847,14 +4887,12 @@ void transport_send_task_abort(struct se_cmd *cmd)
*/
int transport_generic_do_tmr(struct se_cmd *cmd)
{
- struct se_cmd *ref_cmd;
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
int ret;
switch (tmr->function) {
case TMR_ABORT_TASK:
- ref_cmd = tmr->ref_cmd;
tmr->response = TMR_FUNCTION_REJECTED;
break;
case TMR_ABORT_TASK_SET:
@@ -5874,14 +4912,14 @@ int transport_generic_do_tmr(struct se_cmd *cmd)
tmr->response = TMR_FUNCTION_REJECTED;
break;
default:
- printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+ pr_err("Uknown TMR function: 0x%02x.\n",
tmr->function);
tmr->response = TMR_FUNCTION_REJECTED;
break;
}
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
- CMD_TFO(cmd)->queue_tm_rsp(cmd);
+ cmd->se_tfo->queue_tm_rsp(cmd);
transport_cmd_check_stop(cmd, 2, 0);
return 0;
@@ -5911,62 +4949,54 @@ transport_get_task_from_state_list(struct se_device *dev)
static void transport_processing_shutdown(struct se_device *dev)
{
struct se_cmd *cmd;
- struct se_queue_req *qr;
struct se_task *task;
- u8 state;
unsigned long flags;
/*
* Empty the struct se_device's struct se_task state list.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
while ((task = transport_get_task_from_state_list(dev))) {
- if (!(TASK_CMD(task))) {
- printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
+ if (!task->task_se_cmd) {
+ pr_err("task->task_se_cmd is NULL!\n");
continue;
}
- cmd = TASK_CMD(task);
+ cmd = task->task_se_cmd;
- if (!T_TASK(cmd)) {
- printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
- " %p ITT: 0x%08x\n", task, cmd,
- CMD_TFO(cmd)->get_task_tag(cmd));
- continue;
- }
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
- DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
- " i_state/def_i_state: %d/%d, t_state/def_t_state:"
+ pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+ " i_state: %d, t_state/def_t_state:"
" %d/%d cdb: 0x%02x\n", cmd, task,
- CMD_TFO(cmd)->get_task_tag(cmd), cmd->cmd_sn,
- CMD_TFO(cmd)->get_cmd_state(cmd), cmd->deferred_i_state,
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
cmd->t_state, cmd->deferred_t_state,
- T_TASK(cmd)->t_task_cdb[0]);
- DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+ cmd->t_task_cdb[0]);
+ pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
- CMD_TFO(cmd)->get_task_tag(cmd),
- T_TASK(cmd)->t_task_cdbs,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
- atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
- atomic_read(&T_TASK(cmd)->t_transport_active),
- atomic_read(&T_TASK(cmd)->t_transport_stop),
- atomic_read(&T_TASK(cmd)->t_transport_sent));
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->t_task_list_num,
+ atomic_read(&cmd->t_task_cdbs_left),
+ atomic_read(&cmd->t_task_cdbs_sent),
+ atomic_read(&cmd->t_transport_active),
+ atomic_read(&cmd->t_transport_stop),
+ atomic_read(&cmd->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+ pr_debug("Waiting for task: %p to shutdown for dev:"
" %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
- DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+ pr_debug("Completed task: %p shutdown for dev: %p\n",
task, dev);
- spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
- atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ atomic_dec(&cmd->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
@@ -5976,72 +5006,72 @@ static void transport_processing_shutdown(struct se_device *dev)
}
__transport_stop_task_timer(task, &flags);
- if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
+ if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
- DEBUG_DO("Skipping task: %p, dev: %p for"
+ pr_debug("Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
- atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
+ atomic_read(&cmd->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
- DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+ if (atomic_read(&cmd->t_transport_active)) {
+ pr_debug("got t_transport_active = 1 for task: %p, dev:"
" %p\n", task, dev);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (atomic_read(&cmd->t_fe_count)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_send_check_condition_and_sense(
cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE,
0);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop(cmd, 1, 0);
} else {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
- DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+ pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
task, dev);
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ if (atomic_read(&cmd->t_fe_count)) {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_send_check_condition_and_sense(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
transport_cmd_check_stop(cmd, 1, 0);
} else {
spin_unlock_irqrestore(
- &T_TASK(cmd)->t_state_lock, flags);
+ &cmd->t_state_lock, flags);
transport_remove_cmd_from_queue(cmd,
- SE_DEV(cmd)->dev_queue_obj);
+ &cmd->se_dev->dev_queue_obj);
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
spin_lock_irqsave(&dev->execute_task_lock, flags);
@@ -6050,18 +5080,12 @@ static void transport_processing_shutdown(struct se_device *dev)
/*
* Empty the struct se_device's struct se_cmd list.
*/
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
- while ((qr = __transport_get_qr_from_queue(dev->dev_queue_obj))) {
- spin_unlock_irqrestore(
- &dev->dev_queue_obj->cmd_queue_lock, flags);
- cmd = (struct se_cmd *)qr->cmd;
- state = qr->state;
- kfree(qr);
-
- DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
- cmd, state);
-
- if (atomic_read(&T_TASK(cmd)->t_fe_count)) {
+ while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
+
+ pr_debug("From Device Queue: cmd: %p t_state: %d\n",
+ cmd, cmd->t_state);
+
+ if (atomic_read(&cmd->t_fe_count)) {
transport_send_check_condition_and_sense(cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -6070,11 +5094,9 @@ static void transport_processing_shutdown(struct se_device *dev)
} else {
transport_lun_remove_cmd(cmd);
if (transport_cmd_check_stop(cmd, 1, 0))
- transport_generic_remove(cmd, 0, 0);
+ transport_generic_remove(cmd, 0);
}
- spin_lock_irqsave(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
- spin_unlock_irqrestore(&dev->dev_queue_obj->cmd_queue_lock, flags);
}
/* transport_processing_thread():
@@ -6083,16 +5105,15 @@ static void transport_processing_shutdown(struct se_device *dev)
*/
static int transport_processing_thread(void *param)
{
- int ret, t_state;
+ int ret;
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- struct se_queue_req *qr;
set_user_nice(current, -20);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(dev->dev_queue_obj->thread_wq,
- atomic_read(&dev->dev_queue_obj->queue_cnt) ||
+ ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
+ atomic_read(&dev->dev_queue_obj.queue_cnt) ||
kthread_should_stop());
if (ret < 0)
goto out;
@@ -6108,22 +5129,18 @@ static int transport_processing_thread(void *param)
get_cmd:
__transport_execute_tasks(dev);
- qr = transport_get_qr_from_queue(dev->dev_queue_obj);
- if (!(qr))
+ cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
+ if (!cmd)
continue;
- cmd = (struct se_cmd *)qr->cmd;
- t_state = qr->state;
- kfree(qr);
-
- switch (t_state) {
+ switch (cmd->t_state) {
case TRANSPORT_NEW_CMD_MAP:
- if (!(CMD_TFO(cmd)->new_cmd_map)) {
- printk(KERN_ERR "CMD_TFO(cmd)->new_cmd_map is"
+ if (!cmd->se_tfo->new_cmd_map) {
+ pr_err("cmd->se_tfo->new_cmd_map is"
" NULL for TRANSPORT_NEW_CMD_MAP\n");
BUG();
}
- ret = CMD_TFO(cmd)->new_cmd_map(cmd);
+ ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, NULL,
@@ -6134,7 +5151,9 @@ get_cmd:
/* Fall through */
case TRANSPORT_NEW_CMD:
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
+ if (ret == -EAGAIN)
+ break;
+ else if (ret < 0) {
cmd->transport_error_status = ret;
transport_generic_request_failure(cmd, NULL,
0, (cmd->data_direction !=
@@ -6149,10 +5168,10 @@ get_cmd:
transport_generic_complete_ok(cmd);
break;
case TRANSPORT_REMOVE:
- transport_generic_remove(cmd, 1, 0);
+ transport_generic_remove(cmd, 0);
break;
case TRANSPORT_FREE_CMD_INTR:
- transport_generic_free_cmd(cmd, 0, 1, 0);
+ transport_generic_free_cmd(cmd, 0, 0);
break;
case TRANSPORT_PROCESS_TMR:
transport_generic_do_tmr(cmd);
@@ -6164,13 +5183,16 @@ get_cmd:
transport_stop_all_task_timers(cmd);
transport_generic_request_timeout(cmd);
break;
+ case TRANSPORT_COMPLETE_QF_WP:
+ transport_generic_write_pending(cmd);
+ break;
default:
- printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+ pr_err("Unknown t_state: %d deferred_t_state:"
" %d for ITT: 0x%08x i_state: %d on SE LUN:"
- " %u\n", t_state, cmd->deferred_t_state,
- CMD_TFO(cmd)->get_task_tag(cmd),
- CMD_TFO(cmd)->get_cmd_state(cmd),
- SE_LUN(cmd)->unpacked_lun);
+ " %u\n", cmd->t_state, cmd->deferred_t_state,
+ cmd->se_tfo->get_task_tag(cmd),
+ cmd->se_tfo->get_cmd_state(cmd),
+ cmd->se_lun->unpacked_lun);
BUG();
}
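The dispatch above also shows the payoff of dropping struct se_queue_req: the command itself is queued via an embedded list node and carries its own state in cmd->t_state, so each pass through the thread saves a kmalloc/kfree pair. A sketch of what transport_get_cmd_from_queue() is assumed to do (the list field name is an assumption based on this series):

	static struct se_cmd *example_get_cmd(struct se_queue_obj *qobj)
	{
		struct se_cmd *cmd = NULL;
		unsigned long flags;

		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
		if (!list_empty(&qobj->qobj_list)) {
			cmd = list_first_entry(&qobj->qobj_list,
					struct se_cmd, se_queue_node);
			list_del_init(&cmd->se_queue_node);
			atomic_dec(&qobj->queue_cnt);
		}
		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
		return cmd;
	}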
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index df355176a37..31e3c652527 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -49,15 +49,15 @@ int core_scsi3_ua_check(
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
- if (!(sess))
+ if (!sess)
return 0;
nacl = sess->se_node_acl;
- if (!(nacl))
+ if (!nacl)
return 0;
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count)))
+ if (!atomic_read(&deve->ua_count))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
@@ -80,10 +80,10 @@ int core_scsi3_ua_check(
case REQUEST_SENSE:
return 0;
default:
- return -1;
+ return -EINVAL;
}
- return -1;
+ return -EINVAL;
}
int core_scsi3_ua_allocate(
@@ -97,13 +97,13 @@ int core_scsi3_ua_allocate(
/*
* PASSTHROUGH OPS
*/
- if (!(nacl))
- return -1;
+ if (!nacl)
+ return -EINVAL;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
- if (!(ua)) {
- printk(KERN_ERR "Unable to allocate struct se_ua\n");
- return -1;
+ if (!ua) {
+ pr_err("Unable to allocate struct se_ua\n");
+ return -ENOMEM;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
@@ -177,9 +177,9 @@ int core_scsi3_ua_allocate(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+ pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
- TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
+ nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
asc, ascq);
atomic_inc(&deve->ua_count);
@@ -208,23 +208,23 @@ void core_scsi3_ua_for_check_condition(
u8 *asc,
u8 *ascq)
{
- struct se_device *dev = SE_DEV(cmd);
+ struct se_device *dev = cmd->se_dev;
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!(sess))
+ if (!sess)
return;
nacl = sess->se_node_acl;
- if (!(nacl))
+ if (!nacl)
return;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count))) {
+ if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
@@ -240,7 +240,7 @@ void core_scsi3_ua_for_check_condition(
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
- if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
+ if (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
@@ -264,13 +264,13 @@ void core_scsi3_ua_for_check_condition(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+ pr_debug("[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
- TPG_TFO(nacl->se_tpg)->get_fabric_name(),
- (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
- "Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
- cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
+ nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
+ (dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl != 0) ? "Reporting" :
+ "Releasing", dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl,
+ cmd->orig_fe_lun, cmd->t_task_cdb[0], *asc, *ascq);
}
int core_scsi3_ua_clear_for_request_sense(
@@ -284,18 +284,18 @@ int core_scsi3_ua_clear_for_request_sense(
struct se_ua *ua = NULL, *ua_p;
int head = 1;
- if (!(sess))
- return -1;
+ if (!sess)
+ return -EINVAL;
nacl = sess->se_node_acl;
- if (!(nacl))
- return -1;
+ if (!nacl)
+ return -EINVAL;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
- if (!(atomic_read(&deve->ua_count))) {
+ if (!atomic_read(&deve->ua_count)) {
spin_unlock_irq(&nacl->device_list_lock);
- return -1;
+ return -EPERM;
}
/*
* The highest priority Unit Attentions are placed at the head of the
@@ -323,10 +323,10 @@ int core_scsi3_ua_clear_for_request_sense(
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
- printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+ pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
- " ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
+ " ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
- return (head) ? -1 : 0;
+ return (head) ? -EPERM : 0;
}
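The UA conversions follow the series-wide errno convention: -EINVAL when there is no session or node ACL to check against, -ENOMEM for allocation failure, and -EPERM when no UNIT ATTENTION is actually pending. An illustrative (hypothetical) caller branching on the cause:

	static void example_check_ua(struct se_cmd *cmd, u8 *asc, u8 *ascq)
	{
		int ret = core_scsi3_ua_clear_for_request_sense(cmd, asc, ascq);

		if (ret == -EPERM)
			pr_debug("no UNIT ATTENTION pending\n");
		else if (ret == -EINVAL)
			pr_debug("no session or node ACL\n");
	}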
diff --git a/drivers/target/tcm_fc/Makefile b/drivers/target/tcm_fc/Makefile
index 7a5c2b64cf6..20b14bb087c 100644
--- a/drivers/target/tcm_fc/Makefile
+++ b/drivers/target/tcm_fc/Makefile
@@ -1,15 +1,6 @@
-EXTRA_CFLAGS += -I$(srctree)/drivers/target/ \
- -I$(srctree)/drivers/scsi/ \
- -I$(srctree)/include/scsi/ \
- -I$(srctree)/drivers/target/tcm_fc/
-
-tcm_fc-y += tfc_cmd.o \
- tfc_conf.o \
- tfc_io.o \
- tfc_sess.o
+tcm_fc-y += tfc_cmd.o \
+ tfc_conf.o \
+ tfc_io.o \
+ tfc_sess.o
obj-$(CONFIG_TCM_FC) += tcm_fc.o
-
-ifdef CONFIGFS_TCM_FC_DEBUG
-EXTRA_CFLAGS += -DTCM_FC_DEBUG
-endif
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 7b82f1b7fef..f7fff7ed63c 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -23,30 +23,6 @@
#define FT_TPG_NAMELEN 32 /* max length of TPG name */
#define FT_LUN_NAMELEN 32 /* max length of LUN name */
-/*
- * Debug options.
- */
-#define FT_DEBUG_CONF 0x01 /* configuration messages */
-#define FT_DEBUG_SESS 0x02 /* session messages */
-#define FT_DEBUG_TM 0x04 /* TM operations */
-#define FT_DEBUG_IO 0x08 /* I/O commands */
-#define FT_DEBUG_DATA 0x10 /* Data transfer */
-
-extern unsigned int ft_debug_logging; /* debug options */
-
-#define FT_DEBUG(mask, fmt, args...) \
- do { \
- if (ft_debug_logging & (mask)) \
- printk(KERN_INFO "tcm_fc: %s: " fmt, \
- __func__, ##args); \
- } while (0)
-
-#define FT_CONF_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
-#define FT_SESS_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
-#define FT_TM_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
-#define FT_IO_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
-#define FT_DATA_DBG(fmt, args...) FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
-
struct ft_transport_id {
__u8 format;
__u8 __resvd1[7];
@@ -195,7 +171,6 @@ int ft_write_pending(struct se_cmd *);
int ft_write_pending_status(struct se_cmd *);
u32 ft_get_task_tag(struct se_cmd *);
int ft_get_cmd_state(struct se_cmd *);
-void ft_new_cmd_failure(struct se_cmd *);
int ft_queue_tm_resp(struct se_cmd *);
int ft_is_state_remove(struct se_cmd *);
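With the FT_DEBUG mask macros removed, every tcm_fc message goes through pr_debug(), which compiles away unless DEBUG or CONFIG_DYNAMIC_DEBUG is enabled; under dynamic debug the old compile-time area masks become runtime selection, e.g. echo 'module tcm_fc +p' > /sys/kernel/debug/dynamic_debug/control. A small sketch of the replacement style (the function is illustrative):

	static void example_trace_sess(u32 port_id, void *sess)
	{
		/* enabled per call site at runtime via dynamic debug */
		pr_debug("port_id %x found %p\n", port_id, sess);
	}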
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index b2a106729d4..a9e9a31da11 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -59,33 +59,30 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
struct fc_exch *ep;
struct fc_seq *sp;
struct se_cmd *se_cmd;
- struct se_mem *mem;
- struct se_transport_task *task;
-
- if (!(ft_debug_logging & FT_DEBUG_IO))
- return;
+ struct scatterlist *sg;
+ int count;
se_cmd = &cmd->se_cmd;
- printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
+ pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
- printk(KERN_INFO "%s: cmd %p cdb %p\n",
+ pr_debug("%s: cmd %p cdb %p\n",
caller, cmd, cmd->cdb);
- printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
-
- task = T_TASK(se_cmd);
- printk(KERN_INFO "%s: cmd %p task %p se_num %u buf %p len %u se_cmd_flags <0x%x>\n",
- caller, cmd, task, task->t_tasks_se_num,
- task->t_task_buf, se_cmd->data_length, se_cmd->se_cmd_flags);
- if (task->t_mem_list)
- list_for_each_entry(mem, task->t_mem_list, se_list)
- printk(KERN_INFO "%s: cmd %p mem %p page %p "
- "len 0x%x off 0x%x\n",
- caller, cmd, mem,
- mem->se_page, mem->se_len, mem->se_off);
+ pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
+
+ pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+ caller, cmd, se_cmd->t_data_nents,
+ se_cmd->data_length, se_cmd->se_cmd_flags);
+
+ for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
+ pr_debug("%s: cmd %p sg %p page %p "
+ "len 0x%x off 0x%x\n",
+ caller, cmd, sg,
+ sg_page(sg), sg->length, sg->offset);
+
sp = cmd->seq;
if (sp) {
ep = fc_seq_exch(sp);
- printk(KERN_INFO "%s: cmd %p sid %x did %x "
+ pr_debug("%s: cmd %p sid %x did %x "
"ox_id %x rx_id %x seq_id %x e_stat %x\n",
caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
sp->id, ep->esb_stat);
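ft_dump_cmd() now iterates the command's scatterlist directly with for_each_sg() instead of walking the removed se_mem list. A self-contained sketch of the loop, assuming only standard scatterlist helpers:

	#include <linux/scatterlist.h>

	static void example_dump_sg(struct scatterlist *sgl, unsigned int nents)
	{
		struct scatterlist *sg;
		int i;

		for_each_sg(sgl, sg, nents, i)
			pr_debug("sg %p page %p len 0x%x off 0x%x\n",
				sg, sg_page(sg), sg->length, sg->offset);
	}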
@@ -96,15 +93,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
{
- struct se_queue_obj *qobj;
+ struct ft_tpg *tpg = sess->tport->tpg;
+ struct se_queue_obj *qobj = &tpg->qobj;
unsigned long flags;
-	qobj = &sess->tport->tpg->qobj;
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
- spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
atomic_inc(&qobj->queue_cnt);
- wake_up_interruptible(&qobj->thread_wq);
+ spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+ wake_up_process(tpg->thread);
}
static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
@@ -149,7 +148,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)
void ft_check_stop_free(struct se_cmd *se_cmd)
{
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
}
/*
@@ -256,15 +255,14 @@ int ft_write_pending(struct se_cmd *se_cmd)
(fh->fh_r_ctl == FC_RCTL_DD_DATA_DESC)) {
if (se_cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
/*
- * Map se_mem list to scatterlist, so that
- * DDP can be setup. DDP setup function require
- * scatterlist. se_mem_list is internal to
- * TCM/LIO target
+ * cmd may have been broken up into multiple
+ * tasks. Link their sgs together so we can
+ * operate on them all at once.
*/
transport_do_task_sg_chain(se_cmd);
- cmd->sg = T_TASK(se_cmd)->t_tasks_sg_chained;
+ cmd->sg = se_cmd->t_tasks_sg_chained;
cmd->sg_cnt =
- T_TASK(se_cmd)->t_tasks_sg_chained_no;
+ se_cmd->t_tasks_sg_chained_no;
}
if (cmd->sg && lport->tt.ddp_setup(lport, ep->xid,
cmd->sg, cmd->sg_cnt))
@@ -294,12 +292,6 @@ int ft_is_state_remove(struct se_cmd *se_cmd)
return 0; /* XXX TBD */
}
-void ft_new_cmd_failure(struct se_cmd *se_cmd)
-{
- /* XXX TBD */
- printk(KERN_INFO "%s: se_cmd %p\n", __func__, se_cmd);
-}
-
/*
* FC sequence response handler for follow-on sequences (data) and aborts.
*/
@@ -312,7 +304,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
/* XXX need to find cmd if queued */
cmd->se_cmd.t_state = TRANSPORT_REMOVE;
cmd->seq = NULL;
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
return;
}
@@ -326,10 +318,10 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)
case FC_RCTL_DD_SOL_CTL: /* transfer ready */
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
default:
- printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
break;
}
}
@@ -351,7 +343,7 @@ static void ft_send_resp_status(struct fc_lport *lport,
struct fcp_resp_rsp_info *info;
fh = fc_frame_header_get(rx_fp);
- FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
+ pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
len = sizeof(*fcp);
if (status == SAM_STAT_GOOD)
@@ -421,15 +413,15 @@ static void ft_send_tm(struct ft_cmd *cmd)
* FCP4r01 indicates having a combination of
* tm_flags set is invalid.
*/
- FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+ pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
return;
}
- FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
+ pr_debug("alloc tm cmd fn %d\n", tm_func);
tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
if (!tmr) {
- FT_TM_DBG("alloc failed\n");
+ pr_debug("alloc failed\n");
ft_send_resp_code(cmd, FCP_TMF_FAILED);
return;
}
@@ -438,20 +430,20 @@ static void ft_send_tm(struct ft_cmd *cmd)
switch (fcp->fc_tm_flags) {
case FCP_TMF_LUN_RESET:
cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
- if (transport_get_lun_for_tmr(&cmd->se_cmd, cmd->lun) < 0) {
+ if (transport_lookup_tmr_lun(&cmd->se_cmd, cmd->lun) < 0) {
/*
* Make sure to clean up newly allocated TMR request
* since "unable to handle TMR request because failed
* to get to LUN"
*/
- FT_TM_DBG("Failed to get LUN for TMR func %d, "
+ pr_debug("Failed to get LUN for TMR func %d, "
"se_cmd %p, unpacked_lun %d\n",
tm_func, &cmd->se_cmd, cmd->lun);
ft_dump_cmd(cmd, __func__);
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
- transport_generic_free_cmd(&cmd->se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
ft_sess_put(sess);
return;
}
@@ -495,7 +487,7 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)
code = FCP_TMF_FAILED;
break;
}
- FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
+ pr_debug("tmr fn %d resp %d fcp code %d\n",
tmr->function, tmr->response, code);
ft_send_resp_code(cmd, code);
return 0;
@@ -523,7 +515,7 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)
return;
busy:
- FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
+ pr_debug("cmd or seq allocation failure - sending BUSY\n");
ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@@ -548,7 +540,7 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)
case FC_RCTL_DD_DATA_DESC: /* transfer ready */
case FC_RCTL_ELS4_REQ: /* SRR, perhaps */
default:
- printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+ pr_debug("%s: unhandled frame r_ctl %x\n",
__func__, fh->fh_r_ctl);
fc_frame_free(fp);
ft_sess_put(sess); /* undo get from lookup */
@@ -637,7 +629,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
- ret = transport_get_lun_for_cmd(&cmd->se_cmd, NULL, cmd->lun);
+ ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
if (ret < 0) {
ft_dump_cmd(cmd, __func__);
transport_send_check_condition_and_sense(&cmd->se_cmd,
@@ -647,22 +639,22 @@ static void ft_send_cmd(struct ft_cmd *cmd)
ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
- FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
+ pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
ft_dump_cmd(cmd, __func__);
- if (ret == -1) {
+ if (ret == -ENOMEM) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
- if (ret == -2) {
+ if (ret == -EINVAL) {
if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
ft_queue_status(se_cmd);
else
transport_send_check_condition_and_sense(se_cmd,
se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0, 1, 0);
+ transport_generic_free_cmd(se_cmd, 0, 0);
return;
}
transport_generic_handle_cdb(se_cmd);
@@ -670,7 +662,6 @@ static void ft_send_cmd(struct ft_cmd *cmd)
err:
ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
- return;
}
/*
@@ -678,7 +669,7 @@ err:
*/
static void ft_exec_req(struct ft_cmd *cmd)
{
- FT_IO_DBG("cmd state %x\n", cmd->state);
+ pr_debug("cmd state %x\n", cmd->state);
switch (cmd->state) {
case FC_CMD_ST_NEW:
ft_send_cmd(cmd);
@@ -697,15 +688,12 @@ int ft_thread(void *arg)
struct ft_tpg *tpg = arg;
struct se_queue_obj *qobj = &tpg->qobj;
struct ft_cmd *cmd;
- int ret;
-
- set_user_nice(current, -20);
while (!kthread_should_stop()) {
- ret = wait_event_interruptible(qobj->thread_wq,
- atomic_read(&qobj->queue_cnt) || kthread_should_stop());
- if (ret < 0 || kthread_should_stop())
+ schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+ if (kthread_should_stop())
goto out;
+
cmd = ft_dequeue_cmd(qobj);
if (cmd)
ft_exec_req(cmd);
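Paired with the wake_up_process() call added to ft_queue_cmd() above, ft_thread no longer needs its own waitqueue: the producer wakes the kthread directly, and the consumer simply sleeps in schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT) until it is woken or asked to stop. A condensed sketch of the consumer half (queue draining elided):

	static int example_thread(void *arg)
	{
		while (!kthread_should_stop()) {
			/* returns early when wake_up_process() is called */
			schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
			if (kthread_should_stop())
				break;
			/* dequeue and execute one command here */
		}
		return 0;
	}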
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 84e868c255d..d63e3dd3b18 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -106,7 +106,7 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)
}
err = 4;
fail:
- FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
+ pr_debug("err %u len %zu pos %u byte %u\n",
err, cp - name, pos, byte);
return -1;
}
@@ -216,14 +216,14 @@ static struct se_node_acl *ft_add_acl(
u64 wwpn;
u32 q_depth;
- FT_CONF_DBG("add acl %s\n", name);
+ pr_debug("add acl %s\n", name);
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return ERR_PTR(-EINVAL);
acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
- if (!(acl))
+ if (!acl)
return ERR_PTR(-ENOMEM);
acl->node_auth.port_name = wwpn;
@@ -239,11 +239,11 @@ static void ft_del_acl(struct se_node_acl *se_acl)
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
- FT_CONF_DBG("del acl %s\n",
+ pr_debug("del acl %s\n",
config_item_name(&se_acl->acl_group.cg_item));
tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
+ pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
acl, se_acl, tpg, &tpg->se_tpg);
core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@@ -260,11 +260,11 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
spin_lock_bh(&se_tpg->acl_node_lock);
list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
- FT_CONF_DBG("acl %p port_name %llx\n",
+ pr_debug("acl %p port_name %llx\n",
acl, (unsigned long long)acl->node_auth.port_name);
if (acl->node_auth.port_name == rdata->ids.port_name ||
acl->node_auth.node_name == rdata->ids.node_name) {
- FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
+ pr_debug("acl %p port_name %llx matched\n", acl,
(unsigned long long)rdata->ids.port_name);
found = acl;
/* XXX need to hold onto ACL */
@@ -280,11 +280,11 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
struct ft_node_acl *acl;
acl = kzalloc(sizeof(*acl), GFP_KERNEL);
- if (!(acl)) {
- printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
+ if (!acl) {
+ pr_err("Unable to allocate struct ft_node_acl\n");
return NULL;
}
- FT_CONF_DBG("acl %p\n", acl);
+ pr_debug("acl %p\n", acl);
return &acl->se_node_acl;
}
@@ -294,7 +294,7 @@ static void ft_tpg_release_fabric_acl(struct se_portal_group *se_tpg,
struct ft_node_acl *acl = container_of(se_acl,
struct ft_node_acl, se_node_acl);
- FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
+ pr_debug("acl %p\n", acl);
kfree(acl);
}
@@ -311,7 +311,7 @@ static struct se_portal_group *ft_add_tpg(
unsigned long index;
int ret;
- FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
+ pr_debug("tcm_fc: add tpg %s\n", name);
/*
* Name must be "tpgt_" followed by the index.
@@ -331,7 +331,7 @@ static struct se_portal_group *ft_add_tpg(
transport_init_queue_obj(&tpg->qobj);
ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,
- (void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
+ tpg, TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0) {
kfree(tpg);
return NULL;
@@ -354,7 +354,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)
{
struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
- FT_CONF_DBG("del tpg %s\n",
+ pr_debug("del tpg %s\n",
config_item_name(&tpg->se_tpg.tpg_group.cg_item));
kthread_stop(tpg->thread);
@@ -412,7 +412,7 @@ static struct se_wwn *ft_add_lport(
struct ft_lport_acl *old_lacl;
u64 wwpn;
- FT_CONF_DBG("add lport %s\n", name);
+ pr_debug("add lport %s\n", name);
if (ft_parse_wwn(name, &wwpn, 1) < 0)
return NULL;
lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@@ -441,7 +441,7 @@ static void ft_del_lport(struct se_wwn *wwn)
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- FT_CONF_DBG("del lport %s\n",
+ pr_debug("del lport %s\n",
config_item_name(&wwn->wwn_group.cg_item));
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
@@ -536,8 +536,7 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.tpg_release_fabric_acl = ft_tpg_release_fabric_acl,
.tpg_get_inst_index = ft_tpg_get_inst_index,
.check_stop_free = ft_check_stop_free,
- .release_cmd_to_pool = ft_release_cmd,
- .release_cmd_direct = ft_release_cmd,
+ .release_cmd = ft_release_cmd,
.shutdown_session = ft_sess_shutdown,
.close_session = ft_sess_close,
.stop_session = ft_sess_stop,
@@ -550,7 +549,6 @@ static struct target_core_fabric_ops ft_fabric_ops = {
.set_default_node_attributes = ft_set_default_node_attr,
.get_task_tag = ft_get_task_tag,
.get_cmd_state = ft_get_cmd_state,
- .new_cmd_failure = ft_new_cmd_failure,
.queue_data_in = ft_queue_data_in,
.queue_status = ft_queue_status,
.queue_tm_rsp = ft_queue_tm_resp,
@@ -582,10 +580,10 @@ int ft_register_configfs(void)
* Register the top level struct config_item_type with TCM core
*/
fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
- if (!fabric) {
- printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
+ if (IS_ERR(fabric)) {
+ pr_err("%s: target_fabric_configfs_init() failed!\n",
__func__);
- return -1;
+ return PTR_ERR(fabric);
}
fabric->tf_ops = ft_fabric_ops;
@@ -610,11 +608,8 @@ int ft_register_configfs(void)
*/
ret = target_fabric_configfs_register(fabric);
if (ret < 0) {
- FT_CONF_DBG("target_fabric_configfs_register() for"
+ pr_debug("target_fabric_configfs_register() for"
" FC Target failed!\n");
- printk(KERN_INFO
- "%s: target_fabric_configfs_register() failed!\n",
- __func__);
target_fabric_configfs_free(fabric);
return -1;
}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 8c4a24077d9..11e6483fc12 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,6 +39,7 @@
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
+#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -65,21 +66,20 @@
int ft_queue_data_in(struct se_cmd *se_cmd)
{
struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
- struct se_transport_task *task;
struct fc_frame *fp = NULL;
struct fc_exch *ep;
struct fc_lport *lport;
- struct se_mem *mem;
+ struct scatterlist *sg = NULL;
size_t remaining;
u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
- u32 mem_off;
+ u32 mem_off = 0;
u32 fh_off = 0;
u32 frame_off = 0;
size_t frame_len = 0;
- size_t mem_len;
+ size_t mem_len = 0;
size_t tlen;
size_t off_in_page;
- struct page *page;
+ struct page *page = NULL;
int use_sg;
int error;
void *page_addr;
@@ -90,24 +90,17 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
lport = ep->lp;
cmd->seq = lport->tt.seq_start_next(cmd->seq);
- task = T_TASK(se_cmd);
- BUG_ON(!task);
remaining = se_cmd->data_length;
/*
- * Setup to use first mem list entry if any.
+ * Setup to use first mem list entry, unless no data.
*/
- if (task->t_tasks_se_num) {
- mem = list_first_entry(task->t_mem_list,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
- } else {
- mem = NULL;
- mem_len = remaining;
- mem_off = 0;
- page = NULL;
+ BUG_ON(remaining && !se_cmd->t_data_sg);
+ if (remaining) {
+ sg = se_cmd->t_data_sg;
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
/* no scatter/gather in skb for odd word length due to fc_seq_send() */
@@ -115,12 +108,10 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
while (remaining) {
if (!mem_len) {
- BUG_ON(!mem);
- mem = list_entry(mem->se_list.next,
- struct se_mem, se_list);
- mem_len = min((size_t)mem->se_len, remaining);
- mem_off = mem->se_off;
- page = mem->se_page;
+ sg = sg_next(sg);
+ mem_len = min((size_t)sg->length, remaining);
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
if (!frame_len) {
/*
@@ -148,18 +139,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
tlen = min(mem_len, frame_len);
if (use_sg) {
- if (!mem) {
- BUG_ON(!task->t_task_buf);
- page_addr = task->t_task_buf + mem_off;
- /*
- * In this case, offset is 'offset_in_page' of
- * (t_task_buf + mem_off) instead of 'mem_off'.
- */
- off_in_page = offset_in_page(page_addr);
- page = virt_to_page(page_addr);
- tlen = min(tlen, PAGE_SIZE - off_in_page);
- } else
- off_in_page = mem_off;
+ off_in_page = mem_off;
BUG_ON(!page);
get_page(page);
skb_fill_page_desc(fp_skb(fp),
@@ -169,7 +149,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
fp_skb(fp)->data_len += tlen;
fp_skb(fp)->truesize +=
PAGE_SIZE << compound_order(page);
- } else if (mem) {
+ } else {
BUG_ON(!page);
from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
KM_SOFTIRQ0);
@@ -180,10 +160,6 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
memcpy(to, from, tlen);
kunmap_atomic(page_addr, KM_SOFTIRQ0);
to += tlen;
- } else {
- from = task->t_task_buf + mem_off;
- memcpy(to, from, tlen);
- to += tlen;
}
mem_off += tlen;
@@ -201,8 +177,7 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
error = lport->tt.seq_send(lport, cmd->seq, fp);
if (error) {
/* XXX For now, initiator will retry */
- if (printk_ratelimit())
- printk(KERN_ERR "%s: Failed to send frame %p, "
+ pr_err_ratelimited("%s: Failed to send frame %p, "
"xid <0x%x>, remaining %zu, "
"lso_max <0x%x>\n",
__func__, fp, ep->xid,
@@ -221,24 +196,20 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
struct fc_seq *seq = cmd->seq;
struct fc_exch *ep;
struct fc_lport *lport;
- struct se_transport_task *task;
struct fc_frame_header *fh;
- struct se_mem *mem;
- u32 mem_off;
+ struct scatterlist *sg = NULL;
+ u32 mem_off = 0;
u32 rel_off;
size_t frame_len;
- size_t mem_len;
+ size_t mem_len = 0;
size_t tlen;
- struct page *page;
+ struct page *page = NULL;
void *page_addr;
void *from;
void *to;
u32 f_ctl;
void *buf;
- task = T_TASK(se_cmd);
- BUG_ON(!task);
-
fh = fc_frame_header_get(fp);
if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
goto drop;
@@ -251,7 +222,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
*/
buf = fc_frame_payload_get(fp, 1);
if (cmd->was_ddp_setup && buf) {
- printk(KERN_INFO "%s: When DDP was setup, not expected to"
+ pr_debug("%s: When DDP was setup, not expected to"
"receive frame with payload, Payload shall be"
"copied directly to buffer instead of coming "
"via. legacy receive queues\n", __func__);
@@ -289,7 +260,7 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
* this point, but just in case if required in future
* for debugging or any other purpose
*/
- printk(KERN_ERR "%s: Received frame with TSI bit not"
+ pr_err("%s: Received frame with TSI bit not"
" being SET, dropping the frame, "
"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
__func__, cmd->sg, cmd->sg_cnt);
@@ -312,29 +283,22 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
frame_len = se_cmd->data_length - rel_off;
/*
- * Setup to use first mem list entry if any.
+ * Setup to use first mem list entry, unless no data.
*/
- if (task->t_tasks_se_num) {
- mem = list_first_entry(task->t_mem_list,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
- } else {
- mem = NULL;
- page = NULL;
- mem_off = 0;
- mem_len = frame_len;
+ BUG_ON(frame_len && !se_cmd->t_data_sg);
+ if (frame_len) {
+ sg = se_cmd->t_data_sg;
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
while (frame_len) {
if (!mem_len) {
- BUG_ON(!mem);
- mem = list_entry(mem->se_list.next,
- struct se_mem, se_list);
- mem_len = mem->se_len;
- mem_off = mem->se_off;
- page = mem->se_page;
+ sg = sg_next(sg);
+ mem_len = sg->length;
+ mem_off = sg->offset;
+ page = sg_page(sg);
}
if (rel_off >= mem_len) {
rel_off -= mem_len;
@@ -347,19 +311,15 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
tlen = min(mem_len, frame_len);
- if (mem) {
- to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
- KM_SOFTIRQ0);
- page_addr = to;
- to += mem_off & ~PAGE_MASK;
- tlen = min(tlen, (size_t)(PAGE_SIZE -
- (mem_off & ~PAGE_MASK)));
- memcpy(to, from, tlen);
- kunmap_atomic(page_addr, KM_SOFTIRQ0);
- } else {
- to = task->t_task_buf + mem_off;
- memcpy(to, from, tlen);
- }
+ to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
+ KM_SOFTIRQ0);
+ page_addr = to;
+ to += mem_off & ~PAGE_MASK;
+ tlen = min(tlen, (size_t)(PAGE_SIZE -
+ (mem_off & ~PAGE_MASK)));
+ memcpy(to, from, tlen);
+ kunmap_atomic(page_addr, KM_SOFTIRQ0);
+
from += tlen;
frame_len -= tlen;
mem_off += tlen;
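Both data-path rewrites above walk the scatterlist with sg_next() and copy through short-lived kmap_atomic() mappings; the mem_off >> PAGE_SHIFT arithmetic handles sg entries whose backing allocation spans several pages. A hedged sketch of one fragment copy (KM_SOFTIRQ0 matches the era of this code; the helper is hypothetical):

	static void example_copy_to_sg(struct page *page, u32 mem_off,
					const void *from, size_t tlen)
	{
		void *page_addr, *to;

		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT), KM_SOFTIRQ0);
		page_addr = to;
		to += mem_off & ~PAGE_MASK;
		tlen = min(tlen, (size_t)(PAGE_SIZE - (mem_off & ~PAGE_MASK)));
		memcpy(to, from, tlen);
		kunmap_atomic(page_addr, KM_SOFTIRQ0);
	}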
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 7491e21cc6a..fbcbb3d1d06 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -198,13 +198,13 @@ static struct ft_sess *ft_sess_get(struct fc_lport *lport, u32 port_id)
if (sess->port_id == port_id) {
kref_get(&sess->kref);
rcu_read_unlock();
- FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
+ pr_debug("port_id %x found %p\n", port_id, sess);
return sess;
}
}
out:
rcu_read_unlock();
- FT_SESS_DBG("port_id %x not found\n", port_id);
+ pr_debug("port_id %x not found\n", port_id);
return NULL;
}
@@ -240,7 +240,7 @@ static struct ft_sess *ft_sess_create(struct ft_tport *tport, u32 port_id,
hlist_add_head_rcu(&sess->hash, head);
tport->sess_count++;
- FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
+ pr_debug("port_id %x sess %p\n", port_id, sess);
transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
sess->se_sess, sess);
@@ -314,7 +314,7 @@ int ft_sess_shutdown(struct se_session *se_sess)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- FT_SESS_DBG("port_id %x\n", sess->port_id);
+ pr_debug("port_id %x\n", sess->port_id);
return 1;
}
@@ -335,7 +335,7 @@ void ft_sess_close(struct se_session *se_sess)
mutex_unlock(&ft_lport_lock);
return;
}
- FT_SESS_DBG("port_id %x\n", port_id);
+ pr_debug("port_id %x\n", port_id);
ft_sess_unhash(sess);
mutex_unlock(&ft_lport_lock);
transport_deregister_session_configfs(se_sess);
@@ -348,7 +348,7 @@ void ft_sess_stop(struct se_session *se_sess, int sess_sleep, int conn_sleep)
{
struct ft_sess *sess = se_sess->fabric_sess_ptr;
- FT_SESS_DBG("port_id %x\n", sess->port_id);
+ pr_debug("port_id %x\n", sess->port_id);
}
int ft_sess_logged_in(struct se_session *se_sess)
@@ -458,7 +458,7 @@ static int ft_prli(struct fc_rport_priv *rdata, u32 spp_len,
mutex_lock(&ft_lport_lock);
ret = ft_prli_locked(rdata, spp_len, rspp, spp);
mutex_unlock(&ft_lport_lock);
- FT_SESS_DBG("port_id %x flags %x ret %x\n",
+ pr_debug("port_id %x flags %x ret %x\n",
rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
return ret;
}
@@ -518,11 +518,11 @@ static void ft_recv(struct fc_lport *lport, struct fc_frame *fp)
struct ft_sess *sess;
u32 sid = fc_frame_sid(fp);
- FT_SESS_DBG("sid %x\n", sid);
+ pr_debug("sid %x\n", sid);
sess = ft_sess_get(lport, sid);
if (!sess) {
- FT_SESS_DBG("sid %x sess lookup failed\n", sid);
+ pr_debug("sid %x sess lookup failed\n", sid);
/* TBD XXX - if FCP_CMND, send PRLO */
fc_frame_free(fp);
return;
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index c911b2419ab..e58cece6f44 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -32,17 +32,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
{
struct resource resource;
struct device_node *np = ofdev->dev.of_node;
- const __be32 *clk, *spd;
- const __be32 *prop;
- int ret, prop_size;
+ u32 clk, spd, prop;
+ int ret;
memset(port, 0, sizeof *port);
- spd = of_get_property(np, "current-speed", NULL);
- clk = of_get_property(np, "clock-frequency", NULL);
- if (!clk) {
+ if (of_property_read_u32(np, "clock-frequency", &clk)) {
dev_warn(&ofdev->dev, "no clock-frequency property set\n");
return -ENODEV;
}
+ /* If current-speed was set, then try not to change it. */
+ if (of_property_read_u32(np, "current-speed", &spd) == 0)
+ port->custom_divisor = clk / (16 * spd);
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
@@ -54,25 +54,35 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
port->mapbase = resource.start;
/* Check for shifted address mapping */
- prop = of_get_property(np, "reg-offset", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- port->mapbase += be32_to_cpup(prop);
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ port->mapbase += prop;
/* Check for registers offset within the devices address range */
- prop = of_get_property(np, "reg-shift", &prop_size);
- if (prop && (prop_size == sizeof(u32)))
- port->regshift = be32_to_cpup(prop);
+ if (of_property_read_u32(np, "reg-shift", &prop) == 0)
+ port->regshift = prop;
port->irq = irq_of_parse_and_map(np, 0);
port->iotype = UPIO_MEM;
+ if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
+ switch (prop) {
+ case 1:
+ port->iotype = UPIO_MEM;
+ break;
+ case 4:
+ port->iotype = UPIO_MEM32;
+ break;
+ default:
+ dev_warn(&ofdev->dev, "unsupported reg-io-width (%d)\n",
+ prop);
+ return -EINVAL;
+ }
+ }
+
port->type = type;
- port->uartclk = be32_to_cpup(clk);
+ port->uartclk = clk;
port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
| UPF_FIXED_PORT | UPF_FIXED_TYPE;
port->dev = &ofdev->dev;
- /* If current-speed was set, then try not to change it. */
- if (spd)
- port->custom_divisor = be32_to_cpup(clk) / (16 * (be32_to_cpup(spd)));
return 0;
}
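
The rewrite above leans on of_property_read_u32(), which returns 0 on success and fills in the value, collapsing the old pointer-plus-size checks into a single test. A minimal sketch of the pattern as used here:

    u32 val;

    /* 0 on success; non-zero if the property is absent or malformed */
    if (of_property_read_u32(np, "reg-shift", &val) == 0)
            port->regshift = val;
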
@@ -171,6 +181,7 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = {
{ .compatible = "ns16550", .data = (void *)PORT_16550, },
{ .compatible = "ns16750", .data = (void *)PORT_16750, },
{ .compatible = "ns16850", .data = (void *)PORT_16850, },
+ { .compatible = "nvidia,tegra20-uart", .data = (void *)PORT_TEGRA, },
#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL
{ .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
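
For reference, the new table entry is consumed through the usual OF match flow, in which the .data cookie carries the port type. A sketch under that assumption (the surrounding probe code is not part of this hunk):

    unsigned long port_type = PORT_UNKNOWN;
    const struct of_device_id *match;

    match = of_match_device(of_platform_serial_table, &ofdev->dev);
    if (match)
            port_type = (unsigned long)match->data;  /* e.g. PORT_TEGRA */
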
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a59638b37c1..03bc471c3ee 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -9,6 +9,23 @@ config XEN_BALLOON
the system to expand the domain's memory allocation, or alternatively
return unneeded memory to the system.
+config XEN_SELFBALLOONING
+ bool "Dynamically self-balloon kernel memory to target"
+ depends on XEN && XEN_BALLOON && CLEANCACHE && SWAP
+ default n
+ help
+ Self-ballooning dynamically balloons available kernel memory driven
+ by the current usage of anonymous memory ("committed AS") and
+ controlled by various sysfs-settable parameters. Configuring
+ FRONTSWAP is highly recommended; if it is not configured, self-
+ ballooning is disabled by default but can be enabled with the
+ 'selfballooning' kernel boot parameter. If FRONTSWAP is configured,
+ frontswap-selfshrinking is enabled by default but can be disabled
+ with the 'noselfshrink' kernel boot parameter; and self-ballooning
+ is enabled by default but can be disabled with the 'noselfballooning'
+ kernel boot parameter. Note that systems without a sufficiently
+ large swap device should not enable self-ballooning.
+
config XEN_SCRUB_PAGES
bool "Scrub pages before returning them to system"
depends on XEN_BALLOON
@@ -105,4 +122,33 @@ config SWIOTLB_XEN
depends on PCI
select SWIOTLB
+config XEN_TMEM
+ bool
+ default y if (CLEANCACHE || FRONTSWAP)
+ help
+ Shim to interface in-kernel Transcendent Memory hooks
+ (e.g. cleancache and frontswap) to Xen tmem hypercalls.
+
+config XEN_PCIDEV_BACKEND
+ tristate "Xen PCI-device backend driver"
+ depends on PCI && X86 && XEN
+ depends on XEN_BACKEND
+ default m
+ help
+ The PCI device backend driver allows the kernel to export arbitrary
+ PCI devices to other guests. If you select this to be a module, you
+ will need to make sure no other driver has bound to the device(s)
+ you want to make visible to other guests.
+
+ The parameter "passthrough" allows you to specify how you want the PCI
+ devices to appear in the guest. You can choose the default (0), where
+ the PCI topology starts at 00.00.0, or (1) for passthrough if you want
+ the PCI device topology to appear the same as in the host.
+
+ The "hide" parameter (only applicable if backend driver is compiled
+ into the kernel) allows you to bind the PCI devices to this module
+ from the default device drivers. The argument is the list of PCI BDFs:
+ xen-pciback.hide=(03:00.0)(04:00.0)
+
+ If in doubt, say m.
endmenu
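
For illustration, assuming the module form selected by "default m" above, the backend would be loaded as:

    modprobe xen-pciback passthrough=1

while the hide list, as the help text notes, only applies when the driver is built in and is given on the kernel command line, e.g. xen-pciback.hide=(03:00.0)(04:00.0).
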
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index bbc18258ecc..72bbb27d7a6 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,6 +1,5 @@
obj-y += grant-table.o features.o events.o manage.o balloon.o
obj-y += xenbus/
-obj-y += tmem.o
nostackp := $(call cc-option, -fno-stack-protector)
CFLAGS_features.o := $(nostackp)
@@ -9,14 +8,17 @@ obj-$(CONFIG_BLOCK) += biomerge.o
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
+obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
obj-$(CONFIG_XEN_PLATFORM_PCI) += xen-platform-pci.o
+obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 30df85d8fca..da70f5c32eb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -615,11 +615,6 @@ static int find_irq_by_gsi(unsigned gsi)
return -1;
}
-int xen_allocate_pirq_gsi(unsigned gsi)
-{
- return gsi;
-}
-
/*
* Do not make any assumptions regarding the relationship between the
* IRQ number returned here and the Xen pirq argument.
@@ -1693,6 +1688,6 @@ void __init xen_init_IRQ(void)
} else {
irq_ctx_init(smp_processor_id());
if (xen_initial_domain())
- xen_setup_pirqs();
+ pci_xen_initial_domain();
}
}
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 816a44959ef..d369965e8f8 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -1,7 +1,7 @@
/*
* Xen implementation for transcendent memory (tmem)
*
- * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
+ * Copyright (C) 2009-2011 Oracle Corp. All rights reserved.
* Author: Dan Magenheimer
*/
@@ -9,8 +9,14 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
+#include <linux/module.h>
#include <linux/cleancache.h>
+/* temporary ifdef until include/linux/frontswap.h is upstream */
+#ifdef CONFIG_FRONTSWAP
+#include <linux/frontswap.h>
+#endif
+
#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
@@ -122,14 +128,8 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}
-static int xen_tmem_destroy_pool(u32 pool_id)
-{
- struct tmem_oid oid = { { 0 } };
-
- return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
-}
-
-int tmem_enabled;
+int tmem_enabled __read_mostly;
+EXPORT_SYMBOL(tmem_enabled);
static int __init enable_tmem(char *s)
{
@@ -139,6 +139,14 @@ static int __init enable_tmem(char *s)
__setup("tmem", enable_tmem);
+#ifdef CONFIG_CLEANCACHE
+static int xen_tmem_destroy_pool(u32 pool_id)
+{
+ struct tmem_oid oid = { { 0 } };
+
+ return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
+}
+
/* cleancache ops */
static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
@@ -240,18 +248,156 @@ static struct cleancache_ops tmem_cleancache_ops = {
.init_shared_fs = tmem_cleancache_init_shared_fs,
.init_fs = tmem_cleancache_init_fs
};
+#endif
-static int __init xen_tmem_init(void)
+#ifdef CONFIG_FRONTSWAP
+/* frontswap tmem operations */
+
+/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+static int tmem_frontswap_poolid;
+
+/*
+ * Swizzling increases objects per swaptype, increasing tmem concurrency
+ * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
+ */
+#define SWIZ_BITS 4
+#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
+#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
+#define iswiz(_ind) (_ind >> SWIZ_BITS)
+
+static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
- struct cleancache_ops old_ops;
+ struct tmem_oid oid = { .oid = { 0 } };
+ oid.oid[0] = _oswiz(type, ind);
+ return oid;
+}
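+/*
+ * Worked example (illustrative values): with SWIZ_BITS = 4, type = 1 and
+ * page offset ind = 0x23,
+ *   _oswiz(1, 0x23) = (1 << 4) | (0x23 & 0xf) = 0x13  -> oid.oid[0]
+ *   iswiz(0x23)     = 0x23 >> 4                = 0x02  -> index in object
+ * so consecutive offsets of one swap type spread across 16 tmem objects.
+ */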
+/* returns 0 if the page was successfully put into frontswap, -1 if not */
+static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ unsigned long pfn = page_to_pfn(page);
+ int pool = tmem_frontswap_poolid;
+ int ret;
+
+ if (pool < 0)
+ return -1;
+ if (ind64 != ind)
+ return -1;
+ mb(); /* ensure page is quiescent; tmem may address it with an alias */
+ ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ /* translate Xen tmem return values to linux semantics */
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+/*
+ * returns 0 if the page was successfully gotten from frontswap, -1 if
+ * it was not present (should never happen!)
+ */
+static int tmem_frontswap_get_page(unsigned type, pgoff_t offset,
+ struct page *page)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ unsigned long pfn = page_to_pfn(page);
+ int pool = tmem_frontswap_poolid;
+ int ret;
+
+ if (pool < 0)
+ return -1;
+ if (ind64 != ind)
+ return -1;
+ ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+ /* translate Xen tmem return values to linux semantics */
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+/* flush a single page from frontswap */
+static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
+{
+ u64 ind64 = (u64)offset;
+ u32 ind = (u32)offset;
+ int pool = tmem_frontswap_poolid;
+
+ if (pool < 0)
+ return;
+ if (ind64 != ind)
+ return;
+ (void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
+}
+
+/* flush all pages from the passed swaptype */
+static void tmem_frontswap_flush_area(unsigned type)
+{
+ int pool = tmem_frontswap_poolid;
+ int ind;
+
+ if (pool < 0)
+ return;
+ for (ind = SWIZ_MASK; ind >= 0; ind--)
+ (void)xen_tmem_flush_object(pool, oswiz(type, ind));
+}
+
+static void tmem_frontswap_init(unsigned ignored)
+{
+ struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;
+
+ /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
+ if (tmem_frontswap_poolid < 0)
+ tmem_frontswap_poolid =
+ xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
+}
+
+static int __initdata use_frontswap = 1;
+
+static int __init no_frontswap(char *s)
+{
+ use_frontswap = 0;
+ return 1;
+}
+
+__setup("nofrontswap", no_frontswap);
+
+static struct frontswap_ops tmem_frontswap_ops = {
+ .put_page = tmem_frontswap_put_page,
+ .get_page = tmem_frontswap_get_page,
+ .flush_page = tmem_frontswap_flush_page,
+ .flush_area = tmem_frontswap_flush_area,
+ .init = tmem_frontswap_init
+};
+#endif
+
+static int __init xen_tmem_init(void)
+{
if (!xen_domain())
return 0;
+#ifdef CONFIG_FRONTSWAP
+ if (tmem_enabled && use_frontswap) {
+ char *s = "";
+ struct frontswap_ops old_ops =
+ frontswap_register_ops(&tmem_frontswap_ops);
+
+ tmem_frontswap_poolid = -1;
+ if (old_ops.init != NULL)
+ s = " (WARNING: frontswap_ops overridden)";
+ printk(KERN_INFO "frontswap enabled, RAM provided by "
+ "Xen Transcendent Memory\n");
+ }
+#endif
#ifdef CONFIG_CLEANCACHE
BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
if (tmem_enabled && use_cleancache) {
char *s = "";
- old_ops = cleancache_register_ops(&tmem_cleancache_ops);
+ struct cleancache_ops old_ops =
+ cleancache_register_ops(&tmem_cleancache_ops);
if (old_ops.init_fs != NULL)
s = " (WARNING: cleancache_ops overridden)";
printk(KERN_INFO "cleancache enabled, RAM provided by "
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index a4ff225ee86..5c9dc43c1e9 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -98,6 +98,8 @@ static int __init balloon_init(void)
register_balloon(&balloon_sysdev);
+ register_xen_selfballooning(&balloon_sysdev);
+
target_watch.callback = watch_target;
xenstore_notifier.notifier_call = balloon_init_watcher;
diff --git a/drivers/xen/xen-pciback/Makefile b/drivers/xen/xen-pciback/Makefile
new file mode 100644
index 00000000000..ffe0ad3438b
--- /dev/null
+++ b/drivers/xen/xen-pciback/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback.o
+
+xen-pciback-y := pci_stub.o pciback_ops.o xenbus.o
+xen-pciback-y += conf_space.o conf_space_header.o \
+ conf_space_capability.o \
+ conf_space_quirks.o vpci.o \
+ passthrough.o
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
new file mode 100644
index 00000000000..a8031445d94
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -0,0 +1,438 @@
+/*
+ * PCI Backend - Functions for creating a virtual configuration space for
+ * exported PCI Devices.
+ * It's dangerous to allow PCI Driver Domains to change their
+ * device's resources (memory, i/o ports, interrupts). We need to
+ * restrict changes to certain PCI Configuration registers:
+ * BARs, INTERRUPT_PIN, most registers in the header...
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+#include "conf_space_quirks.h"
+
+#define DRV_NAME "xen-pciback"
+static int permissive;
+module_param(permissive, bool, 0644);
+
+/* This is where the xen_pcibk_{read,write}_config_{byte,word,dword}
+ * accessors are created. */
+#define DEFINE_PCI_CONFIG(op, size, type) \
+int xen_pcibk_##op##_config_##size \
+(struct pci_dev *dev, int offset, type value, void *data) \
+{ \
+ return pci_##op##_config_##size(dev, offset, value); \
+}
+
+DEFINE_PCI_CONFIG(read, byte, u8 *)
+DEFINE_PCI_CONFIG(read, word, u16 *)
+DEFINE_PCI_CONFIG(read, dword, u32 *)
+
+DEFINE_PCI_CONFIG(write, byte, u8)
+DEFINE_PCI_CONFIG(write, word, u16)
+DEFINE_PCI_CONFIG(write, dword, u32)
+
+static int conf_space_read(struct pci_dev *dev,
+ const struct config_field_entry *entry,
+ int offset, u32 *value)
+{
+ int ret = 0;
+ const struct config_field *field = entry->field;
+
+ *value = 0;
+
+ switch (field->size) {
+ case 1:
+ if (field->u.b.read)
+ ret = field->u.b.read(dev, offset, (u8 *) value,
+ entry->data);
+ break;
+ case 2:
+ if (field->u.w.read)
+ ret = field->u.w.read(dev, offset, (u16 *) value,
+ entry->data);
+ break;
+ case 4:
+ if (field->u.dw.read)
+ ret = field->u.dw.read(dev, offset, value, entry->data);
+ break;
+ }
+ return ret;
+}
+
+static int conf_space_write(struct pci_dev *dev,
+ const struct config_field_entry *entry,
+ int offset, u32 value)
+{
+ int ret = 0;
+ const struct config_field *field = entry->field;
+
+ switch (field->size) {
+ case 1:
+ if (field->u.b.write)
+ ret = field->u.b.write(dev, offset, (u8) value,
+ entry->data);
+ break;
+ case 2:
+ if (field->u.w.write)
+ ret = field->u.w.write(dev, offset, (u16) value,
+ entry->data);
+ break;
+ case 4:
+ if (field->u.dw.write)
+ ret = field->u.dw.write(dev, offset, value,
+ entry->data);
+ break;
+ }
+ return ret;
+}
+
+static inline u32 get_mask(int size)
+{
+ if (size == 1)
+ return 0xff;
+ else if (size == 2)
+ return 0xffff;
+ else
+ return 0xffffffff;
+}
+
+static inline int valid_request(int offset, int size)
+{
+ /* Validate request (no unaligned requests) */
+ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
+ return 1;
+ return 0;
+}
+
+static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
+ int offset)
+{
+ if (offset >= 0) {
+ new_val_mask <<= (offset * 8);
+ new_val <<= (offset * 8);
+ } else {
+ new_val_mask >>= (offset * -8);
+ new_val >>= (offset * -8);
+ }
+ val = (val & ~new_val_mask) | (new_val & new_val_mask);
+
+ return val;
+}
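+/*
+ * Worked example (illustrative): splicing a 16-bit write into a dword at
+ * byte offset 1:
+ *   merge_value(0x11223344, 0xbeef, 0xffff, 1)
+ *     new_val_mask <<= 8 -> 0x00ffff00, new_val <<= 8 -> 0x00beef00
+ *     result = (0x11223344 & ~0x00ffff00) | 0x00beef00 = 0x11beef44
+ */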
+
+static int pcibios_err_to_errno(int err)
+{
+ switch (err) {
+ case PCIBIOS_SUCCESSFUL:
+ return XEN_PCI_ERR_success;
+ case PCIBIOS_DEVICE_NOT_FOUND:
+ return XEN_PCI_ERR_dev_not_found;
+ case PCIBIOS_BAD_REGISTER_NUMBER:
+ return XEN_PCI_ERR_invalid_offset;
+ case PCIBIOS_FUNC_NOT_SUPPORTED:
+ return XEN_PCI_ERR_not_implemented;
+ case PCIBIOS_SET_FAILED:
+ return XEN_PCI_ERR_access_denied;
+ }
+ return err;
+}
+
+int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+ u32 *ret_val)
+{
+ int err = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ const struct config_field_entry *cfg_entry;
+ const struct config_field *field;
+ int req_start, req_end, field_start, field_end;
+ /* if read fails for any reason, return 0
+ * (as if device didn't respond) */
+ u32 value = 0, tmp_val;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x\n",
+ pci_name(dev), size, offset);
+
+ if (!valid_request(offset, size)) {
+ err = XEN_PCI_ERR_invalid_offset;
+ goto out;
+ }
+
+ /* Get the real value first, then modify as appropriate */
+ switch (size) {
+ case 1:
+ err = pci_read_config_byte(dev, offset, (u8 *) &value);
+ break;
+ case 2:
+ err = pci_read_config_word(dev, offset, (u16 *) &value);
+ break;
+ case 4:
+ err = pci_read_config_dword(dev, offset, &value);
+ break;
+ }
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ req_start = offset;
+ req_end = offset + size;
+ field_start = OFFSET(cfg_entry);
+ field_end = OFFSET(cfg_entry) + field->size;
+
+ if ((req_start >= field_start && req_start < field_end)
+ || (req_end > field_start && req_end <= field_end)) {
+ err = conf_space_read(dev, cfg_entry, field_start,
+ &tmp_val);
+ if (err)
+ goto out;
+
+ value = merge_value(value, tmp_val,
+ get_mask(field->size),
+ field_start - req_start);
+ }
+ }
+
+out:
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: read %d bytes at 0x%x = %x\n",
+ pci_name(dev), size, offset, value);
+
+ *ret_val = value;
+ return pcibios_err_to_errno(err);
+}
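+/*
+ * Illustrative overlap case: a 4-byte read at offset 0x04 spans both the
+ * 2-byte virtual PCI_COMMAND field and the real PCI_STATUS word; the real
+ * dword is fetched first, then merge_value() splices the 16 virtual bits
+ * back in at field_start - req_start = 0.
+ */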
+
+int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
+{
+ int err = 0, handled = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ const struct config_field_entry *cfg_entry;
+ const struct config_field *field;
+ u32 tmp_val;
+ int req_start, req_end, field_start, field_end;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG
+ DRV_NAME ": %s: write request %d bytes at 0x%x = %x\n",
+ pci_name(dev), size, offset, value);
+
+ if (!valid_request(offset, size))
+ return XEN_PCI_ERR_invalid_offset;
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ req_start = offset;
+ req_end = offset + size;
+ field_start = OFFSET(cfg_entry);
+ field_end = OFFSET(cfg_entry) + field->size;
+
+ if ((req_start >= field_start && req_start < field_end)
+ || (req_end > field_start && req_end <= field_end)) {
+ tmp_val = 0;
+
+ err = xen_pcibk_config_read(dev, field_start,
+ field->size, &tmp_val);
+ if (err)
+ break;
+
+ tmp_val = merge_value(tmp_val, value, get_mask(size),
+ req_start - field_start);
+
+ err = conf_space_write(dev, cfg_entry, field_start,
+ tmp_val);
+
+ /* handled is set true here, but not every byte
+ * may have been written! Properly detecting if
+ * every byte is handled is unnecessary as the
+ * flag is used to detect devices that need
+ * special helpers to work correctly.
+ */
+ handled = 1;
+ }
+ }
+
+ if (!handled && !err) {
+ /* By default, anything not specifically handled above is
+ * read-only. The permissive flag changes this behavior so
+ * that anything not specifically handled above is writable.
+ * This means that some fields may still be read-only because
+ * they have entries in the config_field list that intercept
+ * the write and do nothing. */
+ if (dev_data->permissive || permissive) {
+ switch (size) {
+ case 1:
+ err = pci_write_config_byte(dev, offset,
+ (u8) value);
+ break;
+ case 2:
+ err = pci_write_config_word(dev, offset,
+ (u16) value);
+ break;
+ case 4:
+ err = pci_write_config_dword(dev, offset,
+ (u32) value);
+ break;
+ }
+ } else if (!dev_data->warned_on_write) {
+ dev_data->warned_on_write = 1;
+ dev_warn(&dev->dev, "Driver tried to write to a "
+ "read-only configuration space field at offset"
+ " 0x%x, size %d. This may be harmless, but if "
+ "you have problems with your device:\n"
+ "1) see permissive attribute in sysfs\n"
+ "2) report problems to the xen-devel "
+ "mailing list along with details of your "
+ "device obtained from lspci.\n", offset, size);
+ }
+ }
+
+ return pcibios_err_to_errno(err);
+}
+
+void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry, *t;
+ const struct config_field *field;
+
+ dev_dbg(&dev->dev, "free-ing dynamically allocated virtual "
+ "configuration space fields\n");
+ if (!dev_data)
+ return;
+
+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ if (field->clean) {
+ field->clean((struct config_field *)field);
+
+ kfree(cfg_entry->data);
+
+ list_del(&cfg_entry->list);
+ kfree(cfg_entry);
+ }
+
+ }
+}
+
+void xen_pcibk_config_reset_dev(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ const struct config_field_entry *cfg_entry;
+ const struct config_field *field;
+
+ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
+ if (!dev_data)
+ return;
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+
+ if (field->reset)
+ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
+ }
+}
+
+void xen_pcibk_config_free_dev(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry, *t;
+ const struct config_field *field;
+
+ dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
+ if (!dev_data)
+ return;
+
+ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
+ list_del(&cfg_entry->list);
+
+ field = cfg_entry->field;
+
+ if (field->release)
+ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
+
+ kfree(cfg_entry);
+ }
+}
+
+int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
+ const struct config_field *field,
+ unsigned int base_offset)
+{
+ int err = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry;
+ void *tmp;
+
+ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
+ if (!cfg_entry) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ cfg_entry->data = NULL;
+ cfg_entry->field = field;
+ cfg_entry->base_offset = base_offset;
+
+ /* silently ignore duplicate fields */
+ err = xen_pcibk_field_is_dup(dev, OFFSET(cfg_entry));
+ if (err)
+ goto out;
+
+ if (field->init) {
+ tmp = field->init(dev, OFFSET(cfg_entry));
+
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto out;
+ }
+
+ cfg_entry->data = tmp;
+ }
+
+ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
+ OFFSET(cfg_entry));
+ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
+
+out:
+ if (err)
+ kfree(cfg_entry);
+
+ return err;
+}
+
+/* This sets up the device's virtual configuration space to keep track of
+ * certain registers (like the base address registers (BARs)) so that we
+ * can keep the client from manipulating them directly.
+ */
+int xen_pcibk_config_init_dev(struct pci_dev *dev)
+{
+ int err = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
+
+ INIT_LIST_HEAD(&dev_data->config_fields);
+
+ err = xen_pcibk_config_header_add_fields(dev);
+ if (err)
+ goto out;
+
+ err = xen_pcibk_config_capability_add_fields(dev);
+ if (err)
+ goto out;
+
+ err = xen_pcibk_config_quirks_init(dev);
+
+out:
+ return err;
+}
+
+int xen_pcibk_config_init(void)
+{
+ return xen_pcibk_config_capability_init();
+}
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
new file mode 100644
index 00000000000..e56c934ad13
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -0,0 +1,126 @@
+/*
+ * PCI Backend - Common data structures for overriding the configuration space
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#ifndef __XEN_PCIBACK_CONF_SPACE_H__
+#define __XEN_PCIBACK_CONF_SPACE_H__
+
+#include <linux/list.h>
+#include <linux/err.h>
+
+/* conf_field_init can return an errno in a ptr with ERR_PTR() */
+typedef void *(*conf_field_init) (struct pci_dev *dev, int offset);
+typedef void (*conf_field_reset) (struct pci_dev *dev, int offset, void *data);
+typedef void (*conf_field_free) (struct pci_dev *dev, int offset, void *data);
+
+typedef int (*conf_dword_write) (struct pci_dev *dev, int offset, u32 value,
+ void *data);
+typedef int (*conf_word_write) (struct pci_dev *dev, int offset, u16 value,
+ void *data);
+typedef int (*conf_byte_write) (struct pci_dev *dev, int offset, u8 value,
+ void *data);
+typedef int (*conf_dword_read) (struct pci_dev *dev, int offset, u32 *value,
+ void *data);
+typedef int (*conf_word_read) (struct pci_dev *dev, int offset, u16 *value,
+ void *data);
+typedef int (*conf_byte_read) (struct pci_dev *dev, int offset, u8 *value,
+ void *data);
+
+/* These are the fields within the configuration space which we
+ * are interested in intercepting reads/writes to and changing their
+ * values.
+ */
+struct config_field {
+ unsigned int offset;
+ unsigned int size;
+ unsigned int mask;
+ conf_field_init init;
+ conf_field_reset reset;
+ conf_field_free release;
+ void (*clean) (struct config_field *field);
+ union {
+ struct {
+ conf_dword_write write;
+ conf_dword_read read;
+ } dw;
+ struct {
+ conf_word_write write;
+ conf_word_read read;
+ } w;
+ struct {
+ conf_byte_write write;
+ conf_byte_read read;
+ } b;
+ } u;
+ struct list_head list;
+};
+
+struct config_field_entry {
+ struct list_head list;
+ const struct config_field *field;
+ unsigned int base_offset;
+ void *data;
+};
+
+#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
+
+/* Add fields to a device - the add_fields macro expects to get a pointer to
+ * the first entry in an array (of which the ending is marked by size==0)
+ */
+int xen_pcibk_config_add_field_offset(struct pci_dev *dev,
+ const struct config_field *field,
+ unsigned int offset);
+
+static inline int xen_pcibk_config_add_field(struct pci_dev *dev,
+ const struct config_field *field)
+{
+ return xen_pcibk_config_add_field_offset(dev, field, 0);
+}
+
+static inline int xen_pcibk_config_add_fields(struct pci_dev *dev,
+ const struct config_field *field)
+{
+ int i, err = 0;
+ for (i = 0; field[i].size != 0; i++) {
+ err = xen_pcibk_config_add_field(dev, &field[i]);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static inline int xen_pcibk_config_add_fields_offset(struct pci_dev *dev,
+ const struct config_field *field,
+ unsigned int offset)
+{
+ int i, err = 0;
+ for (i = 0; field[i].size != 0; i++) {
+ err = xen_pcibk_config_add_field_offset(dev, &field[i], offset);
+ if (err)
+ break;
+ }
+ return err;
+}
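+/*
+ * Illustrative caller (names assumed): field arrays end with an all-zero
+ * sentinel entry, which is what terminates the size != 0 loops above:
+ *
+ *   static const struct config_field example_fields[] = {
+ *           { .offset = 0x40, .size = 2, .u.w.read = example_read },
+ *           {}
+ *   };
+ */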
+
+/* Read/Write the real configuration space */
+int xen_pcibk_read_config_byte(struct pci_dev *dev, int offset, u8 *value,
+ void *data);
+int xen_pcibk_read_config_word(struct pci_dev *dev, int offset, u16 *value,
+ void *data);
+int xen_pcibk_read_config_dword(struct pci_dev *dev, int offset, u32 *value,
+ void *data);
+int xen_pcibk_write_config_byte(struct pci_dev *dev, int offset, u8 value,
+ void *data);
+int xen_pcibk_write_config_word(struct pci_dev *dev, int offset, u16 value,
+ void *data);
+int xen_pcibk_write_config_dword(struct pci_dev *dev, int offset, u32 value,
+ void *data);
+
+int xen_pcibk_config_capability_init(void);
+
+int xen_pcibk_config_header_add_fields(struct pci_dev *dev);
+int xen_pcibk_config_capability_add_fields(struct pci_dev *dev);
+
+#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
new file mode 100644
index 00000000000..7f83e9083e9
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -0,0 +1,207 @@
+/*
+ * PCI Backend - Handles the virtual fields found on the capability lists
+ * in the configuration space.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+
+static LIST_HEAD(capabilities);
+struct xen_pcibk_config_capability {
+ struct list_head cap_list;
+
+ int capability;
+
+ /* If the device has the capability found above, add these fields */
+ const struct config_field *fields;
+};
+
+static const struct config_field caplist_header[] = {
+ {
+ .offset = PCI_CAP_LIST_ID,
+ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = NULL,
+ },
+ {}
+};
+
+static inline void register_capability(struct xen_pcibk_config_capability *cap)
+{
+ list_add_tail(&cap->cap_list, &capabilities);
+}
+
+int xen_pcibk_config_capability_add_fields(struct pci_dev *dev)
+{
+ int err = 0;
+ struct xen_pcibk_config_capability *cap;
+ int cap_offset;
+
+ list_for_each_entry(cap, &capabilities, cap_list) {
+ cap_offset = pci_find_capability(dev, cap->capability);
+ if (cap_offset) {
+ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
+ cap->capability, cap_offset);
+
+ err = xen_pcibk_config_add_fields_offset(dev,
+ caplist_header,
+ cap_offset);
+ if (err)
+ goto out;
+ err = xen_pcibk_config_add_fields_offset(dev,
+ cap->fields,
+ cap_offset);
+ if (err)
+ goto out;
+ }
+ }
+
+out:
+ return err;
+}
+
+static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
+ void *data)
+{
+ /* Disallow writes to the vital product data */
+ if (value & PCI_VPD_ADDR_F)
+ return PCIBIOS_SET_FAILED;
+ else
+ return pci_write_config_word(dev, offset, value);
+}
+
+static const struct config_field caplist_vpd[] = {
+ {
+ .offset = PCI_VPD_ADDR,
+ .size = 2,
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = vpd_address_write,
+ },
+ {
+ .offset = PCI_VPD_DATA,
+ .size = 4,
+ .u.dw.read = xen_pcibk_read_config_dword,
+ .u.dw.write = NULL,
+ },
+ {}
+};
+
+static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
+ void *data)
+{
+ int err;
+ u16 real_value;
+
+ err = pci_read_config_word(dev, offset, &real_value);
+ if (err)
+ goto out;
+
+ *value = real_value & ~PCI_PM_CAP_PME_MASK;
+
+out:
+ return err;
+}
+
+/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
+ * Can't allow driver domain to enable PMEs - they're shared */
+#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
+
+static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
+ void *data)
+{
+ int err;
+ u16 old_value;
+ pci_power_t new_state, old_state;
+
+ err = pci_read_config_word(dev, offset, &old_value);
+ if (err)
+ goto out;
+
+ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
+ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
+
+ new_value &= PM_OK_BITS;
+ if ((old_value & PM_OK_BITS) != new_value) {
+ new_value = (old_value & ~PM_OK_BITS) | new_value;
+ err = pci_write_config_word(dev, offset, new_value);
+ if (err)
+ goto out;
+ }
+
+ /* Let pci core handle the power management change */
+ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
+ err = pci_set_power_state(dev, new_state);
+ if (err) {
+ err = PCIBIOS_SET_FAILED;
+ goto out;
+ }
+
+ out:
+ return err;
+}
+
+/* Ensure PMEs are disabled */
+static void *pm_ctrl_init(struct pci_dev *dev, int offset)
+{
+ int err;
+ u16 value;
+
+ err = pci_read_config_word(dev, offset, &value);
+ if (err)
+ goto out;
+
+ if (value & PCI_PM_CTRL_PME_ENABLE) {
+ value &= ~PCI_PM_CTRL_PME_ENABLE;
+ err = pci_write_config_word(dev, offset, value);
+ }
+
+out:
+ return ERR_PTR(err);
+}
+
+static const struct config_field caplist_pm[] = {
+ {
+ .offset = PCI_PM_PMC,
+ .size = 2,
+ .u.w.read = pm_caps_read,
+ },
+ {
+ .offset = PCI_PM_CTRL,
+ .size = 2,
+ .init = pm_ctrl_init,
+ .u.w.read = xen_pcibk_read_config_word,
+ .u.w.write = pm_ctrl_write,
+ },
+ {
+ .offset = PCI_PM_PPB_EXTENSIONS,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {
+ .offset = PCI_PM_DATA_REGISTER,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {}
+};
+
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_pm = {
+ .capability = PCI_CAP_ID_PM,
+ .fields = caplist_pm,
+};
+static struct xen_pcibk_config_capability xen_pcibk_config_capability_vpd = {
+ .capability = PCI_CAP_ID_VPD,
+ .fields = caplist_vpd,
+};
+
+int xen_pcibk_config_capability_init(void)
+{
+ register_capability(&xen_pcibk_config_capability_vpd);
+ register_capability(&xen_pcibk_config_capability_pm);
+
+ return 0;
+}
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
new file mode 100644
index 00000000000..da3cbdfcb5d
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -0,0 +1,386 @@
+/*
+ * PCI Backend - Handles the virtual fields in the configuration space headers.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+
+struct pci_bar_info {
+ u32 val;
+ u32 len_val;
+ int which;
+};
+
+#define DRV_NAME "xen-pciback"
+#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
+#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
+
+static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data)
+{
+ int i;
+ int ret;
+
+ ret = xen_pcibk_read_config_word(dev, offset, value, data);
+ if (!atomic_read(&dev->enable_cnt))
+ return ret;
+
+ for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+ if (dev->resource[i].flags & IORESOURCE_IO)
+ *value |= PCI_COMMAND_IO;
+ if (dev->resource[i].flags & IORESOURCE_MEM)
+ *value |= PCI_COMMAND_MEMORY;
+ }
+
+ return ret;
+}
+
+static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int err;
+
+ dev_data = pci_get_drvdata(dev);
+ if (!pci_is_enabled(dev) && is_enable_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable\n",
+ pci_name(dev));
+ err = pci_enable_device(dev);
+ if (err)
+ return err;
+ if (dev_data)
+ dev_data->enable_intx = 1;
+ } else if (pci_is_enabled(dev) && !is_enable_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable\n",
+ pci_name(dev));
+ pci_disable_device(dev);
+ if (dev_data)
+ dev_data->enable_intx = 0;
+ }
+
+ if (!dev->is_busmaster && is_master_cmd(value)) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n",
+ pci_name(dev));
+ pci_set_master(dev);
+ }
+
+ if (value & PCI_COMMAND_INVALIDATE) {
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG
+ DRV_NAME ": %s: enable memory-write-invalidate\n",
+ pci_name(dev));
+ err = pci_set_mwi(dev);
+ if (err) {
+ printk(KERN_WARNING
+ DRV_NAME ": %s: cannot enable "
+ "memory-write-invalidate (%d)\n",
+ pci_name(dev), err);
+ value &= ~PCI_COMMAND_INVALIDATE;
+ }
+ }
+
+ return pci_write_config_word(dev, offset, value);
+}
+
+static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ if (unlikely(!bar)) {
+ printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
+ pci_name(dev));
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ /* A write to obtain the length must happen as a 32-bit write.
+ * This does not (yet) support writing individual bytes
+ */
+ if (value == ~PCI_ROM_ADDRESS_ENABLE)
+ bar->which = 1;
+ else {
+ u32 tmpval;
+ pci_read_config_dword(dev, offset, &tmpval);
+ if (tmpval != bar->val && value == bar->val) {
+ /* Allow restoration of bar value. */
+ pci_write_config_dword(dev, offset, bar->val);
+ }
+ bar->which = 0;
+ }
+
+ /* Do we need to support enabling/disabling the rom address here? */
+
+ return 0;
+}
+
+/* For the BARs, only allow writes which write ~0 or
+ * the correct resource information
+ * (Needed for when the driver probes the resource usage)
+ */
+static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ if (unlikely(!bar)) {
+ printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
+ pci_name(dev));
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ /* A write to obtain the length must happen as a 32-bit write.
+ * This does not (yet) support writing individual bytes
+ */
+ if (value == ~0)
+ bar->which = 1;
+ else {
+ u32 tmpval;
+ pci_read_config_dword(dev, offset, &tmpval);
+ if (tmpval != bar->val && value == bar->val) {
+ /* Allow restoration of bar value. */
+ pci_write_config_dword(dev, offset, bar->val);
+ }
+ bar->which = 0;
+ }
+
+ return 0;
+}
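+/*
+ * Illustrative guest-side sequence (standard PCI BAR sizing) against this
+ * virtual BAR:
+ *   write 0xffffffff           -> bar->which = 1
+ *   read                       -> returns bar->len_val (the size mask)
+ *   write the original address -> bar->which = 0; reads return bar->val
+ */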
+
+static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ if (unlikely(!bar)) {
+ printk(KERN_WARNING DRV_NAME ": driver data not found for %s\n",
+ pci_name(dev));
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ *value = bar->which ? bar->len_val : bar->val;
+
+ return 0;
+}
+
+static inline void read_dev_bar(struct pci_dev *dev,
+ struct pci_bar_info *bar_info, int offset,
+ u32 len_mask)
+{
+ int pos;
+ struct resource *res = dev->resource;
+
+ if (offset == PCI_ROM_ADDRESS || offset == PCI_ROM_ADDRESS1)
+ pos = PCI_ROM_RESOURCE;
+ else {
+ pos = (offset - PCI_BASE_ADDRESS_0) / 4;
+ if (pos && ((res[pos - 1].flags & (PCI_BASE_ADDRESS_SPACE |
+ PCI_BASE_ADDRESS_MEM_TYPE_MASK)) ==
+ (PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_TYPE_64))) {
+ bar_info->val = res[pos - 1].start >> 32;
+ bar_info->len_val = res[pos - 1].end >> 32;
+ return;
+ }
+ }
+
+ bar_info->val = res[pos].start |
+ (res[pos].flags & PCI_REGION_FLAG_MASK);
+ bar_info->len_val = res[pos].end - res[pos].start + 1;
+}
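+/*
+ * Illustrative 64-bit case: for the upper half of a 64-bit memory BAR,
+ * the preceding resource entry holds the full range, so a BAR pair at
+ * 0x10/0x14 mapping 0x200000000 reports val = start >> 32 = 0x2 through
+ * the 0x14 slot.
+ */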
+
+static void *bar_init(struct pci_dev *dev, int offset)
+{
+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+
+ if (!bar)
+ return ERR_PTR(-ENOMEM);
+
+ read_dev_bar(dev, bar, offset, ~0);
+ bar->which = 0;
+
+ return bar;
+}
+
+static void *rom_init(struct pci_dev *dev, int offset)
+{
+ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+
+ if (!bar)
+ return ERR_PTR(-ENOMEM);
+
+ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
+ bar->which = 0;
+
+ return bar;
+}
+
+static void bar_reset(struct pci_dev *dev, int offset, void *data)
+{
+ struct pci_bar_info *bar = data;
+
+ bar->which = 0;
+}
+
+static void bar_release(struct pci_dev *dev, int offset, void *data)
+{
+ kfree(data);
+}
+
+static int xen_pcibk_read_vendor(struct pci_dev *dev, int offset,
+ u16 *value, void *data)
+{
+ *value = dev->vendor;
+
+ return 0;
+}
+
+static int xen_pcibk_read_device(struct pci_dev *dev, int offset,
+ u16 *value, void *data)
+{
+ *value = dev->device;
+
+ return 0;
+}
+
+static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
+ void *data)
+{
+ *value = (u8) dev->irq;
+
+ return 0;
+}
+
+static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
+{
+ u8 cur_value;
+ int err;
+
+ err = pci_read_config_byte(dev, offset, &cur_value);
+ if (err)
+ goto out;
+
+ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
+ || value == PCI_BIST_START)
+ err = pci_write_config_byte(dev, offset, value);
+
+out:
+ return err;
+}
+
+static const struct config_field header_common[] = {
+ {
+ .offset = PCI_VENDOR_ID,
+ .size = 2,
+ .u.w.read = xen_pcibk_read_vendor,
+ },
+ {
+ .offset = PCI_DEVICE_ID,
+ .size = 2,
+ .u.w.read = xen_pcibk_read_device,
+ },
+ {
+ .offset = PCI_COMMAND,
+ .size = 2,
+ .u.w.read = command_read,
+ .u.w.write = command_write,
+ },
+ {
+ .offset = PCI_INTERRUPT_LINE,
+ .size = 1,
+ .u.b.read = interrupt_read,
+ },
+ {
+ .offset = PCI_INTERRUPT_PIN,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {
+ /* Any side effects of letting driver domain control cache line? */
+ .offset = PCI_CACHE_LINE_SIZE,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ .u.b.write = xen_pcibk_write_config_byte,
+ },
+ {
+ .offset = PCI_LATENCY_TIMER,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ },
+ {
+ .offset = PCI_BIST,
+ .size = 1,
+ .u.b.read = xen_pcibk_read_config_byte,
+ .u.b.write = bist_write,
+ },
+ {}
+};
+
+#define CFG_FIELD_BAR(reg_offset) \
+ { \
+ .offset = reg_offset, \
+ .size = 4, \
+ .init = bar_init, \
+ .reset = bar_reset, \
+ .release = bar_release, \
+ .u.dw.read = bar_read, \
+ .u.dw.write = bar_write, \
+ }
+
+#define CFG_FIELD_ROM(reg_offset) \
+ { \
+ .offset = reg_offset, \
+ .size = 4, \
+ .init = rom_init, \
+ .reset = bar_reset, \
+ .release = bar_release, \
+ .u.dw.read = bar_read, \
+ .u.dw.write = rom_write, \
+ }
+
+static const struct config_field header_0[] = {
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
+ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
+ {}
+};
+
+static const struct config_field header_1[] = {
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
+ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
+ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
+ {}
+};
+
+int xen_pcibk_config_header_add_fields(struct pci_dev *dev)
+{
+ int err;
+
+ err = xen_pcibk_config_add_fields(dev, header_common);
+ if (err)
+ goto out;
+
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ err = xen_pcibk_config_add_fields(dev, header_0);
+ break;
+
+ case PCI_HEADER_TYPE_BRIDGE:
+ err = xen_pcibk_config_add_fields(dev, header_1);
+ break;
+
+ default:
+ err = -EINVAL;
+ printk(KERN_ERR DRV_NAME ": %s: Unsupported header type %d!\n",
+ pci_name(dev), dev->hdr_type);
+ break;
+ }
+
+out:
+ return err;
+}
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.c b/drivers/xen/xen-pciback/conf_space_quirks.c
new file mode 100644
index 00000000000..921a889e65e
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_quirks.c
@@ -0,0 +1,140 @@
+/*
+ * PCI Backend - Handle special overlays for broken devices.
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Author: Chris Bookholt <hap10@epoch.ncsc.mil>
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include "pciback.h"
+#include "conf_space.h"
+#include "conf_space_quirks.h"
+
+LIST_HEAD(xen_pcibk_quirks);
+#define DRV_NAME "xen-pciback"
+static inline const struct pci_device_id *
+match_one_device(const struct pci_device_id *id, const struct pci_dev *dev)
+{
+ if ((id->vendor == PCI_ANY_ID || id->vendor == dev->vendor) &&
+ (id->device == PCI_ANY_ID || id->device == dev->device) &&
+ (id->subvendor == PCI_ANY_ID ||
+ id->subvendor == dev->subsystem_vendor) &&
+ (id->subdevice == PCI_ANY_ID ||
+ id->subdevice == dev->subsystem_device) &&
+ !((id->class ^ dev->class) & id->class_mask))
+ return id;
+ return NULL;
+}
+
+static struct xen_pcibk_config_quirk *xen_pcibk_find_quirk(struct pci_dev *dev)
+{
+ struct xen_pcibk_config_quirk *tmp_quirk;
+
+ list_for_each_entry(tmp_quirk, &xen_pcibk_quirks, quirks_list)
+ if (match_one_device(&tmp_quirk->devid, dev) != NULL)
+ goto out;
+ tmp_quirk = NULL;
+ printk(KERN_DEBUG DRV_NAME
+ ":quirk didn't match any device xen_pciback knows about\n");
+out:
+ return tmp_quirk;
+}
+
+static inline void register_quirk(struct xen_pcibk_config_quirk *quirk)
+{
+ list_add_tail(&quirk->quirks_list, &xen_pcibk_quirks);
+}
+
+int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg)
+{
+ int ret = 0;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+ struct config_field_entry *cfg_entry;
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ if (OFFSET(cfg_entry) == reg) {
+ ret = 1;
+ break;
+ }
+ }
+ return ret;
+}
+
+int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
+ *field)
+{
+ int err = 0;
+
+ switch (field->size) {
+ case 1:
+ field->u.b.read = xen_pcibk_read_config_byte;
+ field->u.b.write = xen_pcibk_write_config_byte;
+ break;
+ case 2:
+ field->u.w.read = xen_pcibk_read_config_word;
+ field->u.w.write = xen_pcibk_write_config_word;
+ break;
+ case 4:
+ field->u.dw.read = xen_pcibk_read_config_dword;
+ field->u.dw.write = xen_pcibk_write_config_dword;
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ xen_pcibk_config_add_field(dev, field);
+
+out:
+ return err;
+}
+
+int xen_pcibk_config_quirks_init(struct pci_dev *dev)
+{
+ struct xen_pcibk_config_quirk *quirk;
+ int ret = 0;
+
+ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
+ if (!quirk) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ quirk->devid.vendor = dev->vendor;
+ quirk->devid.device = dev->device;
+ quirk->devid.subvendor = dev->subsystem_vendor;
+ quirk->devid.subdevice = dev->subsystem_device;
+ quirk->devid.class = 0;
+ quirk->devid.class_mask = 0;
+ quirk->devid.driver_data = 0UL;
+
+ quirk->pdev = dev;
+
+ register_quirk(quirk);
+out:
+ return ret;
+}
+
+void xen_pcibk_config_field_free(struct config_field *field)
+{
+ kfree(field);
+}
+
+int xen_pcibk_config_quirk_release(struct pci_dev *dev)
+{
+ struct xen_pcibk_config_quirk *quirk;
+ int ret = 0;
+
+ quirk = xen_pcibk_find_quirk(dev);
+ if (!quirk) {
+ ret = -ENXIO;
+ goto out;
+ }
+
+ list_del(&quirk->quirks_list);
+ kfree(quirk);
+
+out:
+ return ret;
+}
diff --git a/drivers/xen/xen-pciback/conf_space_quirks.h b/drivers/xen/xen-pciback/conf_space_quirks.h
new file mode 100644
index 00000000000..cfcc517e457
--- /dev/null
+++ b/drivers/xen/xen-pciback/conf_space_quirks.h
@@ -0,0 +1,33 @@
+/*
+ * PCI Backend - Data structures for special overlays for broken devices.
+ *
+ * Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Chris Bookholt <hap10@epoch.ncsc.mil>
+ */
+
+#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
+#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
+
+#include <linux/pci.h>
+#include <linux/list.h>
+
+struct xen_pcibk_config_quirk {
+ struct list_head quirks_list;
+ struct pci_device_id devid;
+ struct pci_dev *pdev;
+};
+
+int xen_pcibk_config_quirks_add_field(struct pci_dev *dev, struct config_field
+ *field);
+
+int xen_pcibk_config_quirks_remove_field(struct pci_dev *dev, int reg);
+
+int xen_pcibk_config_quirks_init(struct pci_dev *dev);
+
+void xen_pcibk_config_field_free(struct config_field *field);
+
+int xen_pcibk_config_quirk_release(struct pci_dev *dev);
+
+int xen_pcibk_field_is_dup(struct pci_dev *dev, unsigned int reg);
+
+#endif
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
new file mode 100644
index 00000000000..1d32a9a42c0
--- /dev/null
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -0,0 +1,194 @@
+/*
+ * PCI Backend - Provides restricted access to the real PCI bus topology
+ * to the frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "pciback.h"
+
+struct passthrough_dev_data {
+ /* Access to dev_list must be protected by lock */
+ struct list_head dev_list;
+ spinlock_t lock;
+};
+
+static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
+ unsigned int domain,
+ unsigned int bus,
+ unsigned int devfn)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry;
+ struct pci_dev *dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_data->lock, flags);
+
+ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
+ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
+ && bus == (unsigned int)dev_entry->dev->bus->number
+ && devfn == dev_entry->dev->devfn) {
+ dev = dev_entry->dev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+
+ return dev;
+}
+
+static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev,
+ int devid, publish_pci_dev_cb publish_cb)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry;
+ unsigned long flags;
+ unsigned int domain, bus, devfn;
+ int err;
+
+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
+ if (!dev_entry)
+ return -ENOMEM;
+ dev_entry->dev = dev;
+
+ spin_lock_irqsave(&dev_data->lock, flags);
+ list_add_tail(&dev_entry->list, &dev_data->dev_list);
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+
+ /* Publish this device. */
+ domain = (unsigned int)pci_domain_nr(dev->bus);
+ bus = (unsigned int)dev->bus->number;
+ devfn = dev->devfn;
+ err = publish_cb(pdev, domain, bus, devfn, devid);
+
+ return err;
+}
+
+static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry, *t;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_data->lock, flags);
+
+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
+ if (dev_entry->dev == dev) {
+ list_del(&dev_entry->list);
+ found_dev = dev_entry->dev;
+ kfree(dev_entry);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_data->lock, flags);
+
+ if (found_dev)
+ pcistub_put_pci_dev(found_dev);
+}
+
+static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
+{
+ struct passthrough_dev_data *dev_data;
+
+ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+
+ spin_lock_init(&dev_data->lock);
+
+ INIT_LIST_HEAD(&dev_data->dev_list);
+
+ pdev->pci_dev_data = dev_data;
+
+ return 0;
+}
+
+static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
+ publish_pci_root_cb publish_root_cb)
+{
+ int err = 0;
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry, *e, *tmp;
+ struct pci_dev *dev;
+ int found;
+ unsigned int domain, bus;
+
+ spin_lock(&dev_data->lock);
+
+ list_for_each_entry_safe(dev_entry, tmp, &dev_data->dev_list, list) {
+ /* Only publish this device as a root if none of its
+ * parent bridges are exported
+ */
+ found = 0;
+ dev = dev_entry->dev->bus->self;
+ for (; !found && dev != NULL; dev = dev->bus->self) {
+ list_for_each_entry(e, &dev_data->dev_list, list) {
+ if (dev == e->dev) {
+ found = 1;
+ break;
+ }
+ }
+ }
+
+ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
+ bus = (unsigned int)dev_entry->dev->bus->number;
+
+ if (!found) {
+ spin_unlock(&dev_data->lock);
+ err = publish_root_cb(pdev, domain, bus);
+ if (err)
+ break;
+ spin_lock(&dev_data->lock);
+ }
+ }
+
+ if (!err)
+ spin_unlock(&dev_data->lock);
+
+ return err;
+}
+
+static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+{
+ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
+ struct pci_dev_entry *dev_entry, *t;
+
+ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
+ list_del(&dev_entry->list);
+ pcistub_put_pci_dev(dev_entry->dev);
+ kfree(dev_entry);
+ }
+
+ kfree(dev_data);
+ pdev->pci_dev_data = NULL;
+}
+
+static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
+ struct xen_pcibk_device *pdev,
+ unsigned int *domain, unsigned int *bus,
+ unsigned int *devfn)
+{
+ *domain = pci_domain_nr(pcidev->bus);
+ *bus = pcidev->bus->number;
+ *devfn = pcidev->devfn;
+ return 1;
+}
+
+struct xen_pcibk_backend xen_pcibk_passthrough_backend = {
+ .name = "passthrough",
+ .init = __xen_pcibk_init_devices,
+ .free = __xen_pcibk_release_devices,
+ .find = __xen_pcibk_get_pcifront_dev,
+ .publish = __xen_pcibk_publish_pci_roots,
+ .release = __xen_pcibk_release_pci_dev,
+ .add = __xen_pcibk_add_pci_dev,
+ .get = __xen_pcibk_get_pci_dev,
+};
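
The structure above is the passthrough flavor of a small backend vtable (the vpci.c file named in the Makefile presumably provides the other). A hedged sketch of how such a vtable is dispatched; the active_backend pointer and the wrapper are assumptions, not shown in this patch:

    /* assumed dispatcher shape, not part of this patch */
    static struct pci_dev *get_dev(struct xen_pcibk_device *pdev,
                                   unsigned int domain, unsigned int bus,
                                   unsigned int devfn)
    {
            return active_backend->get(pdev, domain, bus, devfn);
    }
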
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
new file mode 100644
index 00000000000..aec214ac0a1
--- /dev/null
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -0,0 +1,1376 @@
+/*
+ * PCI Stub Driver - Grabs devices in backend to be exported later
+ *
+ * Ryan Wilson <hap9@epoch.ncsc.mil>
+ * Chris Bookholt <hap10@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/rwsem.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+#include <asm/xen/hypervisor.h>
+#include "pciback.h"
+#include "conf_space.h"
+#include "conf_space_quirks.h"
+
+#define DRV_NAME "xen-pciback"
+
+static char *pci_devs_to_hide;
+wait_queue_head_t xen_pcibk_aer_wait_queue;
+/* Add a semaphore to synchronize AER handling with xen_pcibk
+ * remove/reconfigure ops: we want to avoid a xen_pcibk device being
+ * removed in the middle of an AER operation.
+ */
+static DECLARE_RWSEM(pcistub_sem);
+module_param_named(hide, pci_devs_to_hide, charp, 0444);
+
+struct pcistub_device_id {
+ struct list_head slot_list;
+ int domain;
+ unsigned char bus;
+ unsigned int devfn;
+};
+static LIST_HEAD(pcistub_device_ids);
+static DEFINE_SPINLOCK(device_ids_lock);
+
+struct pcistub_device {
+ struct kref kref;
+ struct list_head dev_list;
+ spinlock_t lock;
+
+ struct pci_dev *dev;
+ struct xen_pcibk_device *pdev;/* non-NULL if struct pci_dev is in use */
+};
+
+/* Access to pcistub_devices & seized_devices lists and the initialize_devices
+ * flag must be locked with pcistub_devices_lock
+ */
+static DEFINE_SPINLOCK(pcistub_devices_lock);
+static LIST_HEAD(pcistub_devices);
+
+/* wait for device_initcall before initializing our devices
+ * (see pcistub_init_devices_late)
+ */
+static int initialize_devices;
+static LIST_HEAD(seized_devices);
+
+static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+
+ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
+
+ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
+ if (!psdev)
+ return NULL;
+
+ psdev->dev = pci_dev_get(dev);
+ if (!psdev->dev) {
+ kfree(psdev);
+ return NULL;
+ }
+
+ kref_init(&psdev->kref);
+ spin_lock_init(&psdev->lock);
+
+ return psdev;
+}
+
+/* Don't call this directly as it's called by pcistub_device_put */
+static void pcistub_device_release(struct kref *kref)
+{
+ struct pcistub_device *psdev;
+
+ psdev = container_of(kref, struct pcistub_device, kref);
+
+ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
+
+ xen_unregister_device_domain_owner(psdev->dev);
+
+ /* Clean-up the device */
+ xen_pcibk_reset_device(psdev->dev);
+ xen_pcibk_config_free_dyn_fields(psdev->dev);
+ xen_pcibk_config_free_dev(psdev->dev);
+ kfree(pci_get_drvdata(psdev->dev));
+ pci_set_drvdata(psdev->dev, NULL);
+
+ pci_dev_put(psdev->dev);
+
+ kfree(psdev);
+}
+
+static inline void pcistub_device_get(struct pcistub_device *psdev)
+{
+ kref_get(&psdev->kref);
+}
+
+static inline void pcistub_device_put(struct pcistub_device *psdev)
+{
+ kref_put(&psdev->kref, pcistub_device_release);
+}
+
+static struct pcistub_device *pcistub_device_find(int domain, int bus,
+ int slot, int func)
+{
+ struct pcistub_device *psdev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev != NULL
+ && domain == pci_domain_nr(psdev->dev->bus)
+ && bus == psdev->dev->bus->number
+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
+ pcistub_device_get(psdev);
+ goto out;
+ }
+ }
+
+ /* didn't find it */
+ psdev = NULL;
+
+out:
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return psdev;
+}
+
+static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
+ struct pcistub_device *psdev)
+{
+ struct pci_dev *pci_dev = NULL;
+ unsigned long flags;
+
+ pcistub_device_get(psdev);
+
+ spin_lock_irqsave(&psdev->lock, flags);
+ if (!psdev->pdev) {
+ psdev->pdev = pdev;
+ pci_dev = psdev->dev;
+ }
+ spin_unlock_irqrestore(&psdev->lock, flags);
+
+ if (!pci_dev)
+ pcistub_device_put(psdev);
+
+ return pci_dev;
+}
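+/*
+ * Note (illustrative): the psdev->pdev test-and-set above is the claim
+ * point; a second xen_pcibk_device asking for the same stub device gets
+ * NULL, and the extra reference taken on entry is dropped again.
+ */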
+
+struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
+ int domain, int bus,
+ int slot, int func)
+{
+ struct pcistub_device *psdev;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev != NULL
+ && domain == pci_domain_nr(psdev->dev->bus)
+ && bus == psdev->dev->bus->number
+ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return found_dev;
+}
+
+struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev == dev) {
+ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return found_dev;
+}
+
+void pcistub_put_pci_dev(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev, *found_psdev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev == dev) {
+ found_psdev = psdev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ /* Hold this lock to avoid breaking the link between pcistub and
+ * xen_pcibk while AER processing is in progress.
+ */
+ down_write(&pcistub_sem);
+ /* Cleanup our device
+ * (so it's ready for the next domain)
+ */
+ xen_pcibk_reset_device(found_psdev->dev);
+ xen_pcibk_config_free_dyn_fields(found_psdev->dev);
+ xen_pcibk_config_reset_dev(found_psdev->dev);
+
+ spin_lock_irqsave(&found_psdev->lock, flags);
+ found_psdev->pdev = NULL;
+ spin_unlock_irqrestore(&found_psdev->lock, flags);
+
+ pcistub_device_put(found_psdev);
+ up_write(&pcistub_sem);
+}
+
+static int __devinit pcistub_match_one(struct pci_dev *dev,
+ struct pcistub_device_id *pdev_id)
+{
+ /* Match the specified device by domain, bus, slot, func and also if
+ * any of the device's parent bridges match.
+ */
+ for (; dev != NULL; dev = dev->bus->self) {
+ if (pci_domain_nr(dev->bus) == pdev_id->domain
+ && dev->bus->number == pdev_id->bus
+ && dev->devfn == pdev_id->devfn)
+ return 1;
+
+ /* Sometimes topmost bridge links to itself. */
+ if (dev == dev->bus->self)
+ break;
+ }
+
+ return 0;
+}
+
+static int __devinit pcistub_match(struct pci_dev *dev)
+{
+ struct pcistub_device_id *pdev_id;
+ unsigned long flags;
+ int found = 0;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
+ if (pcistub_match_one(dev, pdev_id)) {
+ found = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return found;
+}
+
+static int __devinit pcistub_init_device(struct pci_dev *dev)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int err = 0;
+
+ dev_dbg(&dev->dev, "initializing...\n");
+
+ /* The PCI backend is not intended to be a module (or to work with
+ * removable PCI devices) yet. If it were, xen_pcibk_config_free()
+ * would need to be called somewhere to free the memory allocated
+ * here and then to call kfree(pci_get_drvdata(psdev->dev)).
+ */
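+ /* Note: dev_data ends in a zero-length irq_name[] array, so the
+ * allocation below reserves extra room for the DRV_NAME "[<pci name>]"
+ * string written by sprintf() further down. */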
+ dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
+ + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
+ if (!dev_data) {
+ err = -ENOMEM;
+ goto out;
+ }
+ pci_set_drvdata(dev, dev_data);
+
+ /*
+ * Setup name for fake IRQ handler. It will only be enabled
+ * once the device is turned on by the guest.
+ */
+ sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));
+
+ dev_dbg(&dev->dev, "initializing config\n");
+
+ init_waitqueue_head(&xen_pcibk_aer_wait_queue);
+ err = xen_pcibk_config_init_dev(dev);
+ if (err)
+ goto out;
+
+ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
+ * must do this here because pcibios_enable_device may specify
+ * the pci device's true irq (and possibly its other resources)
+ * if they differ from what's in the configuration space.
+ * This makes the assumption that the device's resources won't
+ * change after this point (otherwise this code may break!)
+ */
+ dev_dbg(&dev->dev, "enabling device\n");
+ err = pci_enable_device(dev);
+ if (err)
+ goto config_release;
+
+ /* Now disable the device (this also ensures some private device
+ * data is set up before we export)
+ */
+ dev_dbg(&dev->dev, "reset device\n");
+ xen_pcibk_reset_device(dev);
+
+ return 0;
+
+config_release:
+ xen_pcibk_config_free_dev(dev);
+
+out:
+ pci_set_drvdata(dev, NULL);
+ kfree(dev_data);
+ return err;
+}
+
+/*
+ * Because some initialization still happens on
+ * devices during fs_initcall, we need to defer
+ * full initialization of our devices until
+ * device_initcall.
+ */
+static int __init pcistub_init_devices_late(void)
+{
+ struct pcistub_device *psdev;
+ unsigned long flags;
+ int err = 0;
+
+ pr_debug(DRV_NAME ": pcistub_init_devices_late\n");
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ while (!list_empty(&seized_devices)) {
+ psdev = container_of(seized_devices.next,
+ struct pcistub_device, dev_list);
+ list_del(&psdev->dev_list);
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ err = pcistub_init_device(psdev->dev);
+ if (err) {
+ dev_err(&psdev->dev->dev,
+ "error %d initializing device\n", err);
+ kfree(psdev);
+ psdev = NULL;
+ }
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ if (psdev)
+ list_add_tail(&psdev->dev_list, &pcistub_devices);
+ }
+
+ initialize_devices = 1;
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ return 0;
+}
+
+static int __devinit pcistub_seize(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ unsigned long flags;
+ int err = 0;
+
+ psdev = pcistub_device_alloc(dev);
+ if (!psdev)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ if (initialize_devices) {
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ /* don't want irqs disabled when calling pcistub_init_device */
+ err = pcistub_init_device(psdev->dev);
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ if (!err)
+ list_add(&psdev->dev_list, &pcistub_devices);
+ } else {
+ dev_dbg(&dev->dev, "deferring initialization\n");
+ list_add(&psdev->dev_list, &seized_devices);
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ if (err)
+ pcistub_device_put(psdev);
+
+ return err;
+}
+
+static int __devinit pcistub_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int err = 0;
+
+ dev_dbg(&dev->dev, "probing...\n");
+
+ if (pcistub_match(dev)) {
+
+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
+ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+ dev_err(&dev->dev, "can't export pci devices that "
+ "don't have a normal (0) or bridge (1) "
+ "header type!\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ dev_info(&dev->dev, "seizing device\n");
+ err = pcistub_seize(dev);
+ } else
+ /* Didn't find the device */
+ err = -ENODEV;
+
+out:
+ return err;
+}
+
+static void pcistub_remove(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev, *found_psdev = NULL;
+ unsigned long flags;
+
+ dev_dbg(&dev->dev, "removing\n");
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+
+ xen_pcibk_config_quirk_release(dev);
+
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (psdev->dev == dev) {
+ found_psdev = psdev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ if (found_psdev) {
+ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
+ found_psdev->pdev);
+
+ if (found_psdev->pdev) {
+ printk(KERN_WARNING DRV_NAME ": ****** removing device "
+ "%s while still in-use! ******\n",
+ pci_name(found_psdev->dev));
+ printk(KERN_WARNING DRV_NAME ": ****** driver domain may"
+ " still access this device's i/o resources!\n");
+ printk(KERN_WARNING DRV_NAME ": ****** shutdown driver "
+ "domain before binding device\n");
+ printk(KERN_WARNING DRV_NAME ": ****** to other drivers "
+ "or domains\n");
+
+ xen_pcibk_release_pci_dev(found_psdev->pdev,
+ found_psdev->dev);
+ }
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_del(&found_psdev->dev_list);
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+
+ /* the final put for releasing from the list */
+ pcistub_device_put(found_psdev);
+ }
+}
+
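+/* Wildcard ID table: it matches every PCI device, so pcistub_probe() is
+ * offered each device on the system; only devices on the hide list are
+ * actually seized (see pcistub_match()). */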
+static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = {
+ {
+ .vendor = PCI_ANY_ID,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
+ {0,},
+};
+
+#define PCI_NODENAME_MAX 40
+static void kill_domain_by_device(struct pcistub_device *psdev)
+{
+ struct xenbus_transaction xbt;
+ int err;
+ char nodename[PCI_NODENAME_MAX];
+
+ if (!psdev) {
+ pr_err(DRV_NAME ": device is NULL when doing AER recovery/kill_domain\n");
+ return;
+ }
+ snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
+ psdev->pdev->xdev->otherend_id);
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ dev_err(&psdev->dev->dev,
+ "error %d when start xenbus transaction\n", err);
+ return;
+ }
+ /* PV AER handlers will set this flag */
+ xenbus_printf(xbt, nodename, "aerState" , "aerfail");
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ dev_err(&psdev->dev->dev,
+ "error %d when end xenbus transaction\n", err);
+ return;
+ }
+}
+
+/* For each AER recovery step (error_detected, mmio_enabled, etc.), the
+ * frontend and backend must cooperate. In xen_pcibk, each step does a
+ * similar job: send a service request and wait for the frontend's response.
+ */
+static pci_ers_result_t common_process(struct pcistub_device *psdev,
+ pci_channel_state_t state, int aer_cmd,
+ pci_ers_result_t result)
+{
+ pci_ers_result_t res = result;
+ struct xen_pcie_aer_op *aer_op;
+ int ret;
+
+ /* With PV AER drivers */
+ aer_op = &(psdev->pdev->sh_info->aer_op);
+ aer_op->cmd = aer_cmd;
+ /* Useful for the error_detected callback */
+ aer_op->err = state;
+ /* pcifront BDF */
+ ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
+ &aer_op->domain, &aer_op->bus, &aer_op->devfn);
+ if (!ret) {
+ dev_err(&psdev->dev->dev,
+ DRV_NAME ": failed to get pcifront device\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+ wmb();
+
+ dev_dbg(&psdev->dev->dev,
+ DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
+ aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
+ /* Local flag marking that an AER request is outstanding; the
+ * xen_pcibk callback uses it to decide whether to check for
+ * pcifront's AER service ack signal.
+ */
+ set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+
+ /* It is possible that a pcifront conf_read_write request invokes the
+ * callback, causing a spurious wake_up. That is harmless and better
+ * than taking a spinlock here.
+ */
+ set_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags);
+ wmb();
+ notify_remote_via_irq(psdev->pdev->evtchn_irq);
+
+ ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
+ !(test_bit(_XEN_PCIB_active, (unsigned long *)
+ &psdev->pdev->sh_info->flags)), 300*HZ);
+
+ if (!ret) {
+ if (test_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&psdev->dev->dev,
+ "pcifront aer process not responding!\n");
+ clear_bit(_XEN_PCIB_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags);
+ aer_op->err = PCI_ERS_RESULT_NONE;
+ return res;
+ }
+ }
+ clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+
+ if (test_bit(_XEN_PCIF_active,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_dbg(&psdev->dev->dev,
+ "schedule pci_conf service in xen_pcibk\n");
+ xen_pcibk_test_and_schedule_op(psdev->pdev);
+ }
+
+ res = (pci_ers_result_t)aer_op->err;
+ return res;
+}
+
+/*
+* xen_pcibk_slot_reset: send the slot_reset request to pcifront in case the
+* device driver can provide this service, then wait for pcifront's ack.
+* @dev: pointer to the PCI device
+* The return value is used by the AER core's do_recovery policy.
+*/
+static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_RECOVERED;
+ dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ goto release;
+ }
+ result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER slot_reset service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+
+}
+
+
+/* xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront in case
+* the device driver can provide this service, then wait for pcifront's ack.
+* @dev: pointer to the PCI device
+* The return value is used by the AER core's do_recovery policy.
+*/
+
+static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_RECOVERED;
+ dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ goto release;
+ }
+ result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER mmio_enabled service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/* xen_pcibk_error_detected: send the error_detected request to pcifront in
+* case the device driver can provide this service, then wait for pcifront's
+* ack.
+* @dev: pointer to the PCI device
+* @error: the current PCI connection state
+* The return value is used by the AER core's do_recovery policy.
+*/
+
+static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
+ pci_channel_state_t error)
+{
+ struct pcistub_device *psdev;
+ pci_ers_result_t result;
+
+ result = PCI_ERS_RESULT_CAN_RECOVER;
+ dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ /* Guest owns the device but no AER handler is registered; kill the guest */
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+ result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);
+
+ if (result == PCI_ERS_RESULT_NONE ||
+ result == PCI_ERS_RESULT_DISCONNECT) {
+ dev_dbg(&dev->dev,
+ "No AER error_detected service or disconnected!\n");
+ kill_domain_by_device(psdev);
+ }
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return result;
+}
+
+/* xen_pcibk_error_resume: send the error_resume request to pcifront in case
+* the device driver can provide this service, then wait for pcifront's ack.
+* @dev: pointer to the PCI device
+*/
+
+static void xen_pcibk_error_resume(struct pci_dev *dev)
+{
+ struct pcistub_device *psdev;
+
+ dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
+ dev->bus->number, dev->devfn);
+
+ down_write(&pcistub_sem);
+ psdev = pcistub_device_find(pci_domain_nr(dev->bus),
+ dev->bus->number,
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ if (!psdev || !psdev->pdev) {
+ dev_err(&dev->dev,
+ DRV_NAME " device is not found/assigned\n");
+ goto end;
+ }
+
+ if (!psdev->pdev->sh_info) {
+ dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
+ " by HVM, kill it\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+
+ if (!test_bit(_XEN_PCIB_AERHANDLER,
+ (unsigned long *)&psdev->pdev->sh_info->flags)) {
+ dev_err(&dev->dev,
+ "guest with no AER driver should have been killed\n");
+ kill_domain_by_device(psdev);
+ goto release;
+ }
+ common_process(psdev, 1, XEN_PCI_OP_aer_resume,
+ PCI_ERS_RESULT_RECOVERED);
+release:
+ pcistub_device_put(psdev);
+end:
+ up_write(&pcistub_sem);
+ return;
+}
+
+/* xen_pcibk AER handling */
+static struct pci_error_handlers xen_pcibk_error_handler = {
+ .error_detected = xen_pcibk_error_detected,
+ .mmio_enabled = xen_pcibk_mmio_enabled,
+ .slot_reset = xen_pcibk_slot_reset,
+ .resume = xen_pcibk_error_resume,
+};
+
+/*
+ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
+ * for a normal device. I don't want it to be loaded automatically.
+ */
+
+static struct pci_driver xen_pcibk_pci_driver = {
+ /* The name should be xen_pciback, but until the tools are updated
+ * we will keep it as pciback. */
+ .name = "pciback",
+ .id_table = pcistub_ids,
+ .probe = pcistub_probe,
+ .remove = pcistub_remove,
+ .err_handler = &xen_pcibk_error_handler,
+};
+
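+/* Accepted formats (illustrative): "0000:01:00.0" (domain:bus:slot.func),
+ * or "01:00.0", in which case the domain defaults to 0. */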
+static inline int str_to_slot(const char *buf, int *domain, int *bus,
+ int *slot, int *func)
+{
+ int err;
+
+ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
+ if (err == 4)
+ return 0;
+ else if (err < 0)
+ return -EINVAL;
+
+ /* try again without domain */
+ *domain = 0;
+ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
+ if (err == 3)
+ return 0;
+
+ return -EINVAL;
+}
+
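+/* Accepted format (illustrative): "0000:01:00.0-00000040:2:0000ffff",
+ * i.e. <BDF>-<reg>:<size>:<mask>, all fields in hexadecimal. */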
+static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
+ *slot, int *func, int *reg, int *size, int *mask)
+{
+ int err;
+
+ err =
+ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
+ func, reg, size, mask);
+ if (err == 7)
+ return 0;
+ return -EINVAL;
+}
+
+static int pcistub_device_id_add(int domain, int bus, int slot, int func)
+{
+ struct pcistub_device_id *pci_dev_id;
+ unsigned long flags;
+
+ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
+ if (!pci_dev_id)
+ return -ENOMEM;
+
+ pci_dev_id->domain = domain;
+ pci_dev_id->bus = bus;
+ pci_dev_id->devfn = PCI_DEVFN(slot, func);
+
+ pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%01x\n",
+ domain, bus, slot, func);
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return 0;
+}
+
+static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
+{
+ struct pcistub_device_id *pci_dev_id, *t;
+ int devfn = PCI_DEVFN(slot, func);
+ int err = -ENOENT;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
+ slot_list) {
+ if (pci_dev_id->domain == domain
+ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
+ /* Don't break; here because it's possible the same
+ * slot could be in the list more than once
+ */
+ list_del(&pci_dev_id->slot_list);
+ kfree(pci_dev_id);
+
+ err = 0;
+
+ pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%01x from "
+ "seize list\n", domain, bus, slot, func);
+ }
+ }
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return err;
+}
+
+static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
+ int size, int mask)
+{
+ int err = 0;
+ struct pcistub_device *psdev;
+ struct pci_dev *dev;
+ struct config_field *field;
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+ if (!psdev || !psdev->dev) {
+ err = -ENODEV;
+ goto out;
+ }
+ dev = psdev->dev;
+
+ field = kzalloc(sizeof(*field), GFP_ATOMIC);
+ if (!field) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ field->offset = reg;
+ field->size = size;
+ field->mask = mask;
+ field->init = NULL;
+ field->reset = NULL;
+ field->release = NULL;
+ field->clean = xen_pcibk_config_field_free;
+
+ err = xen_pcibk_config_quirks_add_field(dev, field);
+ if (err)
+ kfree(field);
+out:
+ return err;
+}
+
+static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ err = pcistub_device_id_add(domain, bus, slot, func);
+
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
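+/* Usage sketch (illustrative sysfs path, assuming the driver is bound
+ * under its "pciback" name):
+ * echo "0000:01:00.0" > /sys/bus/pci/drivers/pciback/new_slot
+ */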
+DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
+
+static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ err = pcistub_device_id_remove(domain, bus, slot, func);
+
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
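+/* Counterpart to new_slot (illustrative):
+ * echo "0000:01:00.0" > /sys/bus/pci/drivers/pciback/remove_slot
+ */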
+DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
+
+static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
+{
+ struct pcistub_device_id *pci_dev_id;
+ size_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
+ if (count >= PAGE_SIZE)
+ break;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "%04x:%02x:%02x.%01x\n",
+ pci_dev_id->domain, pci_dev_id->bus,
+ PCI_SLOT(pci_dev_id->devfn),
+ PCI_FUNC(pci_dev_id->devfn));
+ }
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return count;
+}
+
+DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
+
+static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ size_t count = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (count >= PAGE_SIZE)
+ break;
+ if (!psdev->dev)
+ continue;
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data)
+ continue;
+ count +=
+ scnprintf(buf + count, PAGE_SIZE - count,
+ "%s:%s:%sing:%ld\n",
+ pci_name(psdev->dev),
+ dev_data->isr_on ? "on" : "off",
+ dev_data->ack_intr ? "ack" : "not ack",
+ dev_data->handled);
+ }
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return count;
+}
+
+DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
+
+static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
+ const char *buf,
+ size_t count)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ int domain, bus, slot, func;
+ int err = -ENOENT;
+
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+
+ psdev = pcistub_device_find(domain, bus, slot, func);
+
+ if (!psdev)
+ goto out;
+
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data)
+ goto out;
+
+ dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
+ dev_data->irq_name, dev_data->isr_on,
+ !dev_data->isr_on);
+
+ dev_data->isr_on = !(dev_data->isr_on);
+ if (dev_data->isr_on)
+ dev_data->ack_intr = 1;
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL, pcistub_irq_handler_switch);
+
+static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func, reg, size, mask;
+ int err;
+
+ err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
+ &mask);
+ if (err)
+ goto out;
+
+ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
+
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
+{
+ int count = 0;
+ unsigned long flags;
+ struct xen_pcibk_config_quirk *quirk;
+ struct xen_pcibk_dev_data *dev_data;
+ const struct config_field *field;
+ const struct config_field_entry *cfg_entry;
+
+ spin_lock_irqsave(&device_ids_lock, flags);
+ list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
+ quirk->pdev->bus->number,
+ PCI_SLOT(quirk->pdev->devfn),
+ PCI_FUNC(quirk->pdev->devfn),
+ quirk->devid.vendor, quirk->devid.device,
+ quirk->devid.subvendor,
+ quirk->devid.subdevice);
+
+ dev_data = pci_get_drvdata(quirk->pdev);
+
+ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
+ field = cfg_entry->field;
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "\t\t%08x:%01x:%08x\n",
+ cfg_entry->base_offset +
+ field->offset, field->size,
+ field->mask);
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&device_ids_lock, flags);
+
+ return count;
+}
+
+DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
+
+static ssize_t permissive_add(struct device_driver *drv, const char *buf,
+ size_t count)
+{
+ int domain, bus, slot, func;
+ int err;
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ err = str_to_slot(buf, &domain, &bus, &slot, &func);
+ if (err)
+ goto out;
+ psdev = pcistub_device_find(domain, bus, slot, func);
+ if (!psdev) {
+ err = -ENODEV;
+ goto out;
+ }
+ if (!psdev->dev) {
+ err = -ENODEV;
+ goto release;
+ }
+ dev_data = pci_get_drvdata(psdev->dev);
+ /* the driver data for a device should never be null at this point */
+ if (!dev_data) {
+ err = -ENXIO;
+ goto release;
+ }
+ if (!dev_data->permissive) {
+ dev_data->permissive = 1;
+ /* Let user know that what they're doing could be unsafe */
+ dev_warn(&psdev->dev->dev, "enabling permissive mode "
+ "configuration space accesses!\n");
+ dev_warn(&psdev->dev->dev,
+ "permissive mode is potentially unsafe!\n");
+ }
+release:
+ pcistub_device_put(psdev);
+out:
+ if (!err)
+ err = count;
+ return err;
+}
+
+static ssize_t permissive_show(struct device_driver *drv, char *buf)
+{
+ struct pcistub_device *psdev;
+ struct xen_pcibk_dev_data *dev_data;
+ size_t count = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&pcistub_devices_lock, flags);
+ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+ if (count >= PAGE_SIZE)
+ break;
+ if (!psdev->dev)
+ continue;
+ dev_data = pci_get_drvdata(psdev->dev);
+ if (!dev_data || !dev_data->permissive)
+ continue;
+ count +=
+ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
+ pci_name(psdev->dev));
+ }
+ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+ return count;
+}
+
+DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
+
+static void pcistub_exit(void)
+{
+ driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_remove_slot);
+ driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
+ driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_permissive);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handlers);
+ driver_remove_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handler_state);
+ pci_unregister_driver(&xen_pcibk_pci_driver);
+}
+
+static int __init pcistub_init(void)
+{
+ int pos = 0;
+ int err = 0;
+ int domain, bus, slot, func;
+ int parsed;
+
+ if (pci_devs_to_hide && *pci_devs_to_hide) {
+ do {
+ parsed = 0;
+
+ err = sscanf(pci_devs_to_hide + pos,
+ " (%x:%x:%x.%x) %n",
+ &domain, &bus, &slot, &func, &parsed);
+ if (err != 4) {
+ domain = 0;
+ err = sscanf(pci_devs_to_hide + pos,
+ " (%x:%x.%x) %n",
+ &bus, &slot, &func, &parsed);
+ if (err != 3)
+ goto parse_error;
+ }
+
+ err = pcistub_device_id_add(domain, bus, slot, func);
+ if (err)
+ goto out;
+
+ /* if parsed<=0, we've reached the end of the string */
+ pos += parsed;
+ } while (parsed > 0 && pci_devs_to_hide[pos]);
+ }
+
+ /* If we're the first PCI Device Driver to register, we're the
+ * first one to get offered PCI devices as they become
+ * available (and thus we can be the first to grab them)
+ */
+ err = pci_register_driver(&xen_pcibk_pci_driver);
+ if (err < 0)
+ goto out;
+
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_new_slot);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_remove_slot);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_slots);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_quirks);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_permissive);
+
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handlers);
+ if (!err)
+ err = driver_create_file(&xen_pcibk_pci_driver.driver,
+ &driver_attr_irq_handler_state);
+ if (err)
+ pcistub_exit();
+
+out:
+ return err;
+
+parse_error:
+ printk(KERN_ERR DRV_NAME ": Error parsing pci_devs_to_hide at \"%s\"\n",
+ pci_devs_to_hide + pos);
+ return -EINVAL;
+}
+
+#ifndef MODULE
+/*
+ * fs_initcall happens before device_initcall
+ * so xen_pcibk *should* get called first (b/c we
+ * want to suck up any device before other drivers
+ * get a chance by being the first pci device
+ * driver to register)
+ */
+fs_initcall(pcistub_init);
+#endif
+
+static int __init xen_pcibk_init(void)
+{
+ int err;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ err = xen_pcibk_config_init();
+ if (err)
+ return err;
+
+#ifdef MODULE
+ err = pcistub_init();
+ if (err < 0)
+ return err;
+#endif
+
+ pcistub_init_devices_late();
+ err = xen_pcibk_xenbus_register();
+ if (err)
+ pcistub_exit();
+
+ return err;
+}
+
+static void __exit xen_pcibk_cleanup(void)
+{
+ xen_pcibk_xenbus_unregister();
+ pcistub_exit();
+}
+
+module_init(xen_pcibk_init);
+module_exit(xen_pcibk_cleanup);
+
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
new file mode 100644
index 00000000000..a0e131a8150
--- /dev/null
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -0,0 +1,183 @@
+/*
+ * PCI Backend Common Data Structures & Function Declarations
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#ifndef __XEN_PCIBACK_H__
+#define __XEN_PCIBACK_H__
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <xen/xenbus.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <xen/interface/io/pciif.h>
+
+struct pci_dev_entry {
+ struct list_head list;
+ struct pci_dev *dev;
+};
+
+#define _PDEVF_op_active (0)
+#define PDEVF_op_active (1<<(_PDEVF_op_active))
+#define _PCIB_op_pending (1)
+#define PCIB_op_pending (1<<(_PCIB_op_pending))
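+
+/* _PDEVF_op_active marks an op_work handler in flight; _PCIB_op_pending
+ * marks an outstanding AER request that is awaiting the frontend's ack
+ * (see xen_pcibk_test_and_schedule_op() in pciback_ops.c). */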
+
+struct xen_pcibk_device {
+ void *pci_dev_data;
+ spinlock_t dev_lock;
+ struct xenbus_device *xdev;
+ struct xenbus_watch be_watch;
+ u8 be_watching;
+ int evtchn_irq;
+ struct xen_pci_sharedinfo *sh_info;
+ unsigned long flags;
+ struct work_struct op_work;
+};
+
+struct xen_pcibk_dev_data {
+ struct list_head config_fields;
+ unsigned int permissive:1;
+ unsigned int warned_on_write:1;
+ unsigned int enable_intx:1;
+ unsigned int isr_on:1; /* Whether the IRQ handler is installed. */
+ unsigned int ack_intr:1; /* .. and ACK-ing */
+ unsigned long handled;
+ unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
+ char irq_name[0]; /* xen-pcibk[000:04:00.0] */
+};
+
+/* Used by XenBus and xen_pcibk_ops.c */
+extern wait_queue_head_t xen_pcibk_aer_wait_queue;
+extern struct workqueue_struct *xen_pcibk_wq;
+/* Used by pcistub.c and conf_space_quirks.c */
+extern struct list_head xen_pcibk_quirks;
+
+/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
+struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
+ int domain, int bus,
+ int slot, int func);
+struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev);
+void pcistub_put_pci_dev(struct pci_dev *dev);
+
+/* Ensure a device is turned off or reset */
+void xen_pcibk_reset_device(struct pci_dev *pdev);
+
+/* Access a virtual configuration space for a PCI device */
+int xen_pcibk_config_init(void);
+int xen_pcibk_config_init_dev(struct pci_dev *dev);
+void xen_pcibk_config_free_dyn_fields(struct pci_dev *dev);
+void xen_pcibk_config_reset_dev(struct pci_dev *dev);
+void xen_pcibk_config_free_dev(struct pci_dev *dev);
+int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
+ u32 *ret_val);
+int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size,
+ u32 value);
+
+/* Handle requests for specific devices from the frontend */
+typedef int (*publish_pci_dev_cb) (struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus,
+ unsigned int devfn, unsigned int devid);
+typedef int (*publish_pci_root_cb) (struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus);
+
+/* Backend registration for the two types of BDF representation:
+ * vpci - BDFs start at 00
+ * passthrough - BDFs are exactly like in the host.
+ */
+struct xen_pcibk_backend {
+ char *name;
+ int (*init)(struct xen_pcibk_device *pdev);
+ void (*free)(struct xen_pcibk_device *pdev);
+ int (*find)(struct pci_dev *pcidev, struct xen_pcibk_device *pdev,
+ unsigned int *domain, unsigned int *bus,
+ unsigned int *devfn);
+ int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
+ void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev);
+ int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
+ int devid, publish_pci_dev_cb publish_cb);
+ struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus,
+ unsigned int devfn);
+};
+
+extern struct xen_pcibk_backend xen_pcibk_vpci_backend;
+extern struct xen_pcibk_backend xen_pcibk_passthrough_backend;
+extern struct xen_pcibk_backend *xen_pcibk_backend;
+
+static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev,
+ int devid,
+ publish_pci_dev_cb publish_cb)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->add)
+ return xen_pcibk_backend->add(pdev, dev, devid, publish_cb);
+ return -1;
+};
+static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->release)
+ return xen_pcibk_backend->release(pdev, dev);
+};
+
+static inline struct pci_dev *
+xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain,
+ unsigned int bus, unsigned int devfn)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->get)
+ return xen_pcibk_backend->get(pdev, domain, bus, devfn);
+ return NULL;
+};
+/**
+* Added for dom0 PCIe AER handling: get the guest's domain/bus/devfn in
+* xen_pcibk before sending an AER request to pcifront, so the guest can
+* identify the device and cooperate with xen_pcibk to finish the AER
+* recovery job, provided the device driver has the capability.
+*/
+static inline int xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
+ struct xen_pcibk_device *pdev,
+ unsigned int *domain,
+ unsigned int *bus,
+ unsigned int *devfn)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->find)
+ return xen_pcibk_backend->find(pcidev, pdev, domain, bus,
+ devfn);
+ return -1;
+};
+static inline int xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->init)
+ return xen_pcibk_backend->init(pdev);
+ return -1;
+};
+static inline int xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
+ publish_pci_root_cb cb)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->publish)
+ return xen_pcibk_backend->publish(pdev, cb);
+ return -1;
+};
+static inline void xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+{
+ if (xen_pcibk_backend && xen_pcibk_backend->free)
+ return xen_pcibk_backend->free(pdev);
+};
+/* Handles events from front-end */
+irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id);
+void xen_pcibk_do_op(struct work_struct *data);
+
+int xen_pcibk_xenbus_register(void);
+void xen_pcibk_xenbus_unregister(void);
+
+extern int verbose_request;
+
+void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev);
+#endif
+
+/* Handles shared IRQs that can go to both the device domain and the control domain. */
+void xen_pcibk_irq_handler(struct pci_dev *dev, int reset);
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
new file mode 100644
index 00000000000..8c95c3415b7
--- /dev/null
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -0,0 +1,384 @@
+/*
+ * PCI Backend Operations - respond to PCI requests from Frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <xen/events.h>
+#include <linux/sched.h>
+#include "pciback.h"
+
+#define DRV_NAME "xen-pciback"
+int verbose_request;
+module_param(verbose_request, int, 0644);
+
+static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id);
+
+/* Ensure a device has its fake IRQ handler "turned on/off" and is
+ * ready to be exported. This MUST be run after xen_pcibk_reset_device,
+ * which does the actual PCI device enable/disable.
+ */
+static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int rc;
+ int enable = 0;
+
+ dev_data = pci_get_drvdata(dev);
+ if (!dev_data)
+ return;
+
+ /* We don't deal with bridges */
+ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)
+ return;
+
+ if (reset) {
+ dev_data->enable_intx = 0;
+ dev_data->ack_intr = 0;
+ }
+ enable = dev_data->enable_intx;
+
+ /* Asked to disable, but the ISR isn't running */
+ if (!enable && !dev_data->isr_on)
+ return;
+
+ /* Squirrel away the IRQ in dev_data. We need this because when
+ * the device transitions to MSI, dev->irq is overwritten with
+ * the MSI vector.
+ */
+ if (enable)
+ dev_data->irq = dev->irq;
+
+ /*
+ * SR-IOV devices generally use MSI-X and have no legacy
+ * interrupts, so inhibit creating a fake IRQ handler for them.
+ */
+ if (dev_data->irq == 0)
+ goto out;
+
+ dev_dbg(&dev->dev, "%s: #%d %s %s%s %s-> %s\n",
+ dev_data->irq_name,
+ dev_data->irq,
+ pci_is_enabled(dev) ? "on" : "off",
+ dev->msi_enabled ? "MSI" : "",
+ dev->msix_enabled ? "MSI/X" : "",
+ dev_data->isr_on ? "enable" : "disable",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ rc = request_irq(dev_data->irq,
+ xen_pcibk_guest_interrupt, IRQF_SHARED,
+ dev_data->irq_name, dev);
+ if (rc) {
+ dev_err(&dev->dev, "%s: failed to install fake IRQ " \
+ "handler for IRQ %d! (rc:%d)\n",
+ dev_data->irq_name, dev_data->irq, rc);
+ goto out;
+ }
+ } else {
+ free_irq(dev_data->irq, dev);
+ dev_data->irq = 0;
+ }
+ dev_data->isr_on = enable;
+ dev_data->ack_intr = enable;
+out:
+ dev_dbg(&dev->dev, "%s: #%d %s %s%s %s\n",
+ dev_data->irq_name,
+ dev_data->irq,
+ pci_is_enabled(dev) ? "on" : "off",
+ dev->msi_enabled ? "MSI" : "",
+ dev->msix_enabled ? "MSI/X" : "",
+ enable ? (dev_data->isr_on ? "enabled" : "failed to enable") :
+ (dev_data->isr_on ? "failed to disable" : "disabled"));
+}
+
+/* Ensure a device is "turned off" and ready to be exported.
+ * (Also see xen_pcibk_config_reset to ensure virtual configuration space is
+ * ready to be re-exported)
+ */
+void xen_pcibk_reset_device(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ xen_pcibk_control_isr(dev, 1 /* reset device */);
+
+ /* Disable devices (but not bridges) */
+ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
+#ifdef CONFIG_PCI_MSI
+ /* The guest could have been abruptly killed without
+ * disabling MSI/MSI-X interrupts.*/
+ if (dev->msix_enabled)
+ pci_disable_msix(dev);
+ if (dev->msi_enabled)
+ pci_disable_msi(dev);
+#endif
+ pci_disable_device(dev);
+
+ pci_write_config_word(dev, PCI_COMMAND, 0);
+
+ dev->is_busmaster = 0;
+ } else {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & (PCI_COMMAND_INVALIDATE)) {
+ cmd &= ~(PCI_COMMAND_INVALIDATE);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ dev->is_busmaster = 0;
+ }
+ }
+}
+
+#ifdef CONFIG_PCI_MSI
+static
+int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int otherend = pdev->xdev->otherend_id;
+ int status;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
+
+ status = pci_enable_msi(dev);
+
+ if (status) {
+ printk(KERN_ERR "error enable msi for guest %x status %x\n",
+ otherend, status);
+ op->value = 0;
+ return XEN_PCI_ERR_op_failed;
+ }
+
+ /* The value the guest needs is actually the IDT vector, not the
+ * local domain's IRQ number. */
+
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
+ op->value);
+
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 0;
+
+ return 0;
+}
+
+static
+int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
+ pci_name(dev));
+ pci_disable_msi(dev);
+
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
+ op->value);
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ return 0;
+}
+
+static
+int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ int i, result;
+ struct msix_entry *entries;
+
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
+ pci_name(dev));
+ if (op->value > SH_INFO_MAX_VEC)
+ return -EINVAL;
+
+ entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
+ if (entries == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < op->value; i++) {
+ entries[i].entry = op->msix_entries[i].entry;
+ entries[i].vector = op->msix_entries[i].vector;
+ }
+
+ result = pci_enable_msix(dev, entries, op->value);
+
+ if (result == 0) {
+ for (i = 0; i < op->value; i++) {
+ op->msix_entries[i].entry = entries[i].entry;
+ if (entries[i].vector)
+ op->msix_entries[i].vector =
+ xen_pirq_from_irq(entries[i].vector);
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: " \
+ "MSI-X[%d]: %d\n",
+ pci_name(dev), i,
+ op->msix_entries[i].vector);
+ }
+ } else {
+ printk(KERN_WARNING DRV_NAME ": %s: failed to enable MSI-X: err %d!\n",
+ pci_name(dev), result);
+ }
+ kfree(entries);
+
+ op->value = result;
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 0;
+
+ return result;
+}
+
+static
+int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, struct xen_pci_op *op)
+{
+ struct xen_pcibk_dev_data *dev_data;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
+ pci_name(dev));
+ pci_disable_msix(dev);
+
+ /*
+ * SR-IOV devices (which don't have any legacy IRQ) have
+ * an undefined IRQ value of zero.
+ */
+ op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
+ if (unlikely(verbose_request))
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
+ op->value);
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ return 0;
+}
+#endif
+/*
+* Now the same evtchn is used both for pcifront conf_read_write requests
+* and for PCIe AER frontend acks. We use a separate workqueue to schedule
+* the xen_pcibk conf_read_write service, avoiding conflicts with the AER
+* core's do_recovery job, which also uses the system default workqueue.
+*/
+void xen_pcibk_test_and_schedule_op(struct xen_pcibk_device *pdev)
+{
+ /* Check that frontend is requesting an operation and that we are not
+ * already processing a request */
+ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
+ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags)) {
+ queue_work(xen_pcibk_wq, &pdev->op_work);
+ }
+ /* _XEN_PCIB_active should have been cleared by pcifront. Also make
+ * sure xen_pcibk is waiting for an ack by checking _PCIB_op_pending. */
+ if (!test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)
+ && test_bit(_PCIB_op_pending, &pdev->flags)) {
+ wake_up(&xen_pcibk_aer_wait_queue);
+ }
+}
+
+/* Performing the configuration space reads/writes must not be done in atomic
+ * context because some of the pci_* functions can sleep (mostly due to ACPI
+ * use of semaphores). This function is intended to be called from a work
+ * queue in process context taking a struct xen_pcibk_device as a parameter */
+
+void xen_pcibk_do_op(struct work_struct *data)
+{
+ struct xen_pcibk_device *pdev =
+ container_of(data, struct xen_pcibk_device, op_work);
+ struct pci_dev *dev;
+ struct xen_pcibk_dev_data *dev_data = NULL;
+ struct xen_pci_op *op = &pdev->sh_info->op;
+ int test_intx = 0;
+
+ dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
+
+ if (dev == NULL)
+ op->err = XEN_PCI_ERR_dev_not_found;
+ else {
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ test_intx = dev_data->enable_intx;
+ switch (op->cmd) {
+ case XEN_PCI_OP_conf_read:
+ op->err = xen_pcibk_config_read(dev,
+ op->offset, op->size, &op->value);
+ break;
+ case XEN_PCI_OP_conf_write:
+ op->err = xen_pcibk_config_write(dev,
+ op->offset, op->size, op->value);
+ break;
+#ifdef CONFIG_PCI_MSI
+ case XEN_PCI_OP_enable_msi:
+ op->err = xen_pcibk_enable_msi(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_disable_msi:
+ op->err = xen_pcibk_disable_msi(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_enable_msix:
+ op->err = xen_pcibk_enable_msix(pdev, dev, op);
+ break;
+ case XEN_PCI_OP_disable_msix:
+ op->err = xen_pcibk_disable_msix(pdev, dev, op);
+ break;
+#endif
+ default:
+ op->err = XEN_PCI_ERR_not_implemented;
+ break;
+ }
+ }
+ if (!op->err && dev && dev_data) {
+ /* Transition detected */
+ if ((dev_data->enable_intx != test_intx))
+ xen_pcibk_control_isr(dev, 0 /* no reset */);
+ }
+ /* Tell the driver domain that we're done. */
+ wmb();
+ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_irq(pdev->evtchn_irq);
+
+ /* Mark that we're done. */
+ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
+ clear_bit(_PDEVF_op_active, &pdev->flags);
+ smp_mb__after_clear_bit(); /* /before/ final check for work */
+
+ /* Check to see if the driver domain tried to start another request in
+ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active.
+ */
+ xen_pcibk_test_and_schedule_op(pdev);
+}
+
+irqreturn_t xen_pcibk_handle_event(int irq, void *dev_id)
+{
+ struct xen_pcibk_device *pdev = dev_id;
+
+ xen_pcibk_test_and_schedule_op(pdev);
+
+ return IRQ_HANDLED;
+}
+static irqreturn_t xen_pcibk_guest_interrupt(int irq, void *dev_id)
+{
+ struct pci_dev *dev = (struct pci_dev *)dev_id;
+ struct xen_pcibk_dev_data *dev_data = pci_get_drvdata(dev);
+
+ if (dev_data->isr_on && dev_data->ack_intr) {
+ dev_data->handled++;
+ if ((dev_data->handled % 1000) == 0) {
+ if (xen_test_irq_shared(irq)) {
+ printk(KERN_INFO "%s IRQ line is not shared "
+ "with other domains. Turning ISR off\n",
+ dev_data->irq_name);
+ dev_data->ack_intr = 0;
+ }
+ }
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
new file mode 100644
index 00000000000..4a42cfb0959
--- /dev/null
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -0,0 +1,259 @@
+/*
+ * PCI Backend - Provides a Virtual PCI bus (with real devices)
+ * to the frontend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include "pciback.h"
+
+#define PCI_SLOT_MAX 32
+#define DRV_NAME "xen-pciback"
+
+struct vpci_dev_data {
+ /* Access to dev_list must be protected by lock */
+ struct list_head dev_list[PCI_SLOT_MAX];
+ spinlock_t lock;
+};
+
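+/* Small readability helper: returns head->next, i.e. the first entry of a
+ * non-empty list; used by the multi-function grouping below. */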
+static inline struct list_head *list_first(struct list_head *head)
+{
+ return head->next;
+}
+
+static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev,
+ unsigned int domain,
+ unsigned int bus,
+ unsigned int devfn)
+{
+ struct pci_dev_entry *entry;
+ struct pci_dev *dev = NULL;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+
+ if (domain != 0 || bus != 0)
+ return NULL;
+
+ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+
+ list_for_each_entry(entry,
+ &vpci_dev->dev_list[PCI_SLOT(devfn)],
+ list) {
+ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
+ dev = entry->dev;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ }
+ return dev;
+}
+
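+/* Two devices match when they share domain, bus and slot, i.e. they are
+ * functions of the same physical device. */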
+static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
+{
+ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
+ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
+ return 1;
+
+ return 0;
+}
+
+static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev, int devid,
+ publish_pci_dev_cb publish_cb)
+{
+ int err = 0, slot, func = -1;
+ struct pci_dev_entry *t, *dev_entry;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+
+ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
+ err = -EFAULT;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Can't export bridges on the virtual PCI bus");
+ goto out;
+ }
+
+ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
+ if (!dev_entry) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error adding entry to virtual PCI bus");
+ goto out;
+ }
+
+ dev_entry->dev = dev;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+
+ /* Keep multi-function devices together on the virtual PCI bus */
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ if (!list_empty(&vpci_dev->dev_list[slot])) {
+ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
+ struct pci_dev_entry, list);
+
+ if (match_slot(dev, t->dev)) {
+ pr_info(DRV_NAME ": vpci: %s: "
+ "assign to virtual slot %d func %d\n",
+ pci_name(dev), slot,
+ PCI_FUNC(dev->devfn));
+ list_add_tail(&dev_entry->list,
+ &vpci_dev->dev_list[slot]);
+ func = PCI_FUNC(dev->devfn);
+ goto unlock;
+ }
+ }
+ }
+
+ /* Assign to a new slot on the virtual PCI bus */
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ if (list_empty(&vpci_dev->dev_list[slot])) {
+ printk(KERN_INFO DRV_NAME
+ ": vpci: %s: assign to virtual slot %d\n",
+ pci_name(dev), slot);
+ list_add_tail(&dev_entry->list,
+ &vpci_dev->dev_list[slot]);
+ func = PCI_FUNC(dev->devfn);
+ goto unlock;
+ }
+ }
+
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "No more space on root virtual PCI bus");
+
+unlock:
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+
+ /* Publish this device. */
+ if (!err)
+ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
+
+out:
+ return err;
+}
+
+static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
+ struct pci_dev *dev)
+{
+ int slot;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ struct pci_dev *found_dev = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ struct pci_dev_entry *e, *tmp;
+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
+ list) {
+ if (e->dev == dev) {
+ list_del(&e->list);
+ found_dev = e->dev;
+ kfree(e);
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+
+ if (found_dev)
+ pcistub_put_pci_dev(found_dev);
+}
+
+static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
+{
+ int slot;
+ struct vpci_dev_data *vpci_dev;
+
+ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
+ if (!vpci_dev)
+ return -ENOMEM;
+
+ spin_lock_init(&vpci_dev->lock);
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
+ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
+
+ pdev->pci_dev_data = vpci_dev;
+
+ return 0;
+}
+
+static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev,
+ publish_pci_root_cb publish_cb)
+{
+ /* The Virtual PCI bus has only one root */
+ return publish_cb(pdev, 0, 0);
+}
+
+static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
+{
+ int slot;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ struct pci_dev_entry *e, *tmp;
+ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
+ list) {
+ list_del(&e->list);
+ pcistub_put_pci_dev(e->dev);
+ kfree(e);
+ }
+ }
+
+ kfree(vpci_dev);
+ pdev->pci_dev_data = NULL;
+}
+
+static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev,
+ struct xen_pcibk_device *pdev,
+ unsigned int *domain, unsigned int *bus,
+ unsigned int *devfn)
+{
+ struct pci_dev_entry *entry;
+ struct pci_dev *dev = NULL;
+ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
+ unsigned long flags;
+ int found = 0, slot;
+
+ spin_lock_irqsave(&vpci_dev->lock, flags);
+ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
+ list_for_each_entry(entry,
+ &vpci_dev->dev_list[slot],
+ list) {
+ dev = entry->dev;
+ if (dev && dev->bus->number == pcidev->bus->number
+ && pci_domain_nr(dev->bus) ==
+ pci_domain_nr(pcidev->bus)
+ && dev->devfn == pcidev->devfn) {
+ found = 1;
+ *domain = 0;
+ *bus = 0;
+ *devfn = PCI_DEVFN(slot,
+ PCI_FUNC(pcidev->devfn));
+ }
+ }
+ }
+ spin_unlock_irqrestore(&vpci_dev->lock, flags);
+ return found;
+}
+
+struct xen_pcibk_backend xen_pcibk_vpci_backend = {
+ .name = "vpci",
+ .init = __xen_pcibk_init_devices,
+ .free = __xen_pcibk_release_devices,
+ .find = __xen_pcibk_get_pcifront_dev,
+ .publish = __xen_pcibk_publish_pci_roots,
+ .release = __xen_pcibk_release_pci_dev,
+ .add = __xen_pcibk_add_pci_dev,
+ .get = __xen_pcibk_get_pci_dev,
+};
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
new file mode 100644
index 00000000000..206c4ce030b
--- /dev/null
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -0,0 +1,749 @@
+/*
+ * PCI Backend Xenbus Setup - handles setup with frontend and xend
+ *
+ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <asm/xen/pci.h>
+#include <linux/workqueue.h>
+#include "pciback.h"
+
+#define DRV_NAME "xen-pciback"
+#define INVALID_EVTCHN_IRQ (-1)
+struct workqueue_struct *xen_pcibk_wq;
+
+static bool __read_mostly passthrough;
+module_param(passthrough, bool, S_IRUGO);
+MODULE_PARM_DESC(passthrough,
+ "Option to specify how to export PCI topology to guest:\n"\
+ " 0 - (default) Hide the true PCI topology and makes the frontend\n"\
+ " there is a single PCI bus with only the exported devices on it.\n"\
+ " For example, a device at 03:05.0 will be re-assigned to 00:00.0\n"\
+ " while second device at 02:1a.1 will be re-assigned to 00:01.1.\n"\
+ " 1 - Passthrough provides a real view of the PCI topology to the\n"\
+ " frontend (for example, a device at 06:01.b will still appear at\n"\
+ " 06:01.b to the frontend). This is similar to how Xen 2.0.x\n"\
+ " exposed PCI devices to its driver domains. This may be required\n"\
+ " for drivers which depend on finding their hardward in certain\n"\
+ " bus/slot locations.");
+
+static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
+{
+ struct xen_pcibk_device *pdev;
+
+ pdev = kzalloc(sizeof(struct xen_pcibk_device), GFP_KERNEL);
+ if (pdev == NULL)
+ goto out;
+ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
+
+ pdev->xdev = xdev;
+ dev_set_drvdata(&xdev->dev, pdev);
+
+ spin_lock_init(&pdev->dev_lock);
+
+ pdev->sh_info = NULL;
+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
+ pdev->be_watching = 0;
+
+ INIT_WORK(&pdev->op_work, xen_pcibk_do_op);
+
+ if (xen_pcibk_init_devices(pdev)) {
+ kfree(pdev);
+ pdev = NULL;
+ }
+out:
+ return pdev;
+}
+
+static void xen_pcibk_disconnect(struct xen_pcibk_device *pdev)
+{
+ spin_lock(&pdev->dev_lock);
+
+ /* Ensure the guest can't trigger our handler before removing devices */
+ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
+ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
+ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
+ }
+ spin_unlock(&pdev->dev_lock);
+
+ /* If the driver domain started an op, make sure we complete it
+ * before releasing the shared memory */
+
+ /* Note, the workqueue does not use spinlocks at all.*/
+ flush_workqueue(xen_pcibk_wq);
+
+ spin_lock(&pdev->dev_lock);
+ if (pdev->sh_info != NULL) {
+ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_info);
+ pdev->sh_info = NULL;
+ }
+ spin_unlock(&pdev->dev_lock);
+
+}
+
+static void free_pdev(struct xen_pcibk_device *pdev)
+{
+ if (pdev->be_watching) {
+ unregister_xenbus_watch(&pdev->be_watch);
+ pdev->be_watching = 0;
+ }
+
+ xen_pcibk_disconnect(pdev);
+
+ xen_pcibk_release_devices(pdev);
+
+ dev_set_drvdata(&pdev->xdev->dev, NULL);
+ pdev->xdev = NULL;
+
+ kfree(pdev);
+}
+
+static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
+ int remote_evtchn)
+{
+ int err = 0;
+ void *vaddr;
+
+ dev_dbg(&pdev->xdev->dev,
+ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
+ gnt_ref, remote_evtchn);
+
+ err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error mapping other domain page in ours.");
+ goto out;
+ }
+
+ spin_lock(&pdev->dev_lock);
+ pdev->sh_info = vaddr;
+ spin_unlock(&pdev->dev_lock);
+
+ err = bind_interdomain_evtchn_to_irqhandler(
+ pdev->xdev->otherend_id, remote_evtchn, xen_pcibk_handle_event,
+ 0, DRV_NAME, pdev);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error binding event channel to IRQ");
+ goto out;
+ }
+
+ spin_lock(&pdev->dev_lock);
+ pdev->evtchn_irq = err;
+ spin_unlock(&pdev->dev_lock);
+ err = 0;
+
+ dev_dbg(&pdev->xdev->dev, "Attached!\n");
+out:
+ return err;
+}
+
+static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
+{
+ int err = 0;
+ int gnt_ref, remote_evtchn;
+ char *magic = NULL;
+
+ /* Make sure we only do this setup once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitialised)
+ goto out;
+
+ /* Wait for frontend to state that it has published the configuration */
+ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
+ XenbusStateInitialised)
+ goto out;
+
+ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
+
+ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
+ "pci-op-ref", "%u", &gnt_ref,
+ "event-channel", "%u", &remote_evtchn,
+ "magic", NULL, &magic, NULL);
+ if (err) {
+ /* If configuration didn't get read correctly, wait longer */
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading configuration from frontend");
+ goto out;
+ }
+
+ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
+ xenbus_dev_fatal(pdev->xdev, -EFAULT,
+ "version mismatch (%s/%s) with pcifront - "
+ "halting xen_pcibk",
+ magic, XEN_PCI_MAGIC);
+ goto out;
+ }
+
+ err = xen_pcibk_do_attach(pdev, gnt_ref, remote_evtchn);
+ if (err)
+ goto out;
+
+ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
+ if (err)
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching to connected state!");
+
+ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
+out:
+
+ kfree(magic);
+
+ return err;
+}
+
+static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus,
+ unsigned int devfn, unsigned int devid)
+{
+ int err;
+ int len;
+ char str[64];
+
+ len = snprintf(str, sizeof(str), "vdev-%d", devid);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
+ "%04x:%02x:%02x.%02x", domain, bus,
+ PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+out:
+ return err;
+}
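
The snprintf-truncation check just used (len >= sizeof(str) - 1 yielding -ENOMEM) recurs verbatim for every xenstore key this file formats: "vdev-%d", "root-%d", "state-%d" and "dev-%d". A helper could centralize it; the sketch below is illustrative only (the name xen_pcibk_fmt_key is hypothetical and not part of this patch):

    static int xen_pcibk_fmt_key(char *buf, size_t size, const char *fmt, int id)
    {
    	int len = snprintf(buf, size, fmt, id);

    	if (len < 0 || len >= (int)(size - 1))
    		return -ENOMEM;
    	return 0;
    }

With such a helper, the call above would read: err = xen_pcibk_fmt_key(str, sizeof(str), "vdev-%d", devid);
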
+
+static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
+ int domain, int bus, int slot, int func,
+ int devid)
+{
+ struct pci_dev *dev;
+ int err = 0;
+
+ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
+ domain, bus, slot, func);
+
+ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
+ if (!dev) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Couldn't locate PCI device "
+ "(%04x:%02x:%02x.%01x)! "
+ "perhaps already in-use?",
+ domain, bus, slot, func);
+ goto out;
+ }
+
+ err = xen_pcibk_add_pci_dev(pdev, dev, devid,
+ xen_pcibk_publish_pci_dev);
+ if (err)
+ goto out;
+
+ dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
+ if (xen_register_device_domain_owner(dev,
+ pdev->xdev->otherend_id) != 0) {
+ dev_err(&dev->dev, "device has been assigned to another " \
+ "domain! Over-writing the ownership, but beware.\n");
+ xen_unregister_device_domain_owner(dev);
+ xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
+ }
+
+ /* TODO: It'd be nice to export a bridge and have all of its children
+ * get exported with it. This may be best done in xend (which will
+ * have to calculate resource usage anyway) but we probably want to
+ * put something in here to ensure that if a bridge gets given to a
+ * driver domain, that all devices under that bridge are not given
+ * to other driver domains (as he who controls the bridge can disable
+ * it and stop the other devices from working).
+ */
+out:
+ return err;
+}
+
+static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
+ int domain, int bus, int slot, int func)
+{
+ int err = 0;
+ struct pci_dev *dev;
+
+ dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
+ domain, bus, slot, func);
+
+ dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
+ if (!dev) {
+ err = -EINVAL;
+ dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
+ "(%04x:%02x:%02x.%01x)! not owned by this domain\n",
+ domain, bus, slot, func);
+ goto out;
+ }
+
+ dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
+ xen_unregister_device_domain_owner(dev);
+
+ xen_pcibk_release_pci_dev(pdev, dev);
+
+out:
+ return err;
+}
+
+static int xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev,
+ unsigned int domain, unsigned int bus)
+{
+ unsigned int d, b;
+ int i, root_num, len, err;
+ char str[64];
+
+ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ "root_num", "%d", &root_num);
+ if (err == 0 || err == -ENOENT)
+ root_num = 0;
+ else if (err < 0)
+ goto out;
+
+ /* Verify that we haven't already published this pci root */
+ for (i = 0; i < root_num; i++) {
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ str, "%x:%x", &d, &b);
+ if (err < 0)
+ goto out;
+ if (err != 2) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (d == domain && b == bus) {
+ err = 0;
+ goto out;
+ }
+ }
+
+ len = snprintf(str, sizeof(str), "root-%d", root_num);
+ if (unlikely(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
+ root_num, domain, bus);
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
+ "%04x:%02x", domain, bus);
+ if (err)
+ goto out;
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
+ "root_num", "%d", (root_num + 1));
+
+out:
+ return err;
+}
+
+static int xen_pcibk_reconfigure(struct xen_pcibk_device *pdev)
+{
+ int err = 0;
+ int num_devs;
+ int domain, bus, slot, func;
+ int substate;
+ int i, len;
+ char state_str[64];
+ char dev_str[64];
+
+
+ dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
+
+ /* Make sure we only reconfigure once */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateReconfiguring)
+ goto out;
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
+ &num_devs);
+ if (err != 1) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of devices");
+ goto out;
+ }
+
+ for (i = 0; i < num_devs; i++) {
+ len = snprintf(state_str, sizeof(state_str), "state-%d", i);
+ if (unlikely(len >= (sizeof(state_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while reading "
+ "configuration");
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
+ "%d", &substate);
+ if (err != 1)
+ substate = XenbusStateUnknown;
+
+ switch (substate) {
+ case XenbusStateInitialising:
+ dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);
+
+ len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
+ if (unlikely(len >= (sizeof(dev_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while "
+ "reading configuration");
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ dev_str, "%x:%x:%x.%x",
+ &domain, &bus, &slot, &func);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading device "
+ "configuration");
+ goto out;
+ }
+ if (err != 4) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error parsing pci device "
+ "configuration");
+ goto out;
+ }
+
+ err = xen_pcibk_export_device(pdev, domain, bus, slot,
+ func, i);
+ if (err)
+ goto out;
+
+ /* Publish pci roots. */
+ err = xen_pcibk_publish_pci_roots(pdev,
+ xen_pcibk_publish_pci_root);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error while publish PCI root"
+ "buses for frontend");
+ goto out;
+ }
+
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
+ state_str, "%d",
+ XenbusStateInitialised);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching substate of "
+ "dev-%d\n", i);
+ goto out;
+ }
+ break;
+
+ case XenbusStateClosing:
+ dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);
+
+ len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
+ if (unlikely(len >= (sizeof(dev_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while "
+ "reading configuration");
+ goto out;
+ }
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
+ dev_str, "%x:%x:%x.%x",
+ &domain, &bus, &slot, &func);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading device "
+ "configuration");
+ goto out;
+ }
+ if (err != 4) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error parsing pci device "
+ "configuration");
+ goto out;
+ }
+
+ err = xen_pcibk_remove_device(pdev, domain, bus, slot,
+ func);
+ if (err)
+ goto out;
+
+ /* TODO: If at some point we implement support for pci
+ * root hot-remove on pcifront side, we'll need to
+ * remove unnecessary xenstore nodes of pci roots here.
+ */
+
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching to reconfigured state!");
+ goto out;
+ }
+
+out:
+ return 0;
+}
+
+static void xen_pcibk_frontend_changed(struct xenbus_device *xdev,
+ enum xenbus_state fe_state)
+{
+ struct xen_pcibk_device *pdev = dev_get_drvdata(&xdev->dev);
+
+ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
+
+ switch (fe_state) {
+ case XenbusStateInitialised:
+ xen_pcibk_attach(pdev);
+ break;
+
+ case XenbusStateReconfiguring:
+ xen_pcibk_reconfigure(pdev);
+ break;
+
+ case XenbusStateConnected:
+ /* pcifront switched its state from reconfiguring to connected.
+ * Then switch to connected state.
+ */
+ xenbus_switch_state(xdev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ xen_pcibk_disconnect(pdev);
+ xenbus_switch_state(xdev, XenbusStateClosing);
+ break;
+
+ case XenbusStateClosed:
+ xen_pcibk_disconnect(pdev);
+ xenbus_switch_state(xdev, XenbusStateClosed);
+ if (xenbus_dev_is_online(xdev))
+ break;
+ /* fall through if not online */
+ case XenbusStateUnknown:
+ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
+ device_unregister(&xdev->dev);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int xen_pcibk_setup_backend(struct xen_pcibk_device *pdev)
+{
+ /* Get configuration from xend (if available now) */
+ int domain, bus, slot, func;
+ int err = 0;
+ int i, num_devs;
+ char dev_str[64];
+ char state_str[64];
+
+ /* It's possible we could get the call to setup twice, so make sure
+ * we're not already connected.
+ */
+ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
+ XenbusStateInitWait)
+ goto out;
+
+ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
+ &num_devs);
+ if (err != 1) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of devices");
+ goto out;
+ }
+
+ for (i = 0; i < num_devs; i++) {
+ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
+ if (unlikely(l >= (sizeof(dev_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while reading "
+ "configuration");
+ goto out;
+ }
+
+ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
+ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
+ if (err < 0) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading device configuration");
+ goto out;
+ }
+ if (err != 4) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error parsing pci device "
+ "configuration");
+ goto out;
+ }
+
+ err = xen_pcibk_export_device(pdev, domain, bus, slot, func, i);
+ if (err)
+ goto out;
+
+ /* Switch substate of this device. */
+ l = snprintf(state_str, sizeof(state_str), "state-%d", i);
+ if (unlikely(l >= (sizeof(state_str) - 1))) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "String overflow while reading "
+ "configuration");
+ goto out;
+ }
+ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
+ "%d", XenbusStateInitialised);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err, "Error switching "
+ "substate of dev-%d\n", i);
+ goto out;
+ }
+ }
+
+ err = xen_pcibk_publish_pci_roots(pdev, xen_pcibk_publish_pci_root);
+ if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error while publish PCI root buses "
+ "for frontend");
+ goto out;
+ }
+
+ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
+ if (err)
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error switching to initialised state!");
+
+out:
+ if (!err)
+ /* see if pcifront is already configured (if not, we'll wait) */
+ xen_pcibk_attach(pdev);
+
+ return err;
+}
+
+static void xen_pcibk_be_watch(struct xenbus_watch *watch,
+ const char **vec, unsigned int len)
+{
+ struct xen_pcibk_device *pdev =
+ container_of(watch, struct xen_pcibk_device, be_watch);
+
+ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
+ case XenbusStateInitWait:
+ xen_pcibk_setup_backend(pdev);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err = 0;
+ struct xen_pcibk_device *pdev = alloc_pdev(dev);
+
+ if (pdev == NULL) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(dev, err,
+ "Error allocating xen_pcibk_device struct");
+ goto out;
+ }
+
+ /* wait for xend to configure us */
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+ goto out;
+
+ /* watch the backend node for backend configuration information */
+ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
+ xen_pcibk_be_watch);
+ if (err)
+ goto out;
+
+ pdev->be_watching = 1;
+
+ /* We need to force a call to our callback here in case
+ * xend already configured us!
+ */
+ xen_pcibk_be_watch(&pdev->be_watch, NULL, 0);
+
+out:
+ return err;
+}
+
+static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
+{
+ struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);
+
+ if (pdev != NULL)
+ free_pdev(pdev);
+
+ return 0;
+}
+
+static const struct xenbus_device_id xenpci_ids[] = {
+ {"pci"},
+ {""},
+};
+
+static struct xenbus_driver xenbus_xen_pcibk_driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .ids = xenpci_ids,
+ .probe = xen_pcibk_xenbus_probe,
+ .remove = xen_pcibk_xenbus_remove,
+ .otherend_changed = xen_pcibk_frontend_changed,
+};
+
+struct xen_pcibk_backend *xen_pcibk_backend;
+
+int __init xen_pcibk_xenbus_register(void)
+{
+ xen_pcibk_wq = create_workqueue("xen_pciback_workqueue");
+ if (!xen_pcibk_wq) {
+ printk(KERN_ERR "%s: create "
+ "xen_pciback_workqueue failed\n", __func__);
+ return -ENOMEM;
+ }
+ xen_pcibk_backend = &xen_pcibk_vpci_backend;
+ if (passthrough)
+ xen_pcibk_backend = &xen_pcibk_passthrough_backend;
+ pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
+ return xenbus_register_backend(&xenbus_xen_pcibk_driver);
+}
+
+void __exit xen_pcibk_xenbus_unregister(void)
+{
+ destroy_workqueue(xen_pcibk_wq);
+ xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
+}
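
Taken together, the routines above imply a xenstore layout roughly like the following for one exported device (values illustrative; the toolstack writes num_devs/dev-*/state-*, the backend writes back vdev-*, root_num and root-*, and pci-op-ref, event-channel and magic live on the frontend's side of the connection):

    backend/pci/<frontend-domid>/<devid>/
        num_devs = "1"
        dev-0    = "0000:03:05.0"
        state-0  = "3"              (XenbusStateInitialised)
        root_num = "1"
        root-0   = "0000:00"
        vdev-0   = "0000:00:00.0"

    (frontend node) .../pci-op-ref, .../event-channel, .../magic
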
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
new file mode 100644
index 00000000000..010937b5a7c
--- /dev/null
+++ b/drivers/xen/xen-selfballoon.c
@@ -0,0 +1,485 @@
+/******************************************************************************
+ * Xen selfballoon driver (and optional frontswap self-shrinking driver)
+ *
+ * Copyright (c) 2009-2011, Dan Magenheimer, Oracle Corp.
+ *
+ * This code complements the cleancache and frontswap patchsets to optimize
+ * support for Xen Transcendent Memory ("tmem"). The policy it implements
+ * is rudimentary and will likely improve over time, but it does work well
+ * enough today.
+ *
+ * Two functionalities are implemented here which both use "control theory"
+ * (feedback) to optimize memory utilization. In a virtualized environment
+ * such as Xen, RAM is often a scarce resource and we would like to ensure
+ * that each of a possibly large number of virtual machines is using RAM
+ * efficiently, i.e. using as little as possible when under light load
+ * and obtaining as much as possible when memory demands are high.
+ * Since RAM needs vary highly dynamically and sometimes dramatically,
+ * "hysteresis" is used: the memory target is determined not just
+ * from current data but also from past data stored in the system.
+ *
+ * "Selfballooning" creates memory pressure by managing the Xen balloon
+ * driver to decrease and increase available kernel memory, driven
+ * largely by the target value of "Committed_AS" (see /proc/meminfo).
+ * Since Committed_AS does not account for clean mapped pages (i.e. pages
+ * in RAM that are identical to pages on disk), selfballooning has the
+ * effect of pushing less frequently used clean pagecache pages out of
+ * kernel RAM and, presumably using cleancache, into Xen tmem where
+ * Xen can more efficiently optimize RAM utilization for such pages.
+ *
+ * When kernel memory demand unexpectedly increases faster than Xen
+ * (via the selfballoon driver) is able to, or chooses to, provide
+ * usable RAM, the kernel may invoke swapping. In most cases, frontswap
+ * is able to absorb this swapping into Xen tmem. However, because the
+ * kernel swap subsystem assumes swapping occurs to a disk, swapped
+ * pages may sit on the disk for a very long time, even if the kernel
+ * knows the page will never be used again; the disk space costs very
+ * little and can be overwritten when necessary. When such stale pages
+ * are in frontswap, however, they are taking up valuable real estate.
+ * "Frontswap selfshrinking" works to resolve this: when frontswap
+ * activity is otherwise stable and the guest kernel is not under
+ * memory pressure, frontswap selfshrinking provides pressure to
+ * remove some pages from frontswap and return them to kernel memory.
+ *
+ * For both "selfballooning" and "frontswap-selfshrinking", a worker
+ * thread is used and sysfs tunables are provided to adjust the frequency
+ * and rate of adjustments to achieve the goal, as well as to disable one
+ * or both functions independently.
+ *
+ * While some argue that this functionality can and should be implemented
+ * in userspace, it has been observed that bad things happen (e.g. OOMs).
+ *
+ * System configuration note: Selfballooning should not be enabled on
+ * systems without a sufficiently large swap device configured; for best
+ * results, it is recommended that total swap be increased by the size
+ * of the guest memory. Also, while technically not required to be
+ * configured, it is highly recommended that frontswap also be configured
+ * and enabled when selfballooning is running. So, selfballooning
+ * is disabled by default if frontswap is not configured and can only
+ * be enabled with the "selfballooning" kernel boot option; similarly
+ * selfballooning is enabled by default if frontswap is configured and
+ * can be disabled with the "noselfballooning" kernel boot option. Finally,
+ * when frontswap is configured, frontswap-selfshrinking can be disabled
+ * with the "noselfshrink" kernel boot option.
+ *
+ * Selfballooning is disallowed in domain0, where it is force-disabled.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+
+#include <xen/balloon.h>
+
+#include <xen/tmem.h>
+
+/* Enable/disable with sysfs. */
+static int xen_selfballooning_enabled __read_mostly;
+
+/*
+ * Controls rate at which memory target (this iteration) approaches
+ * ultimate goal when memory need is increasing (up-hysteresis) or
+ * decreasing (down-hysteresis). Higher values of hysteresis cause
+ * slower increases/decreases. The default values for the various
+ * parameters were deemed reasonable by experimentation, may be
+ * workload-dependent, and can all be adjusted via sysfs.
+ */
+static unsigned int selfballoon_downhysteresis __read_mostly = 8;
+static unsigned int selfballoon_uphysteresis __read_mostly = 1;
+
+/* In HZ, controls frequency of worker invocation. */
+static unsigned int selfballoon_interval __read_mostly = 5;
+
+static void selfballoon_process(struct work_struct *work);
+static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
+
+#ifdef CONFIG_FRONTSWAP
+#include <linux/frontswap.h>
+
+/* Enable/disable with sysfs. */
+static bool frontswap_selfshrinking __read_mostly;
+
+/* Enable/disable with kernel boot option. */
+static bool use_frontswap_selfshrink __initdata = true;
+
+/*
+ * The default values for the following parameters were deemed reasonable
+ * by experimentation, may be workload-dependent, and can all be
+ * adjusted via sysfs.
+ */
+
+/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
+static unsigned int frontswap_hysteresis __read_mostly = 20;
+
+/*
+ * Number of selfballoon worker invocations to wait before concluding
+ * that frontswap selfshrinking should commence. Note that selfshrinking does
+ * not use a separate worker thread.
+ */
+static unsigned int frontswap_inertia __read_mostly = 3;
+
+/* Countdown to next invocation of frontswap_shrink() */
+static unsigned long frontswap_inertia_counter;
+
+/*
+ * Invoked by the selfballoon worker thread, uses current number of pages
+ * in frontswap (frontswap_curr_pages()), previous status, and control
+ * values (hysteresis and inertia) to determine if frontswap should be
+ * shrunk and what the new frontswap size should be. Note that
+ * frontswap_shrink is essentially a partial swapoff that immediately
+ * transfers pages from the "swap device" (frontswap) back into kernel
+ * RAM; despite the name, frontswap "shrinking" is very different from
+ * the "shrinker" interface used by the kernel MM subsystem to reclaim
+ * memory.
+ */
+static void frontswap_selfshrink(void)
+{
+ static unsigned long cur_frontswap_pages;
+ static unsigned long last_frontswap_pages;
+ static unsigned long tgt_frontswap_pages;
+
+ last_frontswap_pages = cur_frontswap_pages;
+ cur_frontswap_pages = frontswap_curr_pages();
+ if (!cur_frontswap_pages ||
+ (cur_frontswap_pages > last_frontswap_pages)) {
+ frontswap_inertia_counter = frontswap_inertia;
+ return;
+ }
+ if (frontswap_inertia_counter && --frontswap_inertia_counter)
+ return;
+ if (cur_frontswap_pages <= frontswap_hysteresis)
+ tgt_frontswap_pages = 0;
+ else
+ tgt_frontswap_pages = cur_frontswap_pages -
+ (cur_frontswap_pages / frontswap_hysteresis);
+ frontswap_shrink(tgt_frontswap_pages);
+}
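
As a worked example of the control law above (numbers made up): with the default frontswap_hysteresis of 20 and a stable cur_frontswap_pages of 1000, each pass that survives the inertia countdown sets tgt_frontswap_pages = 1000 - 1000/20 = 950, shedding about 5% of frontswap per interval; once cur_frontswap_pages falls to 20 or below, the target drops straight to 0.
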
+
+static int __init xen_nofrontswap_selfshrink_setup(char *s)
+{
+ use_frontswap_selfshrink = false;
+ return 1;
+}
+
+__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
+
+/* Disable with kernel boot option. */
+static bool use_selfballooning __initdata = true;
+
+static int __init xen_noselfballooning_setup(char *s)
+{
+ use_selfballooning = false;
+ return 1;
+}
+
+__setup("noselfballooning", xen_noselfballooning_setup);
+#else /* !CONFIG_FRONTSWAP */
+/* Enable with kernel boot option. */
+static bool use_selfballooning __initdata = false;
+
+static int __init xen_selfballooning_setup(char *s)
+{
+ use_selfballooning = true;
+ return 1;
+}
+
+__setup("selfballooning", xen_selfballooning_setup);
+#endif /* CONFIG_FRONTSWAP */
+
+/*
+ * Use current balloon size, the goal (vm_committed_as), and hysteresis
+ * parameters to set a new target balloon size
+ */
+static void selfballoon_process(struct work_struct *work)
+{
+ unsigned long cur_pages, goal_pages, tgt_pages;
+ bool reset_timer = false;
+
+ if (xen_selfballooning_enabled) {
+ cur_pages = balloon_stats.current_pages;
+ tgt_pages = cur_pages; /* default is no change */
+ goal_pages = percpu_counter_read_positive(&vm_committed_as) +
+ balloon_stats.current_pages - totalram_pages;
+#ifdef CONFIG_FRONTSWAP
+ /* allow space for frontswap pages to be repatriated */
+ if (frontswap_selfshrinking && frontswap_enabled)
+ goal_pages += frontswap_curr_pages();
+#endif
+ if (cur_pages > goal_pages)
+ tgt_pages = cur_pages -
+ ((cur_pages - goal_pages) /
+ selfballoon_downhysteresis);
+ else if (cur_pages < goal_pages)
+ tgt_pages = cur_pages +
+ ((goal_pages - cur_pages) /
+ selfballoon_uphysteresis);
+ /* else if cur_pages == goal_pages, no change */
+ balloon_set_new_target(tgt_pages);
+ reset_timer = true;
+ }
+#ifdef CONFIG_FRONTSWAP
+ if (frontswap_selfshrinking && frontswap_enabled) {
+ frontswap_selfshrink();
+ reset_timer = true;
+ }
+#endif
+ if (reset_timer)
+ schedule_delayed_work(&selfballoon_worker,
+ selfballoon_interval * HZ);
+}
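
A quick illustration of the hysteresis arithmetic above, with made-up numbers (this is not code from the driver):

    /* cur_pages = 100000, goal_pages = 80000, downhysteresis = 8:
     * the balloon closes 1/8 of the 20000-page gap per interval. */
    tgt_pages = 100000 - ((100000 - 80000) / 8);   /* = 97500 */

    /* growing with the default uphysteresis of 1 jumps straight to goal: */
    tgt_pages = 80000 + ((100000 - 80000) / 1);    /* = 100000 */

So memory is released gradually (downhysteresis = 8) but reclaimed quickly (uphysteresis = 1) when demand rises.
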
+
+#ifdef CONFIG_SYSFS
+
+#include <linux/sysdev.h>
+#include <linux/capability.h>
+
+#define SELFBALLOON_SHOW(name, format, args...) \
+ static ssize_t show_##name(struct sys_device *dev, \
+ struct sysdev_attribute *attr, \
+ char *buf) \
+ { \
+ return sprintf(buf, format, ##args); \
+ }
+
+SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
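
For reference, the invocation above expands to a one-line show routine; modulo whitespace, the macro produces:

    static ssize_t show_selfballooning(struct sys_device *dev,
                                       struct sysdev_attribute *attr,
                                       char *buf)
    {
    	return sprintf(buf, "%d\n", xen_selfballooning_enabled);
    }
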
+
+static ssize_t store_selfballooning(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool was_enabled = xen_selfballooning_enabled;
+ unsigned long tmp;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ err = strict_strtoul(buf, 10, &tmp);
+ if (err || ((tmp != 0) && (tmp != 1)))
+ return -EINVAL;
+
+ xen_selfballooning_enabled = !!tmp;
+ if (!was_enabled && xen_selfballooning_enabled)
+ schedule_delayed_work(&selfballoon_worker,
+ selfballoon_interval * HZ);
+
+ return count;
+}
+
+static SYSDEV_ATTR(selfballooning, S_IRUGO | S_IWUSR,
+ show_selfballooning, store_selfballooning);
+
+SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
+
+static ssize_t store_selfballoon_interval(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_interval = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
+ show_selfballoon_interval, store_selfballoon_interval);
+
+SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
+
+static ssize_t store_selfballoon_downhys(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_downhysteresis = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
+ show_selfballoon_downhys, store_selfballoon_downhys);
+
+
+SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
+
+static ssize_t store_selfballoon_uphys(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ selfballoon_uphysteresis = val;
+ return count;
+}
+
+static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
+ show_selfballoon_uphys, store_selfballoon_uphys);
+
+#ifdef CONFIG_FRONTSWAP
+SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
+
+static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool was_enabled = frontswap_selfshrinking;
+ unsigned long tmp;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &tmp);
+ if (err || ((tmp != 0) && (tmp != 1)))
+ return -EINVAL;
+ frontswap_selfshrinking = !!tmp;
+ if (!was_enabled && !xen_selfballooning_enabled &&
+ frontswap_selfshrinking)
+ schedule_delayed_work(&selfballoon_worker,
+ selfballoon_interval * HZ);
+
+ return count;
+}
+
+static SYSDEV_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
+ show_frontswap_selfshrinking, store_frontswap_selfshrinking);
+
+SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
+
+static ssize_t store_frontswap_inertia(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ frontswap_inertia = val;
+ frontswap_inertia_counter = val;
+ return count;
+}
+
+static SYSDEV_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
+ show_frontswap_inertia, store_frontswap_inertia);
+
+SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
+
+static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ unsigned long val;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ err = strict_strtoul(buf, 10, &val);
+ if (err || val == 0)
+ return -EINVAL;
+ frontswap_hysteresis = val;
+ return count;
+}
+
+static SYSDEV_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
+ show_frontswap_hysteresis, store_frontswap_hysteresis);
+
+#endif /* CONFIG_FRONTSWAP */
+
+static struct attribute *selfballoon_attrs[] = {
+ &attr_selfballooning.attr,
+ &attr_selfballoon_interval.attr,
+ &attr_selfballoon_downhysteresis.attr,
+ &attr_selfballoon_uphysteresis.attr,
+#ifdef CONFIG_FRONTSWAP
+ &attr_frontswap_selfshrinking.attr,
+ &attr_frontswap_hysteresis.attr,
+ &attr_frontswap_inertia.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group selfballoon_group = {
+ .name = "selfballoon",
+ .attrs = selfballoon_attrs
+};
+#endif
+
+int register_xen_selfballooning(struct sys_device *sysdev)
+{
+ int error = -1;
+
+#ifdef CONFIG_SYSFS
+ error = sysfs_create_group(&sysdev->kobj, &selfballoon_group);
+#endif
+ return error;
+}
+EXPORT_SYMBOL(register_xen_selfballooning);
+
+static int __init xen_selfballoon_init(void)
+{
+ bool enable = false;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (xen_initial_domain()) {
+ pr_info("xen/balloon: Xen selfballooning driver "
+ "disabled for domain0.\n");
+ return -ENODEV;
+ }
+
+ xen_selfballooning_enabled = tmem_enabled && use_selfballooning;
+ if (xen_selfballooning_enabled) {
+ pr_info("xen/balloon: Initializing Xen "
+ "selfballooning driver.\n");
+ enable = true;
+ }
+#ifdef CONFIG_FRONTSWAP
+ frontswap_selfshrinking = tmem_enabled && use_frontswap_selfshrink;
+ if (frontswap_selfshrinking) {
+ pr_info("xen/balloon: Initializing frontswap "
+ "selfshrinking driver.\n");
+ enable = true;
+ }
+#endif
+ if (!enable)
+ return -ENODEV;
+
+ schedule_delayed_work(&selfballoon_worker, selfballoon_interval * HZ);
+
+ return 0;
+}
+
+subsys_initcall(xen_selfballoon_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 739769551e3..bd2f90c9ac8 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -378,26 +378,32 @@ static void xenbus_dev_release(struct device *dev)
kfree(to_xenbus_device(dev));
}
-static ssize_t xendev_show_nodename(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nodename_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->nodename);
}
-static DEVICE_ATTR(nodename, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_nodename, NULL);
-static ssize_t xendev_show_devtype(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t devtype_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_xenbus_device(dev)->devicetype);
}
-static DEVICE_ATTR(devtype, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_devtype, NULL);
-static ssize_t xendev_show_modalias(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "xen:%s\n", to_xenbus_device(dev)->devicetype);
+ return sprintf(buf, "%s:%s\n", dev->bus->name,
+ to_xenbus_device(dev)->devicetype);
}
-static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, xendev_show_modalias, NULL);
+
+struct device_attribute xenbus_dev_attrs[] = {
+ __ATTR_RO(nodename),
+ __ATTR_RO(devtype),
+ __ATTR_RO(modalias),
+ __ATTR_NULL
+};
+EXPORT_SYMBOL_GPL(xenbus_dev_attrs);
int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
@@ -449,25 +455,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
if (err)
goto fail;
- err = device_create_file(&xendev->dev, &dev_attr_nodename);
- if (err)
- goto fail_unregister;
-
- err = device_create_file(&xendev->dev, &dev_attr_devtype);
- if (err)
- goto fail_remove_nodename;
-
- err = device_create_file(&xendev->dev, &dev_attr_modalias);
- if (err)
- goto fail_remove_devtype;
-
return 0;
-fail_remove_devtype:
- device_remove_file(&xendev->dev, &dev_attr_devtype);
-fail_remove_nodename:
- device_remove_file(&xendev->dev, &dev_attr_nodename);
-fail_unregister:
- device_unregister(&xendev->dev);
fail:
kfree(xendev);
return err;
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 888b9900ca0..b814935378c 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -48,6 +48,8 @@ struct xen_bus_type
struct bus_type bus;
};
+extern struct device_attribute xenbus_dev_attrs[];
+
extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 6cf467bf63e..60adf919d78 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -107,6 +107,9 @@ static int xenbus_uevent_backend(struct device *dev,
if (xdev == NULL)
return -ENODEV;
+ if (add_uevent_var(env, "MODALIAS=xen-backend:%s", xdev->devicetype))
+ return -ENOMEM;
+
/* stuff we want to pass to /sbin/hotplug */
if (add_uevent_var(env, "XENBUS_TYPE=%s", xdev->devicetype))
return -ENOMEM;
@@ -183,10 +186,6 @@ static void frontend_changed(struct xenbus_watch *watch,
xenbus_otherend_changed(watch, vec, len, 0);
}
-static struct device_attribute xenbus_backend_dev_attrs[] = {
- __ATTR_NULL
-};
-
static struct xen_bus_type xenbus_backend = {
.root = "backend",
.levels = 3, /* backend/type/<frontend>/<id> */
@@ -200,7 +199,7 @@ static struct xen_bus_type xenbus_backend = {
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_backend_dev_attrs,
+ .dev_attrs = xenbus_dev_attrs,
},
};
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index b6a2690c9d4..ed2ba474a56 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -81,10 +81,6 @@ static void backend_changed(struct xenbus_watch *watch,
xenbus_otherend_changed(watch, vec, len, 1);
}
-static struct device_attribute xenbus_frontend_dev_attrs[] = {
- __ATTR_NULL
-};
-
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
.resume = xenbus_dev_resume,
@@ -106,7 +102,7 @@ static struct xen_bus_type xenbus_frontend = {
.probe = xenbus_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
- .dev_attrs = xenbus_frontend_dev_attrs,
+ .dev_attrs = xenbus_dev_attrs,
.pm = &xenbus_pm_ops,
},
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index bb71471a4d9..a9b4a24f2a1 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1737,7 +1737,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
io_parms.pid = pid;
io_parms.tcon = pTcon;
io_parms.offset = *poffset;
- io_parms.length = len;
+ io_parms.length = cur_len;
rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
&read_data, &buf_type);
pSMBr = (struct smb_com_read_rsp *)read_data;
diff --git a/fs/dcache.c b/fs/dcache.c
index 6e4ea6d8777..fbdcbca4072 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1813,8 +1813,6 @@ seqretry:
tname = dentry->d_name.name;
i = dentry->d_inode;
prefetch(tname);
- if (i)
- prefetch(i);
/*
* This seqcount check is required to ensure name and
* len are loaded atomically, so as not to walk off the
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index abc49f29245..90e5997262e 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -14,17 +14,9 @@
#include "dlm_internal.h"
#include "lock.h"
#include "user.h"
-#include "ast.h"
-
-#define WAKE_ASTS 0
-
-static uint64_t ast_seq_count;
-static struct list_head ast_queue;
-static spinlock_t ast_queue_lock;
-static struct task_struct * astd_task;
-static unsigned long astd_wakeflags;
-static struct mutex astd_running;
+static uint64_t dlm_cb_seq;
+static spinlock_t dlm_cb_seq_spin;
static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
{
@@ -57,21 +49,13 @@ static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
}
}
-void dlm_del_ast(struct dlm_lkb *lkb)
-{
- spin_lock(&ast_queue_lock);
- if (!list_empty(&lkb->lkb_astqueue))
- list_del_init(&lkb->lkb_astqueue);
- spin_unlock(&ast_queue_lock);
-}
-
int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
int status, uint32_t sbflags, uint64_t seq)
{
struct dlm_ls *ls = lkb->lkb_resource->res_ls;
uint64_t prev_seq;
int prev_mode;
- int i;
+ int i, rv;
for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
if (lkb->lkb_callbacks[i].seq)
@@ -100,7 +84,8 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
mode,
(unsigned long long)prev_seq,
prev_mode);
- return 0;
+ rv = 0;
+ goto out;
}
}
@@ -109,6 +94,7 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
lkb->lkb_callbacks[i].mode = mode;
lkb->lkb_callbacks[i].sb_status = status;
lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
+ rv = 0;
break;
}
@@ -117,21 +103,24 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
lkb->lkb_id, (unsigned long long)seq,
flags, mode, status, sbflags);
dlm_dump_lkb_callbacks(lkb);
- return -1;
+ rv = -1;
+ goto out;
}
-
- return 0;
+ out:
+ return rv;
}
int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_callback *cb, int *resid)
{
- int i;
+ int i, rv;
*resid = 0;
- if (!lkb->lkb_callbacks[0].seq)
- return -ENOENT;
+ if (!lkb->lkb_callbacks[0].seq) {
+ rv = -ENOENT;
+ goto out;
+ }
/* oldest undelivered cb is callbacks[0] */
@@ -163,7 +152,8 @@ int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
cb->mode,
(unsigned long long)lkb->lkb_last_cast.seq,
lkb->lkb_last_cast.mode);
- return 0;
+ rv = 0;
+ goto out;
}
}
@@ -176,171 +166,150 @@ int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
lkb->lkb_last_bast_time = ktime_get();
}
-
- return 0;
+ rv = 0;
+ out:
+ return rv;
}
-void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
- uint32_t sbflags)
+void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
+ uint32_t sbflags)
{
- uint64_t seq;
+ struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ uint64_t new_seq, prev_seq;
int rv;
- spin_lock(&ast_queue_lock);
-
- seq = ++ast_seq_count;
+ spin_lock(&dlm_cb_seq_spin);
+ new_seq = ++dlm_cb_seq;
+ spin_unlock(&dlm_cb_seq_spin);
if (lkb->lkb_flags & DLM_IFL_USER) {
- spin_unlock(&ast_queue_lock);
- dlm_user_add_ast(lkb, flags, mode, status, sbflags, seq);
+ dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
return;
}
- rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, seq);
- if (rv < 0) {
- spin_unlock(&ast_queue_lock);
- return;
- }
+ mutex_lock(&lkb->lkb_cb_mutex);
+ prev_seq = lkb->lkb_callbacks[0].seq;
- if (list_empty(&lkb->lkb_astqueue)) {
+ rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
+ if (rv < 0)
+ goto out;
+
+ if (!prev_seq) {
kref_get(&lkb->lkb_ref);
- list_add_tail(&lkb->lkb_astqueue, &ast_queue);
- }
- spin_unlock(&ast_queue_lock);
- set_bit(WAKE_ASTS, &astd_wakeflags);
- wake_up_process(astd_task);
+ if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
+ mutex_lock(&ls->ls_cb_mutex);
+ list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
+ mutex_unlock(&ls->ls_cb_mutex);
+ } else {
+ queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+ }
+ }
+ out:
+ mutex_unlock(&lkb->lkb_cb_mutex);
}
-static void process_asts(void)
+void dlm_callback_work(struct work_struct *work)
{
- struct dlm_ls *ls = NULL;
- struct dlm_rsb *r = NULL;
- struct dlm_lkb *lkb;
+ struct dlm_lkb *lkb = container_of(work, struct dlm_lkb, lkb_cb_work);
+ struct dlm_ls *ls = lkb->lkb_resource->res_ls;
void (*castfn) (void *astparam);
void (*bastfn) (void *astparam, int mode);
struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
int i, rv, resid;
-repeat:
- spin_lock(&ast_queue_lock);
- list_for_each_entry(lkb, &ast_queue, lkb_astqueue) {
- r = lkb->lkb_resource;
- ls = r->res_ls;
+ memset(&callbacks, 0, sizeof(callbacks));
- if (dlm_locking_stopped(ls))
- continue;
-
- /* we remove from astqueue list and remove everything in
- lkb_callbacks before releasing the spinlock so empty
- lkb_astqueue is always consistent with empty lkb_callbacks */
-
- list_del_init(&lkb->lkb_astqueue);
-
- castfn = lkb->lkb_astfn;
- bastfn = lkb->lkb_bastfn;
+ mutex_lock(&lkb->lkb_cb_mutex);
+ if (!lkb->lkb_callbacks[0].seq) {
+ /* no callback work exists, shouldn't happen */
+ log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
+ dlm_print_lkb(lkb);
+ dlm_dump_lkb_callbacks(lkb);
+ }
- memset(&callbacks, 0, sizeof(callbacks));
+ for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
+ rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
+ if (rv < 0)
+ break;
+ }
- for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
- rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
- if (rv < 0)
- break;
- }
- spin_unlock(&ast_queue_lock);
+ if (resid) {
+ /* cbs remain, loop should have removed all, shouldn't happen */
+ log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
+ resid);
+ dlm_print_lkb(lkb);
+ dlm_dump_lkb_callbacks(lkb);
+ }
+ mutex_unlock(&lkb->lkb_cb_mutex);
- if (resid) {
- /* shouldn't happen, for loop should have removed all */
- log_error(ls, "callback resid %d lkb %x",
- resid, lkb->lkb_id);
- }
+ castfn = lkb->lkb_astfn;
+ bastfn = lkb->lkb_bastfn;
- for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
- if (!callbacks[i].seq)
- break;
- if (callbacks[i].flags & DLM_CB_SKIP) {
- continue;
- } else if (callbacks[i].flags & DLM_CB_BAST) {
- bastfn(lkb->lkb_astparam, callbacks[i].mode);
- } else if (callbacks[i].flags & DLM_CB_CAST) {
- lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
- lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
- castfn(lkb->lkb_astparam);
- }
+ for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
+ if (!callbacks[i].seq)
+ break;
+ if (callbacks[i].flags & DLM_CB_SKIP) {
+ continue;
+ } else if (callbacks[i].flags & DLM_CB_BAST) {
+ bastfn(lkb->lkb_astparam, callbacks[i].mode);
+ } else if (callbacks[i].flags & DLM_CB_CAST) {
+ lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
+ lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
+ castfn(lkb->lkb_astparam);
}
-
- /* removes ref for ast_queue, may cause lkb to be freed */
- dlm_put_lkb(lkb);
-
- cond_resched();
- goto repeat;
}
- spin_unlock(&ast_queue_lock);
-}
-
-static inline int no_asts(void)
-{
- int ret;
- spin_lock(&ast_queue_lock);
- ret = list_empty(&ast_queue);
- spin_unlock(&ast_queue_lock);
- return ret;
+ /* undo kref_get from dlm_add_callback, may cause lkb to be freed */
+ dlm_put_lkb(lkb);
}
-static int dlm_astd(void *data)
+int dlm_callback_start(struct dlm_ls *ls)
{
- while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
- if (!test_bit(WAKE_ASTS, &astd_wakeflags))
- schedule();
- set_current_state(TASK_RUNNING);
-
- mutex_lock(&astd_running);
- if (test_and_clear_bit(WAKE_ASTS, &astd_wakeflags))
- process_asts();
- mutex_unlock(&astd_running);
+ ls->ls_callback_wq = alloc_workqueue("dlm_callback",
+ WQ_UNBOUND |
+ WQ_MEM_RECLAIM |
+ WQ_NON_REENTRANT,
+ 0);
+ if (!ls->ls_callback_wq) {
+ log_print("can't start dlm_callback workqueue");
+ return -ENOMEM;
}
return 0;
}
-void dlm_astd_wake(void)
+void dlm_callback_stop(struct dlm_ls *ls)
{
- if (!no_asts()) {
- set_bit(WAKE_ASTS, &astd_wakeflags);
- wake_up_process(astd_task);
- }
+ if (ls->ls_callback_wq)
+ destroy_workqueue(ls->ls_callback_wq);
}
-int dlm_astd_start(void)
+void dlm_callback_suspend(struct dlm_ls *ls)
{
- struct task_struct *p;
- int error = 0;
-
- INIT_LIST_HEAD(&ast_queue);
- spin_lock_init(&ast_queue_lock);
- mutex_init(&astd_running);
-
- p = kthread_run(dlm_astd, NULL, "dlm_astd");
- if (IS_ERR(p))
- error = PTR_ERR(p);
- else
- astd_task = p;
- return error;
-}
+ set_bit(LSFL_CB_DELAY, &ls->ls_flags);
-void dlm_astd_stop(void)
-{
- kthread_stop(astd_task);
+ if (ls->ls_callback_wq)
+ flush_workqueue(ls->ls_callback_wq);
}
-void dlm_astd_suspend(void)
+void dlm_callback_resume(struct dlm_ls *ls)
{
- mutex_lock(&astd_running);
-}
+ struct dlm_lkb *lkb, *safe;
+ int count = 0;
-void dlm_astd_resume(void)
-{
- mutex_unlock(&astd_running);
+ clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
+
+ if (!ls->ls_callback_wq)
+ return;
+
+ mutex_lock(&ls->ls_cb_mutex);
+ list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
+ list_del_init(&lkb->lkb_cb_list);
+ queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
+ count++;
+ }
+ mutex_unlock(&ls->ls_cb_mutex);
+
+ log_debug(ls, "dlm_callback_resume %d", count);
}
diff --git a/fs/dlm/ast.h b/fs/dlm/ast.h
index 8aa89c9b561..757b551c682 100644
--- a/fs/dlm/ast.h
+++ b/fs/dlm/ast.h
@@ -18,14 +18,15 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
int status, uint32_t sbflags, uint64_t seq);
int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
struct dlm_callback *cb, int *resid);
-void dlm_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
- uint32_t sbflags);
+void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
+ uint32_t sbflags);
-void dlm_astd_wake(void);
-int dlm_astd_start(void);
-void dlm_astd_stop(void);
-void dlm_astd_suspend(void);
-void dlm_astd_resume(void);
+void dlm_callback_work(struct work_struct *work);
+int dlm_callback_start(struct dlm_ls *ls);
+void dlm_callback_stop(struct dlm_ls *ls);
+void dlm_callback_suspend(struct dlm_ls *ls);
+void dlm_callback_resume(struct dlm_ls *ls);
#endif
+
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 9b026ea8baa..6cf72fcc0d0 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -28,7 +28,8 @@
* /config/dlm/<cluster>/spaces/<space>/nodes/<node>/weight
* /config/dlm/<cluster>/comms/<comm>/nodeid
* /config/dlm/<cluster>/comms/<comm>/local
- * /config/dlm/<cluster>/comms/<comm>/addr
+ * /config/dlm/<cluster>/comms/<comm>/addr (write only)
+ * /config/dlm/<cluster>/comms/<comm>/addr_list (read only)
* The <cluster> level is useless, but I haven't figured out how to avoid it.
*/
@@ -80,6 +81,7 @@ static ssize_t comm_local_write(struct dlm_comm *cm, const char *buf,
size_t len);
static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf,
size_t len);
+static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf);
static ssize_t node_nodeid_read(struct dlm_node *nd, char *buf);
static ssize_t node_nodeid_write(struct dlm_node *nd, const char *buf,
size_t len);
@@ -92,7 +94,6 @@ struct dlm_cluster {
unsigned int cl_tcp_port;
unsigned int cl_buffer_size;
unsigned int cl_rsbtbl_size;
- unsigned int cl_lkbtbl_size;
unsigned int cl_dirtbl_size;
unsigned int cl_recover_timer;
unsigned int cl_toss_secs;
@@ -101,13 +102,13 @@ struct dlm_cluster {
unsigned int cl_protocol;
unsigned int cl_timewarn_cs;
unsigned int cl_waitwarn_us;
+ unsigned int cl_new_rsb_count;
};
enum {
CLUSTER_ATTR_TCP_PORT = 0,
CLUSTER_ATTR_BUFFER_SIZE,
CLUSTER_ATTR_RSBTBL_SIZE,
- CLUSTER_ATTR_LKBTBL_SIZE,
CLUSTER_ATTR_DIRTBL_SIZE,
CLUSTER_ATTR_RECOVER_TIMER,
CLUSTER_ATTR_TOSS_SECS,
@@ -116,6 +117,7 @@ enum {
CLUSTER_ATTR_PROTOCOL,
CLUSTER_ATTR_TIMEWARN_CS,
CLUSTER_ATTR_WAITWARN_US,
+ CLUSTER_ATTR_NEW_RSB_COUNT,
};
struct cluster_attribute {
@@ -160,7 +162,6 @@ __CONFIGFS_ATTR(name, 0644, name##_read, name##_write)
CLUSTER_ATTR(tcp_port, 1);
CLUSTER_ATTR(buffer_size, 1);
CLUSTER_ATTR(rsbtbl_size, 1);
-CLUSTER_ATTR(lkbtbl_size, 1);
CLUSTER_ATTR(dirtbl_size, 1);
CLUSTER_ATTR(recover_timer, 1);
CLUSTER_ATTR(toss_secs, 1);
@@ -169,12 +170,12 @@ CLUSTER_ATTR(log_debug, 0);
CLUSTER_ATTR(protocol, 0);
CLUSTER_ATTR(timewarn_cs, 1);
CLUSTER_ATTR(waitwarn_us, 0);
+CLUSTER_ATTR(new_rsb_count, 0);
static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_TCP_PORT] = &cluster_attr_tcp_port.attr,
[CLUSTER_ATTR_BUFFER_SIZE] = &cluster_attr_buffer_size.attr,
[CLUSTER_ATTR_RSBTBL_SIZE] = &cluster_attr_rsbtbl_size.attr,
- [CLUSTER_ATTR_LKBTBL_SIZE] = &cluster_attr_lkbtbl_size.attr,
[CLUSTER_ATTR_DIRTBL_SIZE] = &cluster_attr_dirtbl_size.attr,
[CLUSTER_ATTR_RECOVER_TIMER] = &cluster_attr_recover_timer.attr,
[CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs.attr,
@@ -183,6 +184,7 @@ static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol.attr,
[CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs.attr,
[CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us.attr,
+ [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count.attr,
NULL,
};
@@ -190,6 +192,7 @@ enum {
COMM_ATTR_NODEID = 0,
COMM_ATTR_LOCAL,
COMM_ATTR_ADDR,
+ COMM_ATTR_ADDR_LIST,
};
struct comm_attribute {
@@ -217,14 +220,22 @@ static struct comm_attribute comm_attr_local = {
static struct comm_attribute comm_attr_addr = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "addr",
- .ca_mode = S_IRUGO | S_IWUSR },
+ .ca_mode = S_IWUSR },
.store = comm_addr_write,
};
+static struct comm_attribute comm_attr_addr_list = {
+ .attr = { .ca_owner = THIS_MODULE,
+ .ca_name = "addr_list",
+ .ca_mode = S_IRUGO },
+ .show = comm_addr_list_read,
+};
+
static struct configfs_attribute *comm_attrs[] = {
[COMM_ATTR_NODEID] = &comm_attr_nodeid.attr,
[COMM_ATTR_LOCAL] = &comm_attr_local.attr,
[COMM_ATTR_ADDR] = &comm_attr_addr.attr,
+ [COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list.attr,
NULL,
};
@@ -435,7 +446,6 @@ static struct config_group *make_cluster(struct config_group *g,
cl->cl_tcp_port = dlm_config.ci_tcp_port;
cl->cl_buffer_size = dlm_config.ci_buffer_size;
cl->cl_rsbtbl_size = dlm_config.ci_rsbtbl_size;
- cl->cl_lkbtbl_size = dlm_config.ci_lkbtbl_size;
cl->cl_dirtbl_size = dlm_config.ci_dirtbl_size;
cl->cl_recover_timer = dlm_config.ci_recover_timer;
cl->cl_toss_secs = dlm_config.ci_toss_secs;
@@ -444,6 +454,7 @@ static struct config_group *make_cluster(struct config_group *g,
cl->cl_protocol = dlm_config.ci_protocol;
cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
+ cl->cl_new_rsb_count = dlm_config.ci_new_rsb_count;
space_list = &sps->ss_group;
comm_list = &cms->cs_group;
@@ -720,6 +731,50 @@ static ssize_t comm_addr_write(struct dlm_comm *cm, const char *buf, size_t len)
return len;
}
+static ssize_t comm_addr_list_read(struct dlm_comm *cm, char *buf)
+{
+ ssize_t s;
+ ssize_t allowance;
+ int i;
+ struct sockaddr_storage *addr;
+ struct sockaddr_in *addr_in;
+ struct sockaddr_in6 *addr_in6;
+
+ /* Taken from ip6_addr_string() defined in lib/vsprintf.c */
+ char buf0[sizeof("AF_INET6 xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255\n")];
+
+
+ /* Derived from SIMPLE_ATTR_SIZE of fs/configfs/file.c */
+ allowance = 4096;
+ buf[0] = '\0';
+
+ for (i = 0; i < cm->addr_count; i++) {
+ addr = cm->addr[i];
+
+ switch(addr->ss_family) {
+ case AF_INET:
+ addr_in = (struct sockaddr_in *)addr;
+ s = sprintf(buf0, "AF_INET %pI4\n", &addr_in->sin_addr.s_addr);
+ break;
+ case AF_INET6:
+ addr_in6 = (struct sockaddr_in6 *)addr;
+ s = sprintf(buf0, "AF_INET6 %pI6\n", &addr_in6->sin6_addr);
+ break;
+ default:
+ s = sprintf(buf0, "%s\n", "<UNKNOWN>");
+ break;
+ }
+ allowance -= s;
+ if (allowance >= 0)
+ strcat(buf, buf0);
+ else {
+ allowance += s;
+ break;
+ }
+ }
+ return 4096 - allowance;
+}
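
Reading addr_list for a comm configured with one IPv4 and one IPv6 address would therefore produce output along these lines (addresses made up):

    AF_INET 192.168.1.10
    AF_INET6 fe80::215:5dff:fe01:2b3c
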
+
static ssize_t show_node(struct config_item *i, struct configfs_attribute *a,
char *buf)
{
@@ -983,7 +1038,6 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_TCP_PORT 21064
#define DEFAULT_BUFFER_SIZE 4096
#define DEFAULT_RSBTBL_SIZE 1024
-#define DEFAULT_LKBTBL_SIZE 1024
#define DEFAULT_DIRTBL_SIZE 1024
#define DEFAULT_RECOVER_TIMER 5
#define DEFAULT_TOSS_SECS 10
@@ -992,12 +1046,12 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_PROTOCOL 0
#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
#define DEFAULT_WAITWARN_US 0
+#define DEFAULT_NEW_RSB_COUNT 128
struct dlm_config_info dlm_config = {
.ci_tcp_port = DEFAULT_TCP_PORT,
.ci_buffer_size = DEFAULT_BUFFER_SIZE,
.ci_rsbtbl_size = DEFAULT_RSBTBL_SIZE,
- .ci_lkbtbl_size = DEFAULT_LKBTBL_SIZE,
.ci_dirtbl_size = DEFAULT_DIRTBL_SIZE,
.ci_recover_timer = DEFAULT_RECOVER_TIMER,
.ci_toss_secs = DEFAULT_TOSS_SECS,
@@ -1005,6 +1059,7 @@ struct dlm_config_info dlm_config = {
.ci_log_debug = DEFAULT_LOG_DEBUG,
.ci_protocol = DEFAULT_PROTOCOL,
.ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
- .ci_waitwarn_us = DEFAULT_WAITWARN_US
+ .ci_waitwarn_us = DEFAULT_WAITWARN_US,
+ .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT
};
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index dd0ce24d5a8..3099d0dd26c 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -20,7 +20,6 @@ struct dlm_config_info {
int ci_tcp_port;
int ci_buffer_size;
int ci_rsbtbl_size;
- int ci_lkbtbl_size;
int ci_dirtbl_size;
int ci_recover_timer;
int ci_toss_secs;
@@ -29,6 +28,7 @@ struct dlm_config_info {
int ci_protocol;
int ci_timewarn_cs;
int ci_waitwarn_us;
+ int ci_new_rsb_count;
};
extern struct dlm_config_info dlm_config;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 0262451eb9c..fe2860c0244 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -37,6 +37,7 @@
#include <linux/jhash.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
+#include <linux/idr.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
@@ -52,7 +53,6 @@ struct dlm_ls;
struct dlm_lkb;
struct dlm_rsb;
struct dlm_member;
-struct dlm_lkbtable;
struct dlm_rsbtable;
struct dlm_dirtable;
struct dlm_direntry;
@@ -108,11 +108,6 @@ struct dlm_rsbtable {
spinlock_t lock;
};
-struct dlm_lkbtable {
- struct list_head list;
- rwlock_t lock;
- uint16_t counter;
-};
/*
* Lockspace member (per node in a ls)
@@ -248,17 +243,18 @@ struct dlm_lkb {
int8_t lkb_wait_count;
int lkb_wait_nodeid; /* for debugging */
- struct list_head lkb_idtbl_list; /* lockspace lkbtbl */
struct list_head lkb_statequeue; /* rsb g/c/w list */
struct list_head lkb_rsb_lookup; /* waiting for rsb lookup */
struct list_head lkb_wait_reply; /* waiting for remote reply */
- struct list_head lkb_astqueue; /* need ast to be sent */
struct list_head lkb_ownqueue; /* list of locks for a process */
struct list_head lkb_time_list;
ktime_t lkb_timestamp;
ktime_t lkb_wait_time;
unsigned long lkb_timeout_cs;
+ struct mutex lkb_cb_mutex;
+ struct work_struct lkb_cb_work;
+ struct list_head lkb_cb_list; /* for ls_cb_delay or proc->asts */
struct dlm_callback lkb_callbacks[DLM_CALLBACKS_SIZE];
struct dlm_callback lkb_last_cast;
struct dlm_callback lkb_last_bast;
@@ -299,7 +295,7 @@ struct dlm_rsb {
int res_recover_locks_count;
char *res_lvbptr;
- char res_name[1];
+ char res_name[DLM_RESNAME_MAXLEN+1];
};
/* find_rsb() flags */
@@ -465,12 +461,12 @@ struct dlm_ls {
unsigned long ls_scan_time;
struct kobject ls_kobj;
+ struct idr ls_lkbidr;
+ spinlock_t ls_lkbidr_spin;
+
struct dlm_rsbtable *ls_rsbtbl;
uint32_t ls_rsbtbl_size;
- struct dlm_lkbtable *ls_lkbtbl;
- uint32_t ls_lkbtbl_size;
-
struct dlm_dirtable *ls_dirtbl;
uint32_t ls_dirtbl_size;
@@ -483,6 +479,10 @@ struct dlm_ls {
struct mutex ls_timeout_mutex;
struct list_head ls_timeout;
+ spinlock_t ls_new_rsb_spin;
+ int ls_new_rsb_count;
+ struct list_head ls_new_rsb; /* new rsb structs */
+
struct list_head ls_nodes; /* current nodes in ls */
struct list_head ls_nodes_gone; /* dead node list, recovery */
int ls_num_nodes; /* number of nodes in ls */
@@ -506,8 +506,12 @@ struct dlm_ls {
struct miscdevice ls_device;
+ struct workqueue_struct *ls_callback_wq;
+
/* recovery related */
+ struct mutex ls_cb_mutex;
+ struct list_head ls_cb_delay; /* save for queue_work later */
struct timer_list ls_timer;
struct task_struct *ls_recoverd_task;
struct mutex ls_recoverd_active;
@@ -544,6 +548,7 @@ struct dlm_ls {
#define LSFL_RCOM_WAIT 4
#define LSFL_UEVENT_WAIT 5
#define LSFL_TIMEWARN 6
+#define LSFL_CB_DELAY 7
/* much of this is just saving user space pointers associated with the
lock that we pass back to the user lib with an ast */
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index f71d0b5abd9..83b5e32514e 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -305,7 +305,7 @@ static void queue_cast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rv)
rv = -EDEADLK;
}
- dlm_add_ast(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
+ dlm_add_cb(lkb, DLM_CB_CAST, lkb->lkb_grmode, rv, lkb->lkb_sbflags);
}
static inline void queue_cast_overlap(struct dlm_rsb *r, struct dlm_lkb *lkb)
@@ -319,7 +319,7 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
if (is_master_copy(lkb)) {
send_bast(r, lkb, rqmode);
} else {
- dlm_add_ast(lkb, DLM_CB_BAST, rqmode, 0, 0);
+ dlm_add_cb(lkb, DLM_CB_BAST, rqmode, 0, 0);
}
}
@@ -327,19 +327,68 @@ static void queue_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int rqmode)
* Basic operations on rsb's and lkb's
*/
-static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
+static int pre_rsb_struct(struct dlm_ls *ls)
+{
+ struct dlm_rsb *r1, *r2;
+ int count = 0;
+
+ spin_lock(&ls->ls_new_rsb_spin);
+ if (ls->ls_new_rsb_count > dlm_config.ci_new_rsb_count / 2) {
+ spin_unlock(&ls->ls_new_rsb_spin);
+ return 0;
+ }
+ spin_unlock(&ls->ls_new_rsb_spin);
+
+ r1 = dlm_allocate_rsb(ls);
+ r2 = dlm_allocate_rsb(ls);
+
+ spin_lock(&ls->ls_new_rsb_spin);
+ if (r1) {
+ list_add(&r1->res_hashchain, &ls->ls_new_rsb);
+ ls->ls_new_rsb_count++;
+ }
+ if (r2) {
+ list_add(&r2->res_hashchain, &ls->ls_new_rsb);
+ ls->ls_new_rsb_count++;
+ }
+ count = ls->ls_new_rsb_count;
+ spin_unlock(&ls->ls_new_rsb_spin);
+
+ if (!count)
+ return -ENOMEM;
+ return 0;
+}
+
+/* If ls->ls_new_rsb is empty, return -EAGAIN, so the caller can
+ unlock any spinlocks, go back and call pre_rsb_struct again.
+ Otherwise, take an rsb off the list and return it. */
+
+static int get_rsb_struct(struct dlm_ls *ls, char *name, int len,
+ struct dlm_rsb **r_ret)
{
struct dlm_rsb *r;
+ int count;
- r = dlm_allocate_rsb(ls, len);
- if (!r)
- return NULL;
+ spin_lock(&ls->ls_new_rsb_spin);
+ if (list_empty(&ls->ls_new_rsb)) {
+ count = ls->ls_new_rsb_count;
+ spin_unlock(&ls->ls_new_rsb_spin);
+ log_debug(ls, "find_rsb retry %d %d %s",
+ count, dlm_config.ci_new_rsb_count, name);
+ return -EAGAIN;
+ }
+
+ r = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb, res_hashchain);
+ list_del(&r->res_hashchain);
+ ls->ls_new_rsb_count--;
+ spin_unlock(&ls->ls_new_rsb_spin);
r->res_ls = ls;
r->res_length = len;
memcpy(r->res_name, name, len);
mutex_init(&r->res_mutex);
+ INIT_LIST_HEAD(&r->res_hashchain);
INIT_LIST_HEAD(&r->res_lookup);
INIT_LIST_HEAD(&r->res_grantqueue);
INIT_LIST_HEAD(&r->res_convertqueue);
@@ -347,7 +396,8 @@ static struct dlm_rsb *create_rsb(struct dlm_ls *ls, char *name, int len)
INIT_LIST_HEAD(&r->res_root_list);
INIT_LIST_HEAD(&r->res_recover_list);
- return r;
+ *r_ret = r;
+ return 0;
}
static int search_rsb_list(struct list_head *head, char *name, int len,
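The create_rsb() split above follows a standard kernel pattern: perform blocking allocations (GFP_NOFS may sleep) before any spinlock is taken, park the results on a per-lockspace reserve list, and consume from that reserve while the lock is held, retrying when the reserve runs dry. A minimal sketch of the pattern, using hypothetical names that are not part of this patch:

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct item {
		struct list_head list;
	};

	struct pool {
		spinlock_t lock;
		struct list_head reserve;	/* preallocated items */
		int count;
	};

	/* Called with no locks held; kmalloc() may sleep. */
	static int pool_refill(struct pool *p)
	{
		struct item *it = kmalloc(sizeof(*it), GFP_NOFS);

		if (!it)
			return -ENOMEM;
		spin_lock(&p->lock);
		list_add(&it->list, &p->reserve);
		p->count++;
		spin_unlock(&p->lock);
		return 0;
	}

In find_rsb() below, pre_rsb_struct() plays the refill role and get_rsb_struct() the consumer: -EAGAIN from the consumer means drop the hash-bucket spinlock, refill, and retry the lookup from scratch.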
@@ -405,16 +455,6 @@ static int _search_rsb(struct dlm_ls *ls, char *name, int len, int b,
return error;
}
-static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
- unsigned int flags, struct dlm_rsb **r_ret)
-{
- int error;
- spin_lock(&ls->ls_rsbtbl[b].lock);
- error = _search_rsb(ls, name, len, b, flags, r_ret);
- spin_unlock(&ls->ls_rsbtbl[b].lock);
- return error;
-}
-
/*
* Find rsb in rsbtbl and potentially create/add one
*
@@ -432,35 +472,48 @@ static int search_rsb(struct dlm_ls *ls, char *name, int len, int b,
static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
unsigned int flags, struct dlm_rsb **r_ret)
{
- struct dlm_rsb *r = NULL, *tmp;
+ struct dlm_rsb *r = NULL;
uint32_t hash, bucket;
- int error = -EINVAL;
+ int error;
- if (namelen > DLM_RESNAME_MAXLEN)
+ if (namelen > DLM_RESNAME_MAXLEN) {
+ error = -EINVAL;
goto out;
+ }
if (dlm_no_directory(ls))
flags |= R_CREATE;
- error = 0;
hash = jhash(name, namelen, 0);
bucket = hash & (ls->ls_rsbtbl_size - 1);
- error = search_rsb(ls, name, namelen, bucket, flags, &r);
+ retry:
+ if (flags & R_CREATE) {
+ error = pre_rsb_struct(ls);
+ if (error < 0)
+ goto out;
+ }
+
+ spin_lock(&ls->ls_rsbtbl[bucket].lock);
+
+ error = _search_rsb(ls, name, namelen, bucket, flags, &r);
if (!error)
- goto out;
+ goto out_unlock;
if (error == -EBADR && !(flags & R_CREATE))
- goto out;
+ goto out_unlock;
/* the rsb was found but wasn't a master copy */
if (error == -ENOTBLK)
- goto out;
+ goto out_unlock;
- error = -ENOMEM;
- r = create_rsb(ls, name, namelen);
- if (!r)
- goto out;
+ error = get_rsb_struct(ls, name, namelen, &r);
+ if (error == -EAGAIN) {
+ spin_unlock(&ls->ls_rsbtbl[bucket].lock);
+ goto retry;
+ }
+ if (error)
+ goto out_unlock;
r->res_hash = hash;
r->res_bucket = bucket;
@@ -474,18 +527,10 @@ static int find_rsb(struct dlm_ls *ls, char *name, int namelen,
nodeid = 0;
r->res_nodeid = nodeid;
}
-
- spin_lock(&ls->ls_rsbtbl[bucket].lock);
- error = _search_rsb(ls, name, namelen, bucket, 0, &tmp);
- if (!error) {
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
- dlm_free_rsb(r);
- r = tmp;
- goto out;
- }
list_add(&r->res_hashchain, &ls->ls_rsbtbl[bucket].list);
- spin_unlock(&ls->ls_rsbtbl[bucket].lock);
error = 0;
+ out_unlock:
+ spin_unlock(&ls->ls_rsbtbl[bucket].lock);
out:
*r_ret = r;
return error;
@@ -580,9 +625,8 @@ static void detach_lkb(struct dlm_lkb *lkb)
static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
{
- struct dlm_lkb *lkb, *tmp;
- uint32_t lkid = 0;
- uint16_t bucket;
+ struct dlm_lkb *lkb;
+ int rv, id;
lkb = dlm_allocate_lkb(ls);
if (!lkb)
@@ -594,60 +638,42 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
INIT_LIST_HEAD(&lkb->lkb_ownqueue);
INIT_LIST_HEAD(&lkb->lkb_rsb_lookup);
INIT_LIST_HEAD(&lkb->lkb_time_list);
- INIT_LIST_HEAD(&lkb->lkb_astqueue);
+ INIT_LIST_HEAD(&lkb->lkb_cb_list);
+ mutex_init(&lkb->lkb_cb_mutex);
+ INIT_WORK(&lkb->lkb_cb_work, dlm_callback_work);
- get_random_bytes(&bucket, sizeof(bucket));
- bucket &= (ls->ls_lkbtbl_size - 1);
-
- write_lock(&ls->ls_lkbtbl[bucket].lock);
+ retry:
+ rv = idr_pre_get(&ls->ls_lkbidr, GFP_NOFS);
+ if (!rv)
+ return -ENOMEM;
- /* counter can roll over so we must verify lkid is not in use */
+ spin_lock(&ls->ls_lkbidr_spin);
+ rv = idr_get_new_above(&ls->ls_lkbidr, lkb, 1, &id);
+ if (!rv)
+ lkb->lkb_id = id;
+ spin_unlock(&ls->ls_lkbidr_spin);
- while (lkid == 0) {
- lkid = (bucket << 16) | ls->ls_lkbtbl[bucket].counter++;
+ if (rv == -EAGAIN)
+ goto retry;
- list_for_each_entry(tmp, &ls->ls_lkbtbl[bucket].list,
- lkb_idtbl_list) {
- if (tmp->lkb_id != lkid)
- continue;
- lkid = 0;
- break;
- }
+ if (rv < 0) {
+ log_error(ls, "create_lkb idr error %d", rv);
+ return rv;
}
- lkb->lkb_id = lkid;
- list_add(&lkb->lkb_idtbl_list, &ls->ls_lkbtbl[bucket].list);
- write_unlock(&ls->ls_lkbtbl[bucket].lock);
-
*lkb_ret = lkb;
return 0;
}
-static struct dlm_lkb *__find_lkb(struct dlm_ls *ls, uint32_t lkid)
-{
- struct dlm_lkb *lkb;
- uint16_t bucket = (lkid >> 16);
-
- list_for_each_entry(lkb, &ls->ls_lkbtbl[bucket].list, lkb_idtbl_list) {
- if (lkb->lkb_id == lkid)
- return lkb;
- }
- return NULL;
-}
-
static int find_lkb(struct dlm_ls *ls, uint32_t lkid, struct dlm_lkb **lkb_ret)
{
struct dlm_lkb *lkb;
- uint16_t bucket = (lkid >> 16);
-
- if (bucket >= ls->ls_lkbtbl_size)
- return -EBADSLT;
- read_lock(&ls->ls_lkbtbl[bucket].lock);
- lkb = __find_lkb(ls, lkid);
+ spin_lock(&ls->ls_lkbidr_spin);
+ lkb = idr_find(&ls->ls_lkbidr, lkid);
if (lkb)
kref_get(&lkb->lkb_ref);
- read_unlock(&ls->ls_lkbtbl[bucket].lock);
+ spin_unlock(&ls->ls_lkbidr_spin);
*lkb_ret = lkb;
return lkb ? 0 : -ENOENT;
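Two details of the idr conversion are worth noting. idr_pre_get() only preloads free layers, and a concurrent allocator can consume them between the preload and idr_get_new_above(); that window is why -EAGAIN loops back to retry. And the start value of 1 passed to idr_get_new_above() keeps lock id 0 out of circulation, presumably because a zero lkid is treated as invalid. Later kernels replaced this API pair with idr_alloc(); purely for orientation (not part of this patch), create_lkb()'s allocation under the newer API would look like:

	int id;

	idr_preload(GFP_NOFS);
	spin_lock(&ls->ls_lkbidr_spin);
	/* start at 1; an end of 0 means no upper bound */
	id = idr_alloc(&ls->ls_lkbidr, lkb, 1, 0, GFP_NOWAIT);
	if (id >= 0)
		lkb->lkb_id = id;
	spin_unlock(&ls->ls_lkbidr_spin);
	idr_preload_end();
	if (id < 0)
		return id;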
@@ -668,12 +694,12 @@ static void kill_lkb(struct kref *kref)
static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
- uint16_t bucket = (lkb->lkb_id >> 16);
+ uint32_t lkid = lkb->lkb_id;
- write_lock(&ls->ls_lkbtbl[bucket].lock);
+ spin_lock(&ls->ls_lkbidr_spin);
if (kref_put(&lkb->lkb_ref, kill_lkb)) {
- list_del(&lkb->lkb_idtbl_list);
- write_unlock(&ls->ls_lkbtbl[bucket].lock);
+ idr_remove(&ls->ls_lkbidr, lkid);
+ spin_unlock(&ls->ls_lkbidr_spin);
detach_lkb(lkb);
@@ -683,7 +709,7 @@ static int __put_lkb(struct dlm_ls *ls, struct dlm_lkb *lkb)
dlm_free_lkb(lkb);
return 1;
} else {
- write_unlock(&ls->ls_lkbtbl[bucket].lock);
+ spin_unlock(&ls->ls_lkbidr_spin);
return 0;
}
}
@@ -849,9 +875,7 @@ void dlm_scan_waiters(struct dlm_ls *ls)
if (!num_nodes) {
num_nodes = ls->ls_num_nodes;
- warned = kmalloc(GFP_KERNEL, num_nodes * sizeof(int));
- if (warned)
- memset(warned, 0, num_nodes * sizeof(int));
+ warned = kzalloc(num_nodes * sizeof(int), GFP_KERNEL);
}
if (!warned)
continue;
@@ -863,9 +887,7 @@ void dlm_scan_waiters(struct dlm_ls *ls)
dlm_config.ci_waitwarn_us, lkb->lkb_wait_nodeid);
}
mutex_unlock(&ls->ls_waiters_mutex);
-
- if (warned)
- kfree(warned);
+ kfree(warned);
if (debug_expired)
log_debug(ls, "scan_waiters %u warn %u over %d us max %lld us",
@@ -2401,9 +2423,6 @@ static int do_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
if (deadlk) {
/* it's left on the granted queue */
- log_debug(r->res_ls, "deadlock %x node %d sts%d g%d r%d %s",
- lkb->lkb_id, lkb->lkb_nodeid, lkb->lkb_status,
- lkb->lkb_grmode, lkb->lkb_rqmode, r->res_name);
revert_lock(r, lkb);
queue_cast(r, lkb, -EDEADLK);
error = -EDEADLK;
@@ -3993,8 +4012,6 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms)
default:
log_error(ls, "unknown message type %d", ms->m_type);
}
-
- dlm_astd_wake();
}
/* If the lockspace is in recovery mode (locking stopped), then normal
@@ -4133,7 +4150,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
struct dlm_message *ms_stub;
int wait_type, stub_unlock_result, stub_cancel_result;
- ms_stub = kmalloc(GFP_KERNEL, sizeof(struct dlm_message));
+ ms_stub = kmalloc(sizeof(struct dlm_message), GFP_KERNEL);
if (!ms_stub) {
log_error(ls, "dlm_recover_waiters_pre no mem");
return;
@@ -4809,7 +4826,7 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
goto out_put;
spin_lock(&ua->proc->locks_spin);
- /* dlm_user_add_ast() may have already taken lkb off the proc list */
+ /* dlm_user_add_cb() may have already taken lkb off the proc list */
if (!list_empty(&lkb->lkb_ownqueue))
list_move(&lkb->lkb_ownqueue, &ua->proc->unlocking);
spin_unlock(&ua->proc->locks_spin);
@@ -4946,7 +4963,7 @@ static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
/* We have to release clear_proc_locks mutex before calling unlock_proc_lock()
(which does lock_rsb) due to deadlock with receiving a message that does
- lock_rsb followed by dlm_user_add_ast() */
+ lock_rsb followed by dlm_user_add_cb() */
static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
struct dlm_user_proc *proc)
@@ -4969,7 +4986,7 @@ static struct dlm_lkb *del_proc_lock(struct dlm_ls *ls,
return lkb;
}
-/* The ls_clear_proc_locks mutex protects against dlm_user_add_asts() which
+/* The ls_clear_proc_locks mutex protects against dlm_user_add_cb() which
1) references lkb->ua which we free here and 2) adds lkbs to proc->asts,
which we clear here. */
@@ -5011,10 +5028,10 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
dlm_put_lkb(lkb);
}
- list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
+ list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
memset(&lkb->lkb_callbacks, 0,
sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
- list_del_init(&lkb->lkb_astqueue);
+ list_del_init(&lkb->lkb_cb_list);
dlm_put_lkb(lkb);
}
@@ -5053,10 +5070,10 @@ static void purge_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
spin_unlock(&proc->locks_spin);
spin_lock(&proc->asts_spin);
- list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_astqueue) {
+ list_for_each_entry_safe(lkb, safe, &proc->asts, lkb_cb_list) {
memset(&lkb->lkb_callbacks, 0,
sizeof(struct dlm_callback) * DLM_CALLBACKS_SIZE);
- list_del_init(&lkb->lkb_astqueue);
+ list_del_init(&lkb->lkb_cb_list);
dlm_put_lkb(lkb);
}
spin_unlock(&proc->asts_spin);
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 14cbf409975..a1d8f1af144 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -15,7 +15,6 @@
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
-#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
@@ -24,6 +23,7 @@
#include "recover.h"
#include "requestqueue.h"
#include "user.h"
+#include "ast.h"
static int ls_count;
static struct mutex ls_lock;
@@ -359,17 +359,10 @@ static int threads_start(void)
{
int error;
- /* Thread which process lock requests for all lockspace's */
- error = dlm_astd_start();
- if (error) {
- log_print("cannot start dlm_astd thread %d", error);
- goto fail;
- }
-
error = dlm_scand_start();
if (error) {
log_print("cannot start dlm_scand thread %d", error);
- goto astd_fail;
+ goto fail;
}
/* Thread for sending/receiving messages for all lockspace's */
@@ -383,8 +376,6 @@ static int threads_start(void)
scand_fail:
dlm_scand_stop();
- astd_fail:
- dlm_astd_stop();
fail:
return error;
}
@@ -393,7 +384,6 @@ static void threads_stop(void)
{
dlm_scand_stop();
dlm_lowcomms_stop();
- dlm_astd_stop();
}
static int new_lockspace(const char *name, int namelen, void **lockspace,
@@ -463,7 +453,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
size = dlm_config.ci_rsbtbl_size;
ls->ls_rsbtbl_size = size;
- ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
+ ls->ls_rsbtbl = vmalloc(sizeof(struct dlm_rsbtable) * size);
if (!ls->ls_rsbtbl)
goto out_lsfree;
for (i = 0; i < size; i++) {
@@ -472,22 +462,13 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
spin_lock_init(&ls->ls_rsbtbl[i].lock);
}
- size = dlm_config.ci_lkbtbl_size;
- ls->ls_lkbtbl_size = size;
-
- ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
- if (!ls->ls_lkbtbl)
- goto out_rsbfree;
- for (i = 0; i < size; i++) {
- INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
- rwlock_init(&ls->ls_lkbtbl[i].lock);
- ls->ls_lkbtbl[i].counter = 1;
- }
+ idr_init(&ls->ls_lkbidr);
+ spin_lock_init(&ls->ls_lkbidr_spin);
size = dlm_config.ci_dirtbl_size;
ls->ls_dirtbl_size = size;
- ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
+ ls->ls_dirtbl = vmalloc(sizeof(struct dlm_dirtable) * size);
if (!ls->ls_dirtbl)
goto out_lkbfree;
for (i = 0; i < size; i++) {
@@ -502,6 +483,9 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
INIT_LIST_HEAD(&ls->ls_timeout);
mutex_init(&ls->ls_timeout_mutex);
+ INIT_LIST_HEAD(&ls->ls_new_rsb);
+ spin_lock_init(&ls->ls_new_rsb_spin);
+
INIT_LIST_HEAD(&ls->ls_nodes);
INIT_LIST_HEAD(&ls->ls_nodes_gone);
ls->ls_num_nodes = 0;
@@ -520,6 +504,9 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
init_completion(&ls->ls_members_done);
ls->ls_members_result = -1;
+ mutex_init(&ls->ls_cb_mutex);
+ INIT_LIST_HEAD(&ls->ls_cb_delay);
+
ls->ls_recoverd_task = NULL;
mutex_init(&ls->ls_recoverd_active);
spin_lock_init(&ls->ls_recover_lock);
@@ -553,18 +540,26 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
list_add(&ls->ls_list, &lslist);
spin_unlock(&lslist_lock);
+ if (flags & DLM_LSFL_FS) {
+ error = dlm_callback_start(ls);
+ if (error) {
+ log_error(ls, "can't start dlm_callback %d", error);
+ goto out_delist;
+ }
+ }
+
/* needs to find ls in lslist */
error = dlm_recoverd_start(ls);
if (error) {
log_error(ls, "can't start dlm_recoverd %d", error);
- goto out_delist;
+ goto out_callback;
}
ls->ls_kobj.kset = dlm_kset;
error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
"%s", ls->ls_name);
if (error)
- goto out_stop;
+ goto out_recoverd;
kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
/* let kobject handle freeing of ls if there's an error */
@@ -578,7 +573,7 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
error = do_uevent(ls, 1);
if (error)
- goto out_stop;
+ goto out_recoverd;
wait_for_completion(&ls->ls_members_done);
error = ls->ls_members_result;
@@ -595,19 +590,20 @@ static int new_lockspace(const char *name, int namelen, void **lockspace,
do_uevent(ls, 0);
dlm_clear_members(ls);
kfree(ls->ls_node_array);
- out_stop:
+ out_recoverd:
dlm_recoverd_stop(ls);
+ out_callback:
+ dlm_callback_stop(ls);
out_delist:
spin_lock(&lslist_lock);
list_del(&ls->ls_list);
spin_unlock(&lslist_lock);
kfree(ls->ls_recover_buf);
out_dirfree:
- kfree(ls->ls_dirtbl);
+ vfree(ls->ls_dirtbl);
out_lkbfree:
- kfree(ls->ls_lkbtbl);
- out_rsbfree:
- kfree(ls->ls_rsbtbl);
+ idr_destroy(&ls->ls_lkbidr);
+ vfree(ls->ls_rsbtbl);
out_lsfree:
if (do_unreg)
kobject_put(&ls->ls_kobj);
@@ -641,50 +637,64 @@ int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
return error;
}
-/* Return 1 if the lockspace still has active remote locks,
- * 2 if the lockspace still has active local locks.
- */
-static int lockspace_busy(struct dlm_ls *ls)
-{
- int i, lkb_found = 0;
- struct dlm_lkb *lkb;
-
- /* NOTE: We check the lockidtbl here rather than the resource table.
- This is because there may be LKBs queued as ASTs that have been
- unlinked from their RSBs and are pending deletion once the AST has
- been delivered */
-
- for (i = 0; i < ls->ls_lkbtbl_size; i++) {
- read_lock(&ls->ls_lkbtbl[i].lock);
- if (!list_empty(&ls->ls_lkbtbl[i].list)) {
- lkb_found = 1;
- list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
- lkb_idtbl_list) {
- if (!lkb->lkb_nodeid) {
- read_unlock(&ls->ls_lkbtbl[i].lock);
- return 2;
- }
- }
- }
- read_unlock(&ls->ls_lkbtbl[i].lock);
+static int lkb_idr_is_local(int id, void *p, void *data)
+{
+ struct dlm_lkb *lkb = p;
+
+ if (!lkb->lkb_nodeid)
+ return 1;
+ return 0;
+}
+
+static int lkb_idr_is_any(int id, void *p, void *data)
+{
+ return 1;
+}
+
+static int lkb_idr_free(int id, void *p, void *data)
+{
+ struct dlm_lkb *lkb = p;
+
+ if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
+ dlm_free_lvb(lkb->lkb_lvbptr);
+
+ dlm_free_lkb(lkb);
+ return 0;
+}
+
+/* NOTE: We check the lkbidr here rather than the resource table.
+ This is because there may be LKBs queued as ASTs that have been unlinked
+ from their RSBs and are pending deletion once the AST has been delivered */
+
+static int lockspace_busy(struct dlm_ls *ls, int force)
+{
+ int rv;
+
+ spin_lock(&ls->ls_lkbidr_spin);
+ if (force == 0) {
+ rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
+ } else if (force == 1) {
+ rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
+ } else {
+ rv = 0;
}
- return lkb_found;
+ spin_unlock(&ls->ls_lkbidr_spin);
+ return rv;
}
static int release_lockspace(struct dlm_ls *ls, int force)
{
- struct dlm_lkb *lkb;
struct dlm_rsb *rsb;
struct list_head *head;
int i, busy, rv;
- busy = lockspace_busy(ls);
+ busy = lockspace_busy(ls, force);
spin_lock(&lslist_lock);
if (ls->ls_create_count == 1) {
- if (busy > force)
+ if (busy) {
rv = -EBUSY;
- else {
+ } else {
/* remove_lockspace takes ls off lslist */
ls->ls_create_count = 0;
rv = 0;
@@ -708,12 +718,12 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_recoverd_stop(ls);
+ dlm_callback_stop(ls);
+
remove_lockspace(ls);
dlm_delete_debug_file(ls);
- dlm_astd_suspend();
-
kfree(ls->ls_recover_buf);
/*
@@ -721,31 +731,15 @@ static int release_lockspace(struct dlm_ls *ls, int force)
*/
dlm_dir_clear(ls);
- kfree(ls->ls_dirtbl);
+ vfree(ls->ls_dirtbl);
/*
- * Free all lkb's on lkbtbl[] lists.
+ * Free all lkb's in idr
*/
- for (i = 0; i < ls->ls_lkbtbl_size; i++) {
- head = &ls->ls_lkbtbl[i].list;
- while (!list_empty(head)) {
- lkb = list_entry(head->next, struct dlm_lkb,
- lkb_idtbl_list);
-
- list_del(&lkb->lkb_idtbl_list);
-
- dlm_del_ast(lkb);
-
- if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
- dlm_free_lvb(lkb->lkb_lvbptr);
-
- dlm_free_lkb(lkb);
- }
- }
- dlm_astd_resume();
-
- kfree(ls->ls_lkbtbl);
+ idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
+ idr_remove_all(&ls->ls_lkbidr);
+ idr_destroy(&ls->ls_lkbidr);
/*
* Free all rsb's on rsbtbl[] lists
@@ -770,7 +764,14 @@ static int release_lockspace(struct dlm_ls *ls, int force)
}
}
- kfree(ls->ls_rsbtbl);
+ vfree(ls->ls_rsbtbl);
+
+ while (!list_empty(&ls->ls_new_rsb)) {
+ rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
+ res_hashchain);
+ list_del(&rsb->res_hashchain);
+ dlm_free_rsb(rsb);
+ }
/*
* Free structures on any other lists
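A note on the rewritten lockspace_busy(): idr_for_each() stops at the first callback that returns nonzero and propagates that value as its own return. So force == 0 reports busy if any lkb exists at all (lkb_idr_is_any), force == 1 reports busy only if a locally owned lkb (lkb_nodeid == 0) remains, and any larger force skips the check entirely, reproducing the old "busy > force" comparison without walking hash buckets. Teardown reuses the same iterator: lkb_idr_free() releases each lkb (and its lvb for master copies) before idr_remove_all() and idr_destroy() drop the id bookkeeping itself.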
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 5e2c71f05e4..990626e7da8 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -512,12 +512,10 @@ static void process_sctp_notification(struct connection *con,
}
make_sockaddr(&prim.ssp_addr, 0, &addr_len);
if (dlm_addr_to_nodeid(&prim.ssp_addr, &nodeid)) {
- int i;
unsigned char *b=(unsigned char *)&prim.ssp_addr;
log_print("reject connect from unknown addr");
- for (i=0; i<sizeof(struct sockaddr_storage);i++)
- printk("%02x ", b[i]);
- printk("\n");
+ print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
+ b, sizeof(struct sockaddr_storage));
sctp_send_shutdown(prim.ssp_assoc_id);
return;
}
@@ -748,7 +746,10 @@ static int tcp_accept_from_sock(struct connection *con)
/* Get the new node's NODEID */
make_sockaddr(&peeraddr, 0, &len);
if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
+ unsigned char *b=(unsigned char *)&peeraddr;
log_print("connect from non cluster node");
+ print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
+ b, sizeof(struct sockaddr_storage));
sock_release(newsock);
mutex_unlock(&con->sock_mutex);
return -1;
diff --git a/fs/dlm/memory.c b/fs/dlm/memory.c
index 8e0d00db004..da64df7576e 100644
--- a/fs/dlm/memory.c
+++ b/fs/dlm/memory.c
@@ -16,6 +16,7 @@
#include "memory.h"
static struct kmem_cache *lkb_cache;
+static struct kmem_cache *rsb_cache;
int __init dlm_memory_init(void)
@@ -26,6 +27,14 @@ int __init dlm_memory_init(void)
__alignof__(struct dlm_lkb), 0, NULL);
if (!lkb_cache)
ret = -ENOMEM;
+
+ rsb_cache = kmem_cache_create("dlm_rsb", sizeof(struct dlm_rsb),
+ __alignof__(struct dlm_rsb), 0, NULL);
+ if (!rsb_cache) {
+ kmem_cache_destroy(lkb_cache);
+ ret = -ENOMEM;
+ }
+
return ret;
}
@@ -33,6 +42,8 @@ void dlm_memory_exit(void)
{
if (lkb_cache)
kmem_cache_destroy(lkb_cache);
+ if (rsb_cache)
+ kmem_cache_destroy(rsb_cache);
}
char *dlm_allocate_lvb(struct dlm_ls *ls)
@@ -48,16 +59,11 @@ void dlm_free_lvb(char *p)
kfree(p);
}
-/* FIXME: have some minimal space built-in to rsb for the name and
- kmalloc a separate name if needed, like dentries are done */
-
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen)
+struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
{
struct dlm_rsb *r;
- DLM_ASSERT(namelen <= DLM_RESNAME_MAXLEN,);
-
- r = kzalloc(sizeof(*r) + namelen, GFP_NOFS);
+ r = kmem_cache_zalloc(rsb_cache, GFP_NOFS);
return r;
}
@@ -65,7 +71,7 @@ void dlm_free_rsb(struct dlm_rsb *r)
{
if (r->res_lvbptr)
dlm_free_lvb(r->res_lvbptr);
- kfree(r);
+ kmem_cache_free(rsb_cache, r);
}
struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
diff --git a/fs/dlm/memory.h b/fs/dlm/memory.h
index 485fb29143b..177c11cbb0a 100644
--- a/fs/dlm/memory.h
+++ b/fs/dlm/memory.h
@@ -16,7 +16,7 @@
int dlm_memory_init(void);
void dlm_memory_exit(void);
-struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls, int namelen);
+struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls);
void dlm_free_rsb(struct dlm_rsb *r);
struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls);
void dlm_free_lkb(struct dlm_lkb *l);
diff --git a/fs/dlm/recoverd.c b/fs/dlm/recoverd.c
index fd677c8c3d3..774da3cf92c 100644
--- a/fs/dlm/recoverd.c
+++ b/fs/dlm/recoverd.c
@@ -58,13 +58,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
mutex_lock(&ls->ls_recoverd_active);
- /*
- * Suspending and resuming dlm_astd ensures that no lkb's from this ls
- * will be processed by dlm_astd during recovery.
- */
-
- dlm_astd_suspend();
- dlm_astd_resume();
+ dlm_callback_suspend(ls);
/*
* Free non-master tossed rsb's. Master rsb's are kept on toss
@@ -202,6 +196,8 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_adjust_timeouts(ls);
+ dlm_callback_resume(ls);
+
error = enable_locking(ls, rv->seq);
if (error) {
log_debug(ls, "enable_locking failed %d", error);
@@ -222,8 +218,6 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
dlm_grant_after_purge(ls);
- dlm_astd_wake();
-
log_debug(ls, "recover %llx done: %u ms",
(unsigned long long)rv->seq,
jiffies_to_msecs(jiffies - start));
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index e96bf3e9be8..d8ea6075640 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -213,9 +213,9 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
goto out;
}
- if (list_empty(&lkb->lkb_astqueue)) {
+ if (list_empty(&lkb->lkb_cb_list)) {
kref_get(&lkb->lkb_ref);
- list_add_tail(&lkb->lkb_astqueue, &proc->asts);
+ list_add_tail(&lkb->lkb_cb_list, &proc->asts);
wake_up_interruptible(&proc->wait);
}
spin_unlock(&proc->asts_spin);
@@ -832,24 +832,24 @@ static ssize_t device_read(struct file *file, char __user *buf, size_t count,
}
/* if we empty lkb_callbacks, we don't want to unlock the spinlock
- without removing lkb_astqueue; so empty lkb_astqueue is always
+ without removing lkb_cb_list; so empty lkb_cb_list is always
consistent with empty lkb_callbacks */
- lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_astqueue);
+ lkb = list_entry(proc->asts.next, struct dlm_lkb, lkb_cb_list);
rv = dlm_rem_lkb_callback(lkb->lkb_resource->res_ls, lkb, &cb, &resid);
if (rv < 0) {
/* this shouldn't happen; lkb should have been removed from
list when resid was zero */
log_print("dlm_rem_lkb_callback empty %x", lkb->lkb_id);
- list_del_init(&lkb->lkb_astqueue);
+ list_del_init(&lkb->lkb_cb_list);
spin_unlock(&proc->asts_spin);
/* removes ref for proc->asts, may cause lkb to be freed */
dlm_put_lkb(lkb);
goto try_another;
}
if (!resid)
- list_del_init(&lkb->lkb_astqueue);
+ list_del_init(&lkb->lkb_cb_list);
spin_unlock(&proc->asts_spin);
if (cb.flags & DLM_CB_SKIP) {
diff --git a/fs/exec.c b/fs/exec.c
index 6075a1e727a..d9576f26181 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -963,9 +963,18 @@ static int de_thread(struct task_struct *tsk)
leader->group_leader = tsk;
tsk->exit_signal = SIGCHLD;
+ leader->exit_signal = -1;
BUG_ON(leader->exit_state != EXIT_ZOMBIE);
leader->exit_state = EXIT_DEAD;
+
+ /*
+ * We are going to release_task()->ptrace_unlink() silently,
+ * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
+ * the tracer won't block again waiting for this thread.
+ */
+ if (unlikely(leader->ptrace))
+ __wake_up_parent(leader, leader->parent);
write_unlock_irq(&tasklist_lock);
release_task(leader);
@@ -1225,7 +1234,12 @@ int check_unsafe_exec(struct linux_binprm *bprm)
unsigned n_fs;
int res = 0;
- bprm->unsafe = tracehook_unsafe_exec(p);
+ if (p->ptrace) {
+ if (p->ptrace & PT_PTRACE_CAP)
+ bprm->unsafe |= LSM_UNSAFE_PTRACE_CAP;
+ else
+ bprm->unsafe |= LSM_UNSAFE_PTRACE;
+ }
n_fs = 1;
spin_lock(&p->fs->lock);
@@ -1353,6 +1367,7 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
unsigned int depth = bprm->recursion_depth;
int try,retval;
struct linux_binfmt *fmt;
+ pid_t old_pid;
retval = security_bprm_check(bprm);
if (retval)
@@ -1362,6 +1377,11 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
if (retval)
return retval;
+ /* Need to fetch pid before load_binary changes it */
+ rcu_read_lock();
+ old_pid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
+ rcu_read_unlock();
+
retval = -ENOENT;
for (try=0; try<2; try++) {
read_lock(&binfmt_lock);
@@ -1381,7 +1401,8 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
bprm->recursion_depth = depth;
if (retval >= 0) {
if (depth == 0)
- tracehook_report_exec(fmt, bprm, regs);
+ ptrace_event(PTRACE_EVENT_EXEC,
+ old_pid);
put_binfmt(fmt);
allow_write_access(bprm->file);
if (bprm->file)
@@ -1769,7 +1790,7 @@ static int zap_process(struct task_struct *start, int exit_code)
t = start;
do {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
if (t != current && t->mm) {
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 2f343b4d7a7..3f7a59bfa7a 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -976,16 +976,12 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
pagevec_init(&pvec, 0);
next = 0;
- while (next <= (loff_t)-1 &&
- pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)
- ) {
+ do {
+ if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+ break;
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
- pgoff_t page_index = page->index;
-
- ASSERTCMP(page_index, >=, next);
- next = page_index + 1;
-
+ next = page->index;
if (PageFsCache(page)) {
__fscache_wait_on_page_write(cookie, page);
__fscache_uncache_page(cookie, page);
@@ -993,7 +989,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
}
pagevec_release(&pvec);
cond_resched();
- }
+ } while (++next);
_leave("");
}
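With the explicit "next <= (loff_t)-1" guard gone, the rewritten loop's termination rests on two cases: pagevec_lookup() finding no pages breaks out, and otherwise next holds the index of the last page processed, so ++next advances the search window. If that last page sat at the maximum index, (pgoff_t)-1, the increment wraps to 0 and the while (++next) condition goes false, ending the walk exactly where the old loff_t comparison did.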
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index e65493a8ac0..42e477f3122 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -854,11 +854,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
blen++;
else {
if (bstart) {
- if (metadata)
- __gfs2_free_meta(ip, bstart, blen);
- else
- __gfs2_free_data(ip, bstart, blen);
-
+ __gfs2_free_blocks(ip, bstart, blen, metadata);
btotal += blen;
}
@@ -870,11 +866,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
gfs2_add_inode_blocks(&ip->i_inode, -1);
}
if (bstart) {
- if (metadata)
- __gfs2_free_meta(ip, bstart, blen);
- else
- __gfs2_free_data(ip, bstart, blen);
-
+ __gfs2_free_blocks(ip, bstart, blen, metadata);
btotal += blen;
}
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 091ee477953..1cc2f8ec52a 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -339,6 +339,66 @@ fail:
return (copied) ? copied : error;
}
+/**
+ * gfs2_dir_get_hash_table - Get pointer to the dir hash table
+ * @ip: The inode in question
+ *
+ * Returns: The hash table or an error
+ */
+
+static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
+{
+ struct inode *inode = &ip->i_inode;
+ int ret;
+ u32 hsize;
+ __be64 *hc;
+
+ BUG_ON(!(ip->i_diskflags & GFS2_DIF_EXHASH));
+
+ hc = ip->i_hash_cache;
+ if (hc)
+ return hc;
+
+ hsize = 1 << ip->i_depth;
+ hsize *= sizeof(__be64);
+ if (hsize != i_size_read(&ip->i_inode)) {
+ gfs2_consist_inode(ip);
+ return ERR_PTR(-EIO);
+ }
+
+ hc = kmalloc(hsize, GFP_NOFS);
+ if (hc == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ret = gfs2_dir_read_data(ip, (char *)hc, 0, hsize, 1);
+ if (ret < 0) {
+ kfree(hc);
+ return ERR_PTR(ret);
+ }
+
+ spin_lock(&inode->i_lock);
+ if (ip->i_hash_cache)
+ kfree(hc);
+ else
+ ip->i_hash_cache = hc;
+ spin_unlock(&inode->i_lock);
+
+ return ip->i_hash_cache;
+}
+
+/**
+ * gfs2_dir_hash_inval - Invalidate dir hash
+ * @ip: The directory inode
+ *
+ * Must be called with an exclusive glock, or during glock invalidation.
+ */
+void gfs2_dir_hash_inval(struct gfs2_inode *ip)
+{
+ __be64 *hc = ip->i_hash_cache;
+ ip->i_hash_cache = NULL;
+ kfree(hc);
+}
+
static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
{
return dent->de_inum.no_addr == 0 || dent->de_inum.no_formal_ino == 0;
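Two things about gfs2_dir_get_hash_table() above are easy to miss. The table is read without inode->i_lock held, so two readers can race to build it; the publish step takes i_lock and the loser simply kfree()s its copy, a first-caller-wins idiom that avoids holding a spinlock across gfs2_dir_read_data(). The size math is per-entry: a directory of depth i_depth has 1 << i_depth hash slots of sizeof(__be64) bytes each, so at i_depth = 10 the cached table is 1024 * 8 = 8 KiB, and the hsize != i_size_read() check rejects an on-disk size that disagrees with the depth before anything is read.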
@@ -686,17 +747,12 @@ static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
u64 *leaf_out)
{
- __be64 leaf_no;
- int error;
-
- error = gfs2_dir_read_data(dip, (char *)&leaf_no,
- index * sizeof(__be64),
- sizeof(__be64), 0);
- if (error != sizeof(u64))
- return (error < 0) ? error : -EIO;
-
- *leaf_out = be64_to_cpu(leaf_no);
+ __be64 *hash;
+ hash = gfs2_dir_get_hash_table(dip);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
+ *leaf_out = be64_to_cpu(*(hash + index));
return 0;
}
@@ -966,6 +1022,8 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
for (x = 0; x < half_len; x++)
lp[x] = cpu_to_be64(bn);
+ gfs2_dir_hash_inval(dip);
+
error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
half_len * sizeof(u64));
if (error != half_len * sizeof(u64)) {
@@ -1052,70 +1110,54 @@ fail_brelse:
static int dir_double_exhash(struct gfs2_inode *dip)
{
- struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct buffer_head *dibh;
u32 hsize;
- u64 *buf;
- u64 *from, *to;
- u64 block;
- u64 disksize = i_size_read(&dip->i_inode);
+ u32 hsize_bytes;
+ __be64 *hc;
+ __be64 *hc2, *h;
int x;
int error = 0;
hsize = 1 << dip->i_depth;
- if (hsize * sizeof(u64) != disksize) {
- gfs2_consist_inode(dip);
- return -EIO;
- }
+ hsize_bytes = hsize * sizeof(__be64);
- /* Allocate both the "from" and "to" buffers in one big chunk */
+ hc = gfs2_dir_get_hash_table(dip);
+ if (IS_ERR(hc))
+ return PTR_ERR(hc);
- buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS);
- if (!buf)
+ h = hc2 = kmalloc(hsize_bytes * 2, GFP_NOFS);
+ if (!hc2)
return -ENOMEM;
- for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) {
- error = gfs2_dir_read_data(dip, (char *)buf,
- block * sdp->sd_hash_bsize,
- sdp->sd_hash_bsize, 1);
- if (error != sdp->sd_hash_bsize) {
- if (error >= 0)
- error = -EIO;
- goto fail;
- }
-
- from = buf;
- to = (u64 *)((char *)buf + sdp->sd_hash_bsize);
-
- for (x = sdp->sd_hash_ptrs; x--; from++) {
- *to++ = *from; /* No endianess worries */
- *to++ = *from;
- }
+ error = gfs2_meta_inode_buffer(dip, &dibh);
+ if (error)
+ goto out_kfree;
- error = gfs2_dir_write_data(dip,
- (char *)buf + sdp->sd_hash_bsize,
- block * sdp->sd_sb.sb_bsize,
- sdp->sd_sb.sb_bsize);
- if (error != sdp->sd_sb.sb_bsize) {
- if (error >= 0)
- error = -EIO;
- goto fail;
- }
+ for (x = 0; x < hsize; x++) {
+ *h++ = *hc;
+ *h++ = *hc;
+ hc++;
}
- kfree(buf);
-
- error = gfs2_meta_inode_buffer(dip, &dibh);
- if (!gfs2_assert_withdraw(sdp, !error)) {
- dip->i_depth++;
- gfs2_dinode_out(dip, dibh->b_data);
- brelse(dibh);
- }
+ error = gfs2_dir_write_data(dip, (char *)hc2, 0, hsize_bytes * 2);
+ if (error != (hsize_bytes * 2))
+ goto fail;
- return error;
+ gfs2_dir_hash_inval(dip);
+ dip->i_hash_cache = hc2;
+ dip->i_depth++;
+ gfs2_dinode_out(dip, dibh->b_data);
+ brelse(dibh);
+ return 0;
fail:
- kfree(buf);
+ /* Replace original hash table & size */
+ gfs2_dir_write_data(dip, (char *)hc, 0, hsize_bytes);
+ i_size_write(&dip->i_inode, hsize_bytes);
+ gfs2_dinode_out(dip, dibh->b_data);
+ brelse(dibh);
+out_kfree:
+ kfree(hc2);
return error;
}
@@ -1348,6 +1390,7 @@ out:
return error;
}
+
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
* @dip: dinode pointer
@@ -1362,9 +1405,7 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
filldir_t filldir)
{
struct gfs2_inode *dip = GFS2_I(inode);
- struct gfs2_sbd *sdp = GFS2_SB(inode);
u32 hsize, len = 0;
- u32 ht_offset, lp_offset, ht_offset_cur = -1;
u32 hash, index;
__be64 *lp;
int copied = 0;
@@ -1372,37 +1413,17 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
unsigned depth = 0;
hsize = 1 << dip->i_depth;
- if (hsize * sizeof(u64) != i_size_read(inode)) {
- gfs2_consist_inode(dip);
- return -EIO;
- }
-
hash = gfs2_dir_offset2hash(*offset);
index = hash >> (32 - dip->i_depth);
- lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
- if (!lp)
- return -ENOMEM;
+ lp = gfs2_dir_get_hash_table(dip);
+ if (IS_ERR(lp))
+ return PTR_ERR(lp);
while (index < hsize) {
- lp_offset = index & (sdp->sd_hash_ptrs - 1);
- ht_offset = index - lp_offset;
-
- if (ht_offset_cur != ht_offset) {
- error = gfs2_dir_read_data(dip, (char *)lp,
- ht_offset * sizeof(__be64),
- sdp->sd_hash_bsize, 1);
- if (error != sdp->sd_hash_bsize) {
- if (error >= 0)
- error = -EIO;
- goto out;
- }
- ht_offset_cur = ht_offset;
- }
-
error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
&copied, &depth,
- be64_to_cpu(lp[lp_offset]));
+ be64_to_cpu(lp[index]));
if (error)
break;
@@ -1410,8 +1431,6 @@ static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
index = (index & ~(len - 1)) + len;
}
-out:
- kfree(lp);
if (error > 0)
error = 0;
return error;
@@ -1914,43 +1933,22 @@ out:
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
- struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
struct buffer_head *bh;
struct gfs2_leaf *leaf;
u32 hsize, len;
- u32 ht_offset, lp_offset, ht_offset_cur = -1;
u32 index = 0, next_index;
__be64 *lp;
u64 leaf_no;
int error = 0, last;
hsize = 1 << dip->i_depth;
- if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) {
- gfs2_consist_inode(dip);
- return -EIO;
- }
- lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
- if (!lp)
- return -ENOMEM;
+ lp = gfs2_dir_get_hash_table(dip);
+ if (IS_ERR(lp))
+ return PTR_ERR(lp);
while (index < hsize) {
- lp_offset = index & (sdp->sd_hash_ptrs - 1);
- ht_offset = index - lp_offset;
-
- if (ht_offset_cur != ht_offset) {
- error = gfs2_dir_read_data(dip, (char *)lp,
- ht_offset * sizeof(__be64),
- sdp->sd_hash_bsize, 1);
- if (error != sdp->sd_hash_bsize) {
- if (error >= 0)
- error = -EIO;
- goto out;
- }
- ht_offset_cur = ht_offset;
- }
-
- leaf_no = be64_to_cpu(lp[lp_offset]);
+ leaf_no = be64_to_cpu(lp[index]);
if (leaf_no) {
error = get_leaf(dip, leaf_no, &bh);
if (error)
@@ -1976,7 +1974,6 @@ int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
}
out:
- kfree(lp);
return error;
}
diff --git a/fs/gfs2/dir.h b/fs/gfs2/dir.h
index e686af11bec..ff5772fbf02 100644
--- a/fs/gfs2/dir.h
+++ b/fs/gfs2/dir.h
@@ -35,6 +35,7 @@ extern int gfs2_diradd_alloc_required(struct inode *dir,
const struct qstr *filename);
extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
struct buffer_head **bhp);
+extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
static inline u32 gfs2_disk_hash(const char *data, int len)
{
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index a9f5cbe45cd..bc2590ef5fc 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -174,7 +174,9 @@ void gfs2_set_inode_flags(struct inode *inode)
struct gfs2_inode *ip = GFS2_I(inode);
unsigned int flags = inode->i_flags;
- flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
+ if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
+ inode->i_flags |= S_NOSEC;
if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
flags |= S_IMMUTABLE;
if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 1c1336e7b3b..88e8a23d002 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -409,6 +409,10 @@ static void state_change(struct gfs2_glock *gl, unsigned int new_state)
if (held1 && held2 && list_empty(&gl->gl_holders))
clear_bit(GLF_QUEUED, &gl->gl_flags);
+ if (new_state != gl->gl_target)
+ /* shorten our minimum hold time */
+ gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
+ GL_GLOCK_MIN_HOLD);
gl->gl_state = new_state;
gl->gl_tchange = jiffies;
}
@@ -668,7 +672,7 @@ static void glock_work_func(struct work_struct *work)
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
unsigned long holdtime, now = jiffies;
- holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
if (time_before(now, holdtime))
delay = holdtime - now;
@@ -679,9 +683,14 @@ static void glock_work_func(struct work_struct *work)
}
run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
- if (!delay ||
- queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ if (!delay)
gfs2_glock_put(gl);
+ else {
+ if (gl->gl_name.ln_type != LM_TYPE_INODE)
+ delay = 0;
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ gfs2_glock_put(gl);
+ }
if (drop_ref)
gfs2_glock_put(gl);
}
@@ -743,6 +752,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
+ gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
INIT_WORK(&gl->gl_delete, delete_work_func);
@@ -855,8 +865,15 @@ static int gfs2_glock_demote_wait(void *word)
static void wait_on_holder(struct gfs2_holder *gh)
{
+ unsigned long time1 = jiffies;
+
might_sleep();
wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
+ if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
+ /* Lengthen the minimum hold time. */
+ gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
+ GL_GLOCK_HOLD_INCR,
+ GL_GLOCK_MAX_HOLD);
}
static void wait_on_demote(struct gfs2_glock *gl)
@@ -1093,8 +1110,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
gfs2_glock_hold(gl);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
- !test_bit(GLF_DEMOTE, &gl->gl_flags))
- delay = gl->gl_ops->go_min_hold_time;
+ !test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+ gl->gl_name.ln_type == LM_TYPE_INODE)
+ delay = gl->gl_hold_time;
if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
}
@@ -1273,12 +1291,13 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
unsigned long now = jiffies;
gfs2_glock_hold(gl);
- holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
- if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
+ holdtime = gl->gl_tchange + gl->gl_hold_time;
+ if (test_bit(GLF_QUEUED, &gl->gl_flags) &&
+ gl->gl_name.ln_type == LM_TYPE_INODE) {
if (time_before(now, holdtime))
delay = holdtime - now;
if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
- delay = gl->gl_ops->go_min_hold_time;
+ delay = gl->gl_hold_time;
}
spin_lock(&gl->gl_spin);
@@ -1667,7 +1686,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
- gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d m:%ld\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
@@ -1676,7 +1695,7 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_revokes),
- atomic_read(&gl->gl_ref));
+ atomic_read(&gl->gl_ref), gl->gl_hold_time);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
error = dump_holder(seq, gh);
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 6b2f757b928..66707118af2 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -113,6 +113,12 @@ enum {
#define GLR_TRYFAILED 13
+#define GL_GLOCK_MAX_HOLD (long)(HZ / 5)
+#define GL_GLOCK_DFT_HOLD (long)(HZ / 5)
+#define GL_GLOCK_MIN_HOLD (long)(10)
+#define GL_GLOCK_HOLD_INCR (long)(HZ / 20)
+#define GL_GLOCK_HOLD_DECR (long)(HZ / 40)
+
struct lm_lockops {
const char *lm_proto_name;
int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
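These constants implement an adaptive minimum hold time for inode glocks: wait_on_holder() adds GL_GLOCK_HOLD_INCR when acquiring the glock took more than a second (capped at GL_GLOCK_MAX_HOLD), and state_change() subtracts GL_GLOCK_HOLD_DECR when the lock lands in a state other than the requested target (floored at GL_GLOCK_MIN_HOLD). Worked through at HZ = 1000: the default and maximum hold are HZ/5 = 200 jiffies (200 ms), each increment is HZ/20 = 50 jiffies, each decrement HZ/40 = 25 jiffies. Note the 10-jiffy floor is not scaled by HZ, so it means 10 ms at HZ = 1000 but 100 ms at HZ = 100.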
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 2cca29316bd..da21ecaafcc 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -26,6 +26,7 @@
#include "rgrp.h"
#include "util.h"
#include "trans.h"
+#include "dir.h"
/**
* __gfs2_ail_flush - remove all buffers for a given lock from the AIL
@@ -218,6 +219,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
forget_all_cached_acls(&ip->i_inode);
+ gfs2_dir_hash_inval(ip);
}
}
@@ -316,6 +318,8 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
ip->i_generation = be64_to_cpu(str->di_generation);
ip->i_diskflags = be32_to_cpu(str->di_flags);
+ ip->i_eattr = be64_to_cpu(str->di_eattr);
+ /* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
gfs2_set_inode_flags(&ip->i_inode);
height = be16_to_cpu(str->di_height);
if (unlikely(height > GFS2_MAX_META_HEIGHT))
@@ -328,7 +332,6 @@ static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
ip->i_depth = (u8)depth;
ip->i_entries = be32_to_cpu(str->di_entries);
- ip->i_eattr = be64_to_cpu(str->di_eattr);
if (S_ISREG(ip->i_inode.i_mode))
gfs2_set_aops(&ip->i_inode);
@@ -549,7 +552,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_lock = inode_go_lock,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
- .go_min_hold_time = HZ / 5,
.go_flags = GLOF_ASPACE,
};
@@ -560,7 +562,6 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_unlock = rgrp_go_unlock,
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
- .go_min_hold_time = HZ / 5,
.go_flags = GLOF_ASPACE,
};
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 81206e70cbf..892ac37de8a 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -163,7 +163,6 @@ struct gfs2_glock_operations {
int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
- const unsigned long go_min_hold_time;
const unsigned long go_flags;
#define GLOF_ASPACE 1
};
@@ -221,6 +220,7 @@ struct gfs2_glock {
unsigned int gl_hash;
unsigned long gl_demote_time; /* time of first demote request */
+ long gl_hold_time;
struct list_head gl_holders;
const struct gfs2_glock_operations *gl_ops;
@@ -285,6 +285,7 @@ struct gfs2_inode {
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
struct list_head i_trunc_list;
+ __be64 *i_hash_cache;
u32 i_entries;
u32 i_diskflags;
u8 i_height;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index c2b34cd2abe..29e1ace7953 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -41,6 +41,7 @@ static void gfs2_init_inode_once(void *foo)
init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
ip->i_alloc = NULL;
+ ip->i_hash_cache = NULL;
}
static void gfs2_init_glock_once(void *foo)
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 2a77071fb7b..516516e0c2a 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1094,6 +1094,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
+ sb->s_flags |= MS_NOSEC;
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_d_op = &gfs2_dops;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 9b780df3fd5..7f8af1eb02d 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1607,14 +1607,15 @@ rgrp_error:
}
/**
- * gfs2_free_data - free a contiguous run of data block(s)
+ * __gfs2_free_blocks - free a contiguous run of block(s)
* @ip: the inode these blocks are being freed from
* @bstart: first block of a run of contiguous blocks
* @blen: the length of the block run
+ * @meta: 1 if the blocks represent metadata
*
*/
-void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
+void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_rgrpd *rgd;
@@ -1631,54 +1632,11 @@ void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
gfs2_trans_add_rg(rgd);
/* Directories keep their data in the metadata address space */
- if (ip->i_depth)
+ if (meta || ip->i_depth)
gfs2_meta_wipe(ip, bstart, blen);
}
/**
- * gfs2_free_data - free a contiguous run of data block(s)
- * @ip: the inode these blocks are being freed from
- * @bstart: first block of a run of contiguous blocks
- * @blen: the length of the block run
- *
- */
-
-void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-
- __gfs2_free_data(ip, bstart, blen);
- gfs2_statfs_change(sdp, 0, +blen, 0);
- gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
-}
-
-/**
- * gfs2_free_meta - free a contiguous run of data block(s)
- * @ip: the inode these blocks are being freed from
- * @bstart: first block of a run of contiguous blocks
- * @blen: the length of the block run
- *
- */
-
-void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
-{
- struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- struct gfs2_rgrpd *rgd;
-
- rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
- if (!rgd)
- return;
- trace_gfs2_block_alloc(ip, bstart, blen, GFS2_BLKST_FREE);
- rgd->rd_free += blen;
-
- gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
- gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
-
- gfs2_trans_add_rg(rgd);
- gfs2_meta_wipe(ip, bstart, blen);
-}
-
-/**
* gfs2_free_meta - free a contiguous run of data block(s)
* @ip: the inode these blocks are being freed from
* @bstart: first block of a run of contiguous blocks
@@ -1690,7 +1648,7 @@ void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
- __gfs2_free_meta(ip, bstart, blen);
+ __gfs2_free_blocks(ip, bstart, blen, 1);
gfs2_statfs_change(sdp, 0, +blen, 0);
gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
}
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h
index a80e3034ac4..d253f9a8c70 100644
--- a/fs/gfs2/rgrp.h
+++ b/fs/gfs2/rgrp.h
@@ -52,9 +52,7 @@ extern int gfs2_ri_update(struct gfs2_inode *ip);
extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n);
extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation);
-extern void __gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
-extern void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen);
-extern void __gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
+extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta);
extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen);
extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
extern void gfs2_unlink_di(struct inode *inode);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index fb0edf73548..b7beadd9ba4 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1533,7 +1533,7 @@ out:
/* Case 3 starts here */
truncate_inode_pages(&inode->i_data, 0);
end_writeback(inode);
-
+ gfs2_dir_hash_inval(ip);
ip->i_gl->gl_object = NULL;
gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put(ip->i_gl);
diff --git a/fs/hfsplus/brec.c b/fs/hfsplus/brec.c
index 2312de34bd4..2a734cfccc9 100644
--- a/fs/hfsplus/brec.c
+++ b/fs/hfsplus/brec.c
@@ -43,6 +43,10 @@ u16 hfs_brec_keylen(struct hfs_bnode *node, u16 rec)
node->tree->node_size - (rec + 1) * 2);
if (!recoff)
return 0;
+ if (recoff > node->tree->node_size - 2) {
+ printk(KERN_ERR "hfs: recoff %d too large\n", recoff);
+ return 0;
+ }
retval = hfs_bnode_read_u16(node, recoff) + 2;
if (retval > node->tree->max_key_len + 2) {
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index b4ba1b31933..4dfbfec357e 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -212,7 +212,9 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
str->name, cnid, inode->i_nlink);
- hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ if (err)
+ return err;
hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL);
entry_size = hfsplus_fill_cat_thread(sb, &entry,
@@ -269,7 +271,9 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n",
str ? str->name : NULL, cnid);
- hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ if (err)
+ return err;
if (!str) {
int len;
@@ -347,12 +351,14 @@ int hfsplus_rename_cat(u32 cnid,
struct hfs_find_data src_fd, dst_fd;
hfsplus_cat_entry entry;
int entry_size, type;
- int err = 0;
+ int err;
dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
cnid, src_dir->i_ino, src_name->name,
dst_dir->i_ino, dst_name->name);
- hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
+ err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd);
+ if (err)
+ return err;
dst_fd = src_fd;
/* find the old dir entry and read the data */
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
index 4df5059c25d..25b2443a004 100644
--- a/fs/hfsplus/dir.c
+++ b/fs/hfsplus/dir.c
@@ -38,7 +38,9 @@ static struct dentry *hfsplus_lookup(struct inode *dir, struct dentry *dentry,
sb = dir->i_sb;
dentry->d_fsdata = NULL;
- hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ if (err)
+ return ERR_PTR(err);
hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, &dentry->d_name);
again:
err = hfs_brec_read(&fd, &entry, sizeof(entry));
@@ -132,7 +134,9 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
if (filp->f_pos >= inode->i_size)
return 0;
- hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ if (err)
+ return err;
hfsplus_cat_build_key(sb, fd.search_key, inode->i_ino, NULL);
err = hfs_brec_find(&fd);
if (err)
diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c
index b1991a2a08e..5849e3ef35c 100644
--- a/fs/hfsplus/extents.c
+++ b/fs/hfsplus/extents.c
@@ -119,22 +119,31 @@ static void __hfsplus_ext_write_extent(struct inode *inode,
set_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags);
}
-static void hfsplus_ext_write_extent_locked(struct inode *inode)
+static int hfsplus_ext_write_extent_locked(struct inode *inode)
{
+ int res;
+
if (HFSPLUS_I(inode)->extent_state & HFSPLUS_EXT_DIRTY) {
struct hfs_find_data fd;
- hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
+ if (res)
+ return res;
__hfsplus_ext_write_extent(inode, &fd);
hfs_find_exit(&fd);
}
+ return 0;
}
-void hfsplus_ext_write_extent(struct inode *inode)
+int hfsplus_ext_write_extent(struct inode *inode)
{
+ int res;
+
mutex_lock(&HFSPLUS_I(inode)->extents_lock);
- hfsplus_ext_write_extent_locked(inode);
+ res = hfsplus_ext_write_extent_locked(inode);
mutex_unlock(&HFSPLUS_I(inode)->extents_lock);
+
+ return res;
}
static inline int __hfsplus_ext_read_extent(struct hfs_find_data *fd,
@@ -194,9 +203,11 @@ static int hfsplus_ext_read_extent(struct inode *inode, u32 block)
block < hip->cached_start + hip->cached_blocks)
return 0;
- hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
- res = __hfsplus_ext_cache_extent(&fd, inode, block);
- hfs_find_exit(&fd);
+ res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->ext_tree, &fd);
+ if (!res) {
+ res = __hfsplus_ext_cache_extent(&fd, inode, block);
+ hfs_find_exit(&fd);
+ }
return res;
}
@@ -209,6 +220,7 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
int res = -EIO;
u32 ablock, dblock, mask;
+ sector_t sector;
int was_dirty = 0;
int shift;
@@ -255,10 +267,12 @@ int hfsplus_get_block(struct inode *inode, sector_t iblock,
done:
dprint(DBG_EXTENT, "get_block(%lu): %llu - %u\n",
inode->i_ino, (long long)iblock, dblock);
+
mask = (1 << sbi->fs_shift) - 1;
- map_bh(bh_result, sb,
- (dblock << sbi->fs_shift) + sbi->blockoffset +
- (iblock & mask));
+ sector = ((sector_t)dblock << sbi->fs_shift) +
+ sbi->blockoffset + (iblock & mask);
+ map_bh(bh_result, sb, sector);
+
if (create) {
set_buffer_new(bh_result);
hip->phys_size += sb->s_blocksize;
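The sector_t cast is the substance of this hunk: dblock is a u32, and the old expression (dblock << sbi->fs_shift) was evaluated entirely in 32-bit arithmetic before map_bh() widened the result. With a shift of, say, 3, any dblock at or above 2^29 overflows: 0x20000000 << 3 equals 2^32, which truncates to 0 and maps the buffer head to the start of the device. Promoting dblock to sector_t (64-bit when CONFIG_LBDAF is set) before shifting keeps block addresses on large volumes intact.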
@@ -371,7 +385,9 @@ int hfsplus_free_fork(struct super_block *sb, u32 cnid,
if (total_blocks == blocks)
return 0;
- hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
+ res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
+ if (res)
+ return res;
do {
res = __hfsplus_ext_read_extent(&fd, ext_entry, cnid,
total_blocks, type);
@@ -469,7 +485,9 @@ out:
insert_extent:
dprint(DBG_EXTENT, "insert new extent\n");
- hfsplus_ext_write_extent_locked(inode);
+ res = hfsplus_ext_write_extent_locked(inode);
+ if (res)
+ goto out;
memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
hip->cached_extents[0].start_block = cpu_to_be32(start);
@@ -500,7 +518,6 @@ void hfsplus_file_truncate(struct inode *inode)
struct page *page;
void *fsdata;
u32 size = inode->i_size;
- int res;
res = pagecache_write_begin(NULL, mapping, size, 0,
AOP_FLAG_UNINTERRUPTIBLE,
@@ -523,7 +540,12 @@ void hfsplus_file_truncate(struct inode *inode)
goto out;
mutex_lock(&hip->extents_lock);
- hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
+ res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd);
+ if (res) {
+ mutex_unlock(&hip->extents_lock);
+ /* XXX: hfsplus_file_truncate() has no way to report this error */
+ return;
+ }
while (1) {
if (alloc_cnt == hip->first_blocks) {
hfsplus_free_extents(sb, hip->first_extents,
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index d6857523336..81dfd1e495e 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
#include "hfsplus_raw.h"
#define DBG_BNODE_REFS 0x00000001
@@ -110,7 +111,9 @@ struct hfsplus_vh;
struct hfs_btree;
struct hfsplus_sb_info {
+ void *s_vhdr_buf;
struct hfsplus_vh *s_vhdr;
+ void *s_backup_vhdr_buf;
struct hfsplus_vh *s_backup_vhdr;
struct hfs_btree *ext_tree;
struct hfs_btree *cat_tree;
@@ -258,6 +261,15 @@ struct hfsplus_readdir_data {
struct hfsplus_cat_key key;
};
+/*
+ * Find the minimum acceptable I/O size for an hfsplus sb.
+ */
+static inline unsigned short hfsplus_min_io_size(struct super_block *sb)
+{
+ return max_t(unsigned short, bdev_logical_block_size(sb->s_bdev),
+ HFSPLUS_SECTOR_SIZE);
+}
+
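
hfsplus_min_io_size() above pins the filesystem's I/O granularity to whichever is larger, the device's logical block size or the 512-byte HFS+ sector. A runnable sketch of the same max() logic, with 4096 standing in for bdev_logical_block_size() on a 4 KiB-sector device:

	#include <stdio.h>

	#define HFSPLUS_SECTOR_SIZE 512		/* as in hfsplus_raw.h */

	static unsigned short min_io_size(unsigned short logical_block_size)
	{
		return logical_block_size > HFSPLUS_SECTOR_SIZE ?
			logical_block_size : HFSPLUS_SECTOR_SIZE;
	}

	int main(void)
	{
		printf("%u\n", min_io_size(4096));	/* 4 KiB-sector device */
		printf("%u\n", min_io_size(512));	/* classic 512-byte case */
		return 0;
	}
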
#define hfs_btree_open hfsplus_btree_open
#define hfs_btree_close hfsplus_btree_close
#define hfs_btree_write hfsplus_btree_write
@@ -374,7 +386,7 @@ extern const struct file_operations hfsplus_dir_operations;
/* extents.c */
int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
-void hfsplus_ext_write_extent(struct inode *);
+int hfsplus_ext_write_extent(struct inode *);
int hfsplus_get_block(struct inode *, sector_t, struct buffer_head *, int);
int hfsplus_free_fork(struct super_block *, u32,
struct hfsplus_fork_raw *, int);
@@ -436,8 +448,8 @@ int hfsplus_compare_dentry(const struct dentry *parent,
/* wrapper.c */
int hfsplus_read_wrapper(struct super_block *);
int hfs_part_find(struct super_block *, sector_t *, sector_t *);
-int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
- void *data, int rw);
+int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
+ void *buf, void **data, int rw);
/* time macros */
#define __hfsp_mt2ut(t) (be32_to_cpu(t) - 2082844800U)
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index b248a6cfcad..010cd363d08 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -195,11 +195,13 @@ static struct dentry *hfsplus_file_lookup(struct inode *dir,
hip->flags = 0;
set_bit(HFSPLUS_I_RSRC, &hip->flags);
- hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
- err = hfsplus_find_cat(sb, dir->i_ino, &fd);
- if (!err)
- err = hfsplus_cat_read_inode(inode, &fd);
- hfs_find_exit(&fd);
+ err = hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
+ if (!err) {
+ err = hfsplus_find_cat(sb, dir->i_ino, &fd);
+ if (!err)
+ err = hfsplus_cat_read_inode(inode, &fd);
+ hfs_find_exit(&fd);
+ }
if (err) {
iput(inode);
return ERR_PTR(err);
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index 40ad88c12c6..eb355d81e27 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -88,11 +88,12 @@ static int hfs_parse_old_pmap(struct super_block *sb, struct old_pmap *pm,
return -ENOENT;
}
-static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
- sector_t *part_start, sector_t *part_size)
+static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
+ struct new_pmap *pm, sector_t *part_start, sector_t *part_size)
{
struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
int size = be32_to_cpu(pm->pmMapBlkCnt);
+ int buf_size = hfsplus_min_io_size(sb);
int res;
int i = 0;
@@ -107,11 +108,14 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
if (++i >= size)
return -ENOENT;
- res = hfsplus_submit_bio(sb->s_bdev,
- *part_start + HFS_PMAP_BLK + i,
- pm, READ);
- if (res)
- return res;
+ pm = (struct new_pmap *)((u8 *)pm + HFSPLUS_SECTOR_SIZE);
+ if ((u8 *)pm - (u8 *)buf >= buf_size) {
+ res = hfsplus_submit_bio(sb,
+ *part_start + HFS_PMAP_BLK + i,
+ buf, (void **)&pm, READ);
+ if (res)
+ return res;
+ }
} while (pm->pmSig == cpu_to_be16(HFS_NEW_PMAP_MAGIC));
return -ENOENT;
@@ -124,15 +128,15 @@ static int hfs_parse_new_pmap(struct super_block *sb, struct new_pmap *pm,
int hfs_part_find(struct super_block *sb,
sector_t *part_start, sector_t *part_size)
{
- void *data;
+ void *buf, *data;
int res;
- data = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
- if (!data)
+ buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
+ if (!buf)
return -ENOMEM;
- res = hfsplus_submit_bio(sb->s_bdev, *part_start + HFS_PMAP_BLK,
- data, READ);
+ res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
+ buf, &data, READ);
if (res)
goto out;
@@ -141,13 +145,13 @@ int hfs_part_find(struct super_block *sb,
res = hfs_parse_old_pmap(sb, data, part_start, part_size);
break;
case HFS_NEW_PMAP_MAGIC:
- res = hfs_parse_new_pmap(sb, data, part_start, part_size);
+ res = hfs_parse_new_pmap(sb, buf, data, part_start, part_size);
break;
default:
res = -ENOENT;
break;
}
out:
- kfree(data);
+ kfree(buf);
return res;
}
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 84a47b709f5..c106ca22e81 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -73,11 +73,13 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino)
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
inode->i_ino == HFSPLUS_ROOT_CNID) {
- hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
- err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
- if (!err)
- err = hfsplus_cat_read_inode(inode, &fd);
- hfs_find_exit(&fd);
+ err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
+ if (!err) {
+ err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
+ if (!err)
+ err = hfsplus_cat_read_inode(inode, &fd);
+ hfs_find_exit(&fd);
+ }
} else {
err = hfsplus_system_read_inode(inode);
}
@@ -133,9 +135,13 @@ static int hfsplus_system_write_inode(struct inode *inode)
static int hfsplus_write_inode(struct inode *inode,
struct writeback_control *wbc)
{
+ int err;
+
dprint(DBG_INODE, "hfsplus_write_inode: %lu\n", inode->i_ino);
- hfsplus_ext_write_extent(inode);
+ err = hfsplus_ext_write_extent(inode);
+ if (err)
+ return err;
if (inode->i_ino >= HFSPLUS_FIRSTUSER_CNID ||
inode->i_ino == HFSPLUS_ROOT_CNID)
@@ -197,17 +203,17 @@ int hfsplus_sync_fs(struct super_block *sb, int wait)
write_backup = 1;
}
- error2 = hfsplus_submit_bio(sb->s_bdev,
+ error2 = hfsplus_submit_bio(sb,
sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
- sbi->s_vhdr, WRITE_SYNC);
+ sbi->s_vhdr_buf, NULL, WRITE_SYNC);
if (!error)
error = error2;
if (!write_backup)
goto out;
- error2 = hfsplus_submit_bio(sb->s_bdev,
+ error2 = hfsplus_submit_bio(sb,
sbi->part_start + sbi->sect_count - 2,
- sbi->s_backup_vhdr, WRITE_SYNC);
+ sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
if (!error)
error = error2;
out:
@@ -251,8 +257,8 @@ static void hfsplus_put_super(struct super_block *sb)
hfs_btree_close(sbi->ext_tree);
iput(sbi->alloc_file);
iput(sbi->hidden_dir);
- kfree(sbi->s_vhdr);
- kfree(sbi->s_backup_vhdr);
+ kfree(sbi->s_vhdr_buf);
+ kfree(sbi->s_backup_vhdr_buf);
unload_nls(sbi->nls);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
@@ -393,6 +399,13 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
if (!sbi->rsrc_clump_blocks)
sbi->rsrc_clump_blocks = 1;
+ err = generic_check_addressable(sbi->alloc_blksz_shift,
+ sbi->total_blocks);
+ if (err) {
+ printk(KERN_ERR "hfs: filesystem size too large.\n");
+ goto out_free_vhdr;
+ }
+
/* Set up operations so we can load metadata */
sb->s_op = &hfsplus_sops;
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -417,6 +430,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY;
}
+ err = -EINVAL;
+
/* Load metadata objects (B*Trees) */
sbi->ext_tree = hfs_btree_open(sb, HFSPLUS_EXT_CNID);
if (!sbi->ext_tree) {
@@ -447,7 +462,9 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
str.len = sizeof(HFSP_HIDDENDIR_NAME) - 1;
str.name = HFSP_HIDDENDIR_NAME;
- hfs_find_init(sbi->cat_tree, &fd);
+ err = hfs_find_init(sbi->cat_tree, &fd);
+ if (err)
+ goto out_put_root;
hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
hfs_find_exit(&fd);
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index a3f0bfcc881..a32998f29f0 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -142,7 +142,11 @@ int hfsplus_uni2asc(struct super_block *sb,
/* search for single decomposed char */
if (likely(compose))
ce1 = hfsplus_compose_lookup(hfsplus_compose_table, c0);
- if (ce1 && (cc = ce1[0])) {
+ if (ce1)
+ cc = ce1[0];
+ else
+ cc = 0;
+ if (cc) {
/* start of a possibly decomposed Hangul char */
if (cc != 0xffff)
goto done;
@@ -209,7 +213,8 @@ int hfsplus_uni2asc(struct super_block *sb,
i++;
ce2 = ce1;
}
- if ((cc = ce2[0])) {
+ cc = ce2[0];
+ if (cc) {
ip += i;
ustrlen -= i;
goto done;
@@ -301,7 +306,11 @@ int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
while (outlen < HFSPLUS_MAX_STRLEN && len > 0) {
size = asc2unichar(sb, astr, len, &c);
- if (decompose && (dstr = decompose_unichar(c, &dsize))) {
+ if (decompose)
+ dstr = decompose_unichar(c, &dsize);
+ else
+ dstr = NULL;
+ if (dstr) {
if (outlen + dsize > HFSPLUS_MAX_STRLEN)
break;
do {
@@ -346,15 +355,23 @@ int hfsplus_hash_dentry(const struct dentry *dentry, const struct inode *inode,
astr += size;
len -= size;
- if (decompose && (dstr = decompose_unichar(c, &dsize))) {
+ if (decompose)
+ dstr = decompose_unichar(c, &dsize);
+ else
+ dstr = NULL;
+ if (dstr) {
do {
c2 = *dstr++;
- if (!casefold || (c2 = case_fold(c2)))
+ if (casefold)
+ c2 = case_fold(c2);
+ if (!casefold || c2)
hash = partial_name_hash(c2, hash);
} while (--dsize > 0);
} else {
c2 = c;
- if (!casefold || (c2 = case_fold(c2)))
+ if (casefold)
+ c2 = case_fold(c2);
+ if (!casefold || c2)
hash = partial_name_hash(c2, hash);
}
}
@@ -422,12 +439,14 @@ int hfsplus_compare_dentry(const struct dentry *parent,
c1 = *dstr1;
c2 = *dstr2;
if (casefold) {
- if (!(c1 = case_fold(c1))) {
+ c1 = case_fold(c1);
+ if (!c1) {
dstr1++;
dsize1--;
continue;
}
- if (!(c2 = case_fold(c2))) {
+ c2 = case_fold(c2);
+ if (!c2) {
dstr2++;
dsize2--;
continue;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 4ac88ff79aa..10e515a0d45 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -31,25 +31,67 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
complete(bio->bi_private);
}
-int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
- void *data, int rw)
+/*
+ * hfsplus_submit_bio - Perform block I/O
+ * @sb: super block of volume for I/O
+ * @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
+ * @buf: buffer for I/O
+ * @data: output pointer for location of requested data
+ * @rw: direction of I/O
+ *
+ * The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
+ * HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
+ * @data will return a pointer to the start of the requested sector,
+ * which may not be the same location as @buf.
+ *
+ * If @sector is not aligned to the bdev logical block size it will
+ * be rounded down. For writes this means that @buf should contain data
+ * that starts at the rounded-down address. As long as the data was
+ * read using hfsplus_submit_bio() and the same buffer is used things
+ * will work correctly.
+ */
+int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
+ void *buf, void **data, int rw)
{
DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio;
int ret = 0;
+ unsigned int io_size;
+ loff_t start;
+ int offset;
+
+ /*
+ * Align sector to hardware sector size and find offset. We
+ * assume that io_size is a power of two, which _should_
+ * be true.
+ */
+ io_size = hfsplus_min_io_size(sb);
+ start = (loff_t)sector << HFSPLUS_SECTOR_SHIFT;
+ offset = start & (io_size - 1);
+ sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = sector;
- bio->bi_bdev = bdev;
+ bio->bi_bdev = sb->s_bdev;
bio->bi_end_io = hfsplus_end_io_sync;
bio->bi_private = &wait;
- /*
- * We always submit one sector at a time, so bio_add_page must not fail.
- */
- if (bio_add_page(bio, virt_to_page(data), HFSPLUS_SECTOR_SIZE,
- offset_in_page(data)) != HFSPLUS_SECTOR_SIZE)
- BUG();
+ if (!(rw & WRITE) && data)
+ *data = (u8 *)buf + offset;
+
+ while (io_size > 0) {
+ unsigned int page_offset = offset_in_page(buf);
+ unsigned int len = min_t(unsigned int, PAGE_SIZE - page_offset,
+ io_size);
+
+ ret = bio_add_page(bio, virt_to_page(buf), len, page_offset);
+ if (ret != len) {
+ ret = -EIO;
+ goto out;
+ }
+ io_size -= len;
+ buf = (u8 *)buf + len;
+ }
submit_bio(rw, bio);
wait_for_completion(&wait);
@@ -57,8 +99,9 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
+out:
bio_put(bio);
- return ret;
+ return ret < 0 ? ret : 0;
}
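
The alignment dance at the top of hfsplus_submit_bio() reduces to two mask operations on the byte address. A standalone walk-through with illustrative numbers (4096-byte minimum I/O size, a request for 512-byte sector 21; uint64_t stands in for sector_t):

	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9			/* HFSPLUS_SECTOR_SIZE == 512 */

	int main(void)
	{
		unsigned int io_size = 4096;
		uint64_t sector = 21;

		uint64_t start = sector << SECTOR_SHIFT;	/* 10752 */
		unsigned int offset = start & (io_size - 1);	/* 2560 */

		/* Round the sector down to an io_size boundary: 21 -> 16. */
		sector &= ~(uint64_t)((io_size >> SECTOR_SHIFT) - 1);

		printf("aligned sector=%llu, data at buf+%u\n",
		       (unsigned long long)sector, offset);
		return 0;
	}
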
static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
@@ -141,23 +184,19 @@ int hfsplus_read_wrapper(struct super_block *sb)
if (hfsplus_get_last_session(sb, &part_start, &part_size))
goto out;
- if ((u64)part_start + part_size > 0x100000000ULL) {
- pr_err("hfs: volumes larger than 2TB are not supported yet\n");
- goto out;
- }
error = -ENOMEM;
- sbi->s_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
- if (!sbi->s_vhdr)
+ sbi->s_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
+ if (!sbi->s_vhdr_buf)
goto out;
- sbi->s_backup_vhdr = kmalloc(HFSPLUS_SECTOR_SIZE, GFP_KERNEL);
- if (!sbi->s_backup_vhdr)
+ sbi->s_backup_vhdr_buf = kmalloc(hfsplus_min_io_size(sb), GFP_KERNEL);
+ if (!sbi->s_backup_vhdr_buf)
goto out_free_vhdr;
reread:
- error = hfsplus_submit_bio(sb->s_bdev,
- part_start + HFSPLUS_VOLHEAD_SECTOR,
- sbi->s_vhdr, READ);
+ error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
+ sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
+ READ);
if (error)
goto out_free_backup_vhdr;
@@ -172,8 +211,9 @@ reread:
if (!hfsplus_read_mdb(sbi->s_vhdr, &wd))
goto out_free_backup_vhdr;
wd.ablk_size >>= HFSPLUS_SECTOR_SHIFT;
- part_start += wd.ablk_start + wd.embed_start * wd.ablk_size;
- part_size = wd.embed_count * wd.ablk_size;
+ part_start += (sector_t)wd.ablk_start +
+ (sector_t)wd.embed_start * wd.ablk_size;
+ part_size = (sector_t)wd.embed_count * wd.ablk_size;
goto reread;
default:
/*
@@ -186,9 +226,9 @@ reread:
goto reread;
}
- error = hfsplus_submit_bio(sb->s_bdev,
- part_start + part_size - 2,
- sbi->s_backup_vhdr, READ);
+ error = hfsplus_submit_bio(sb, part_start + part_size - 2,
+ sbi->s_backup_vhdr_buf,
+ (void **)&sbi->s_backup_vhdr, READ);
if (error)
goto out_free_backup_vhdr;
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9b45ee84fbc..3a1dafd228d 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -172,7 +172,7 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
tpid = 0;
if (pid_alive(p)) {
- struct task_struct *tracer = tracehook_tracer_task(p);
+ struct task_struct *tracer = ptrace_parent(p);
if (tracer)
tpid = task_pid_nr_ns(tracer, ns);
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fc5bc276769..c47719aaade 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -216,7 +216,7 @@ static struct mm_struct *__check_mem_permission(struct task_struct *task)
if (task_is_stopped_or_traced(task)) {
int match;
rcu_read_lock();
- match = (tracehook_tracer_task(task) == current);
+ match = (ptrace_parent(task) == current);
rcu_read_unlock();
if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
return mm;
diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c
index 87cd0ead863..fb3b5c813a3 100644
--- a/fs/ubifs/commit.c
+++ b/fs/ubifs/commit.c
@@ -78,7 +78,7 @@ static int nothing_to_commit(struct ubifs_info *c)
* If the root TNC node is dirty, we definitely have something to
* commit.
*/
- if (c->zroot.znode && test_bit(DIRTY_ZNODE, &c->zroot.znode->flags))
+ if (c->zroot.znode && ubifs_zn_dirty(c->zroot.znode))
return 0;
/*
@@ -418,7 +418,7 @@ int ubifs_run_commit(struct ubifs_info *c)
spin_lock(&c->cs_lock);
if (c->cmt_state == COMMIT_BROKEN) {
- err = -EINVAL;
+ err = -EROFS;
goto out;
}
@@ -444,7 +444,7 @@ int ubifs_run_commit(struct ubifs_info *c)
* re-check it.
*/
if (c->cmt_state == COMMIT_BROKEN) {
- err = -EINVAL;
+ err = -EROFS;
goto out_cmt_unlock;
}
@@ -576,7 +576,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
struct idx_node *i;
size_t sz;
- if (!(ubifs_chk_flags & UBIFS_CHK_OLD_IDX))
+ if (!dbg_is_chk_index(c))
return 0;
INIT_LIST_HEAD(&list);
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 0bb2bcef0de..eef109a1a92 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -27,13 +27,12 @@
* various local functions of those subsystems.
*/
-#define UBIFS_DBG_PRESERVE_UBI
-
-#include "ubifs.h"
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/math64.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include "ubifs.h"
#ifdef CONFIG_UBIFS_FS_DEBUG
@@ -42,15 +41,6 @@ DEFINE_SPINLOCK(dbg_lock);
static char dbg_key_buf0[128];
static char dbg_key_buf1[128];
-unsigned int ubifs_chk_flags;
-unsigned int ubifs_tst_flags;
-
-module_param_named(debug_chks, ubifs_chk_flags, uint, S_IRUGO | S_IWUSR);
-module_param_named(debug_tsts, ubifs_tst_flags, uint, S_IRUGO | S_IWUSR);
-
-MODULE_PARM_DESC(debug_chks, "Debug check flags");
-MODULE_PARM_DESC(debug_tsts, "Debug special test flags");
-
static const char *get_key_fmt(int fmt)
{
switch (fmt) {
@@ -91,6 +81,28 @@ static const char *get_key_type(int type)
}
}
+static const char *get_dent_type(int type)
+{
+ switch (type) {
+ case UBIFS_ITYPE_REG:
+ return "file";
+ case UBIFS_ITYPE_DIR:
+ return "dir";
+ case UBIFS_ITYPE_LNK:
+ return "symlink";
+ case UBIFS_ITYPE_BLK:
+ return "blkdev";
+ case UBIFS_ITYPE_CHR:
+ return "char dev";
+ case UBIFS_ITYPE_FIFO:
+ return "fifo";
+ case UBIFS_ITYPE_SOCK:
+ return "socket";
+ default:
+ return "unknown/invalid type";
+ }
+}
+
static void sprintf_key(const struct ubifs_info *c, const union ubifs_key *key,
char *buffer)
{
@@ -234,9 +246,13 @@ static void dump_ch(const struct ubifs_ch *ch)
printk(KERN_DEBUG "\tlen %u\n", le32_to_cpu(ch->len));
}
-void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
+void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode)
{
const struct ubifs_inode *ui = ubifs_inode(inode);
+ struct qstr nm = { .name = NULL };
+ union ubifs_key key;
+ struct ubifs_dent_node *dent, *pdent = NULL;
+ int count = 2;
printk(KERN_DEBUG "Dump in-memory inode:");
printk(KERN_DEBUG "\tinode %lu\n", inode->i_ino);
@@ -270,6 +286,32 @@ void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode)
printk(KERN_DEBUG "\tlast_page_read %lu\n", ui->last_page_read);
printk(KERN_DEBUG "\tread_in_a_row %lu\n", ui->read_in_a_row);
printk(KERN_DEBUG "\tdata_len %d\n", ui->data_len);
+
+ if (!S_ISDIR(inode->i_mode))
+ return;
+
+ printk(KERN_DEBUG "List of directory entries:\n");
+ ubifs_assert(!mutex_is_locked(&c->tnc_mutex));
+
+ lowest_dent_key(c, &key, inode->i_ino);
+ while (1) {
+ dent = ubifs_tnc_next_ent(c, &key, &nm);
+ if (IS_ERR(dent)) {
+ if (PTR_ERR(dent) != -ENOENT)
+ printk(KERN_DEBUG "error %ld\n", PTR_ERR(dent));
+ break;
+ }
+
+ printk(KERN_DEBUG "\t%d: %s (%s)\n",
+ count++, dent->name, get_dent_type(dent->type));
+
+ nm.name = dent->name;
+ nm.len = le16_to_cpu(dent->nlen);
+ kfree(pdent);
+ pdent = dent;
+ key_read(c, &dent->key, &key);
+ }
+ kfree(pdent);
}
void dbg_dump_node(const struct ubifs_info *c, const void *node)
@@ -278,7 +320,7 @@ void dbg_dump_node(const struct ubifs_info *c, const void *node)
union ubifs_key key;
const struct ubifs_ch *ch = node;
- if (dbg_failure_mode)
+ if (dbg_is_tst_rcvry(c))
return;
/* If the magic is incorrect, just hexdump the first bytes */
@@ -834,7 +876,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum)
struct ubifs_scan_node *snod;
void *buf;
- if (dbg_failure_mode)
+ if (dbg_is_tst_rcvry(c))
return;
printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
@@ -1080,6 +1122,7 @@ out:
/**
* dbg_check_synced_i_size - check synchronized inode size.
+ * @c: UBIFS file-system description object
* @inode: inode to check
*
* If inode is clean, synchronized inode size has to be equivalent to current
@@ -1087,12 +1130,12 @@ out:
* has to be locked). Returns %0 if the synchronized inode size is correct, and
* %-EINVAL if not.
*/
-int dbg_check_synced_i_size(struct inode *inode)
+int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode)
{
int err = 0;
struct ubifs_inode *ui = ubifs_inode(inode);
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
if (!S_ISREG(inode->i_mode))
return 0;
@@ -1125,7 +1168,7 @@ int dbg_check_synced_i_size(struct inode *inode)
* Note, it is good idea to make sure the @dir->i_mutex is locked before
* calling this function.
*/
-int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
+int dbg_check_dir(struct ubifs_info *c, const struct inode *dir)
{
unsigned int nlink = 2;
union ubifs_key key;
@@ -1133,7 +1176,7 @@ int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
struct qstr nm = { .name = NULL };
loff_t size = UBIFS_INO_NODE_SZ;
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
if (!S_ISDIR(dir->i_mode))
@@ -1167,12 +1210,14 @@ int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir)
"but calculated size is %llu", dir->i_ino,
(unsigned long long)i_size_read(dir),
(unsigned long long)size);
+ dbg_dump_inode(c, dir);
dump_stack();
return -EINVAL;
}
if (dir->i_nlink != nlink) {
ubifs_err("directory inode %lu has nlink %u, but calculated "
"nlink is %u", dir->i_ino, dir->i_nlink, nlink);
+ dbg_dump_inode(c, dir);
dump_stack();
return -EINVAL;
}
@@ -1489,7 +1534,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
long clean_cnt = 0, dirty_cnt = 0;
int err, last;
- if (!(ubifs_chk_flags & UBIFS_CHK_TNC))
+ if (!dbg_is_chk_index(c))
return 0;
ubifs_assert(mutex_is_locked(&c->tnc_mutex));
@@ -1736,7 +1781,7 @@ int dbg_check_idx_size(struct ubifs_info *c, long long idx_size)
int err;
long long calc = 0;
- if (!(ubifs_chk_flags & UBIFS_CHK_IDX_SZ))
+ if (!dbg_is_chk_index(c))
return 0;
err = dbg_walk_index(c, NULL, add_size, &calc);
@@ -2312,7 +2357,7 @@ int dbg_check_filesystem(struct ubifs_info *c)
int err;
struct fsck_data fsckd;
- if (!(ubifs_chk_flags & UBIFS_CHK_FS))
+ if (!dbg_is_chk_fs(c))
return 0;
fsckd.inodes = RB_ROOT;
@@ -2347,7 +2392,7 @@ int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head)
struct list_head *cur;
struct ubifs_scan_node *sa, *sb;
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
for (cur = head->next; cur->next != head; cur = cur->next) {
@@ -2414,7 +2459,7 @@ int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head)
struct list_head *cur;
struct ubifs_scan_node *sa, *sb;
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
for (cur = head->next; cur->next != head; cur = cur->next) {
@@ -2491,214 +2536,141 @@ error_dump:
return 0;
}
-int dbg_force_in_the_gaps(void)
+static inline int chance(unsigned int n, unsigned int out_of)
{
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
- return 0;
+ return !!((random32() % out_of) + 1 <= n);
- return !(random32() & 7);
}
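
chance(n, out_of) replaces the old fixed simple_rand() thresholds: random32() modulo out_of yields 0..out_of-1, so after the +1 exactly n of the out_of equally likely values satisfy the comparison. A quick userspace check, rand() standing in for random32():

	#include <stdio.h>
	#include <stdlib.h>

	static int chance(unsigned int n, unsigned int out_of)
	{
		return ((unsigned int)rand() % out_of) + 1 <= n;
	}

	int main(void)
	{
		unsigned int hits = 0, i;

		srand(1);
		for (i = 0; i < 1000000; i++)
			hits += chance(19, 20);
		printf("observed %.4f, expected 0.9500\n", hits / 1e6);
		return 0;
	}
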
-/* Failure mode for recovery testing */
-
-#define chance(n, d) (simple_rand() <= (n) * 32768LL / (d))
-
-struct failure_mode_info {
- struct list_head list;
- struct ubifs_info *c;
-};
-
-static LIST_HEAD(fmi_list);
-static DEFINE_SPINLOCK(fmi_lock);
-
-static unsigned int next;
-
-static int simple_rand(void)
-{
- if (next == 0)
- next = current->pid;
- next = next * 1103515245 + 12345;
- return (next >> 16) & 32767;
-}
-
-static void failure_mode_init(struct ubifs_info *c)
-{
- struct failure_mode_info *fmi;
-
- fmi = kmalloc(sizeof(struct failure_mode_info), GFP_NOFS);
- if (!fmi) {
- ubifs_err("Failed to register failure mode - no memory");
- return;
- }
- fmi->c = c;
- spin_lock(&fmi_lock);
- list_add_tail(&fmi->list, &fmi_list);
- spin_unlock(&fmi_lock);
-}
-
-static void failure_mode_exit(struct ubifs_info *c)
+static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
{
- struct failure_mode_info *fmi, *tmp;
-
- spin_lock(&fmi_lock);
- list_for_each_entry_safe(fmi, tmp, &fmi_list, list)
- if (fmi->c == c) {
- list_del(&fmi->list);
- kfree(fmi);
- }
- spin_unlock(&fmi_lock);
-}
-
-static struct ubifs_info *dbg_find_info(struct ubi_volume_desc *desc)
-{
- struct failure_mode_info *fmi;
-
- spin_lock(&fmi_lock);
- list_for_each_entry(fmi, &fmi_list, list)
- if (fmi->c->ubi == desc) {
- struct ubifs_info *c = fmi->c;
-
- spin_unlock(&fmi_lock);
- return c;
- }
- spin_unlock(&fmi_lock);
- return NULL;
-}
-
-static int in_failure_mode(struct ubi_volume_desc *desc)
-{
- struct ubifs_info *c = dbg_find_info(desc);
-
- if (c && dbg_failure_mode)
- return c->dbg->failure_mode;
- return 0;
-}
+ struct ubifs_debug_info *d = c->dbg;
-static int do_fail(struct ubi_volume_desc *desc, int lnum, int write)
-{
- struct ubifs_info *c = dbg_find_info(desc);
- struct ubifs_debug_info *d;
+ ubifs_assert(dbg_is_tst_rcvry(c));
- if (!c || !dbg_failure_mode)
- return 0;
- d = c->dbg;
- if (d->failure_mode)
- return 1;
- if (!d->fail_cnt) {
- /* First call - decide delay to failure */
+ if (!d->pc_cnt) {
+ /* First call - decide delay to the power cut */
if (chance(1, 2)) {
- unsigned int delay = 1 << (simple_rand() >> 11);
+ unsigned long delay;
if (chance(1, 2)) {
- d->fail_delay = 1;
- d->fail_timeout = jiffies +
- msecs_to_jiffies(delay);
- dbg_rcvry("failing after %ums", delay);
+ d->pc_delay = 1;
+ /* Fail within 1 minute */
+ delay = random32() % 60000;
+ d->pc_timeout = jiffies;
+ d->pc_timeout += msecs_to_jiffies(delay);
+ ubifs_warn("failing after %lums", delay);
} else {
- d->fail_delay = 2;
- d->fail_cnt_max = delay;
- dbg_rcvry("failing after %u calls", delay);
+ d->pc_delay = 2;
+ /* Fail within 10000 operations */
+ delay = random32() % 10000;
+ d->pc_cnt_max = delay;
+ ubifs_warn("failing after %lu calls", delay);
}
}
- d->fail_cnt += 1;
+
+ d->pc_cnt += 1;
}
+
/* Determine if failure delay has expired */
- if (d->fail_delay == 1) {
- if (time_before(jiffies, d->fail_timeout))
+ if (d->pc_delay == 1 && time_before(jiffies, d->pc_timeout))
return 0;
- } else if (d->fail_delay == 2)
- if (d->fail_cnt++ < d->fail_cnt_max)
+ if (d->pc_delay == 2 && d->pc_cnt++ < d->pc_cnt_max)
return 0;
+
if (lnum == UBIFS_SB_LNUM) {
- if (write) {
- if (chance(1, 2))
- return 0;
- } else if (chance(19, 20))
+ if (write && chance(1, 2))
+ return 0;
+ if (chance(19, 20))
return 0;
- dbg_rcvry("failing in super block LEB %d", lnum);
+ ubifs_warn("failing in super block LEB %d", lnum);
} else if (lnum == UBIFS_MST_LNUM || lnum == UBIFS_MST_LNUM + 1) {
if (chance(19, 20))
return 0;
- dbg_rcvry("failing in master LEB %d", lnum);
+ ubifs_warn("failing in master LEB %d", lnum);
} else if (lnum >= UBIFS_LOG_LNUM && lnum <= c->log_last) {
- if (write) {
- if (chance(99, 100))
- return 0;
- } else if (chance(399, 400))
+ if (write && chance(99, 100))
return 0;
- dbg_rcvry("failing in log LEB %d", lnum);
+ if (chance(399, 400))
+ return 0;
+ ubifs_warn("failing in log LEB %d", lnum);
} else if (lnum >= c->lpt_first && lnum <= c->lpt_last) {
- if (write) {
- if (chance(7, 8))
- return 0;
- } else if (chance(19, 20))
+ if (write && chance(7, 8))
return 0;
- dbg_rcvry("failing in LPT LEB %d", lnum);
+ if (chance(19, 20))
+ return 0;
+ ubifs_warn("failing in LPT LEB %d", lnum);
} else if (lnum >= c->orph_first && lnum <= c->orph_last) {
- if (write) {
- if (chance(1, 2))
- return 0;
- } else if (chance(9, 10))
+ if (write && chance(1, 2))
+ return 0;
+ if (chance(9, 10))
return 0;
- dbg_rcvry("failing in orphan LEB %d", lnum);
+ ubifs_warn("failing in orphan LEB %d", lnum);
} else if (lnum == c->ihead_lnum) {
if (chance(99, 100))
return 0;
- dbg_rcvry("failing in index head LEB %d", lnum);
+ ubifs_warn("failing in index head LEB %d", lnum);
} else if (c->jheads && lnum == c->jheads[GCHD].wbuf.lnum) {
if (chance(9, 10))
return 0;
- dbg_rcvry("failing in GC head LEB %d", lnum);
+ ubifs_warn("failing in GC head LEB %d", lnum);
} else if (write && !RB_EMPTY_ROOT(&c->buds) &&
!ubifs_search_bud(c, lnum)) {
if (chance(19, 20))
return 0;
- dbg_rcvry("failing in non-bud LEB %d", lnum);
+ ubifs_warn("failing in non-bud LEB %d", lnum);
} else if (c->cmt_state == COMMIT_RUNNING_BACKGROUND ||
c->cmt_state == COMMIT_RUNNING_REQUIRED) {
if (chance(999, 1000))
return 0;
- dbg_rcvry("failing in bud LEB %d commit running", lnum);
+ ubifs_warn("failing in bud LEB %d commit running", lnum);
} else {
if (chance(9999, 10000))
return 0;
- dbg_rcvry("failing in bud LEB %d commit not running", lnum);
+ ubifs_warn("failing in bud LEB %d commit not running", lnum);
}
- ubifs_err("*** SETTING FAILURE MODE ON (LEB %d) ***", lnum);
- d->failure_mode = 1;
+
+ d->pc_happened = 1;
+ ubifs_warn("========== Power cut emulated ==========");
dump_stack();
return 1;
}
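
power_cut_emulated() arms itself once, in one of two modes, and only then starts rolling the per-LEB dice above. A standalone sketch of just the arming logic (illustrative only, with libc time() and rand() in place of jiffies and random32()):

	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	struct pc_state {
		int delay;		/* 1: time based, 2: call-count based */
		time_t timeout;
		unsigned int cnt, cnt_max;
	};

	/* Mirrors the expiry checks: no power cut until the deadline
	 * passes or the call budget is used up. */
	static int expired(struct pc_state *d)
	{
		if (d->delay == 1)
			return time(NULL) >= d->timeout;
		return d->cnt++ >= d->cnt_max;
	}

	int main(void)
	{
		struct pc_state d = { .delay = 2, .cnt_max = rand() % 10000 };
		unsigned int calls = 0;

		while (!expired(&d))
			calls++;
		printf("power cut after %u calls (budget %u)\n", calls, d.cnt_max);
		return 0;
	}
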
-static void cut_data(const void *buf, int len)
+static void cut_data(const void *buf, unsigned int len)
{
- int flen, i;
+ unsigned int from, to, i, ffs = chance(1, 2);
unsigned char *p = (void *)buf;
- flen = (len * (long long)simple_rand()) >> 15;
- for (i = flen; i < len; i++)
- p[i] = 0xff;
-}
+ from = random32() % (len + 1);
+ if (chance(1, 2))
+ to = random32() % (len - from + 1);
+ else
+ to = len;
-int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
- int len, int check)
-{
- if (in_failure_mode(desc))
- return -EROFS;
- return ubi_leb_read(desc, lnum, buf, offset, len, check);
+ if (from < to)
+ ubifs_warn("filled bytes %u-%u with %s", from, to - 1,
+ ffs ? "0xFFs" : "random data");
+
+ if (ffs)
+ for (i = from; i < to; i++)
+ p[i] = 0xFF;
+ else
+ for (i = from; i < to; i++)
+ p[i] = random32() % 0x100;
}
-int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype)
+int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf,
+ int offs, int len, int dtype)
{
int err, failing;
- if (in_failure_mode(desc))
+ if (c->dbg->pc_happened)
return -EROFS;
- failing = do_fail(desc, lnum, 1);
+
+ failing = power_cut_emulated(c, lnum, 1);
if (failing)
cut_data(buf, len);
- err = ubi_leb_write(desc, lnum, buf, offset, len, dtype);
+ err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
if (err)
return err;
if (failing)
@@ -2706,162 +2678,207 @@ int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
return 0;
}
-int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
+int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf,
int len, int dtype)
{
int err;
- if (do_fail(desc, lnum, 1))
+ if (c->dbg->pc_happened)
return -EROFS;
- err = ubi_leb_change(desc, lnum, buf, len, dtype);
+ if (power_cut_emulated(c, lnum, 1))
+ return -EROFS;
+ err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
if (err)
return err;
- if (do_fail(desc, lnum, 1))
+ if (power_cut_emulated(c, lnum, 1))
return -EROFS;
return 0;
}
-int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum)
+int dbg_leb_unmap(struct ubifs_info *c, int lnum)
{
int err;
- if (do_fail(desc, lnum, 0))
+ if (c->dbg->pc_happened)
+ return -EROFS;
+ if (power_cut_emulated(c, lnum, 0))
return -EROFS;
- err = ubi_leb_erase(desc, lnum);
+ err = ubi_leb_unmap(c->ubi, lnum);
if (err)
return err;
- if (do_fail(desc, lnum, 0))
+ if (power_cut_emulated(c, lnum, 0))
return -EROFS;
return 0;
}
-int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum)
+int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype)
{
int err;
- if (do_fail(desc, lnum, 0))
+ if (c->dbg->pc_happened)
return -EROFS;
- err = ubi_leb_unmap(desc, lnum);
+ if (power_cut_emulated(c, lnum, 0))
+ return -EROFS;
+ err = ubi_leb_map(c->ubi, lnum, dtype);
if (err)
return err;
- if (do_fail(desc, lnum, 0))
+ if (power_cut_emulated(c, lnum, 0))
return -EROFS;
return 0;
}
-int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum)
-{
- if (in_failure_mode(desc))
- return -EROFS;
- return ubi_is_mapped(desc, lnum);
-}
+/*
+ * Root directory for UBIFS stuff in debugfs. Contains sub-directories which
+ * contain the stuff specific to particular file-system mounts.
+ */
+static struct dentry *dfs_rootdir;
-int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+static int dfs_file_open(struct inode *inode, struct file *file)
{
- int err;
-
- if (do_fail(desc, lnum, 0))
- return -EROFS;
- err = ubi_leb_map(desc, lnum, dtype);
- if (err)
- return err;
- if (do_fail(desc, lnum, 0))
- return -EROFS;
- return 0;
+ file->private_data = inode->i_private;
+ return nonseekable_open(inode, file);
}
/**
- * ubifs_debugging_init - initialize UBIFS debugging.
- * @c: UBIFS file-system description object
+ * provide_user_output - provide output to the user reading a debugfs file.
+ * @val: boolean value for the answer
+ * @u: the buffer to store the answer at
+ * @count: size of the buffer
+ * @ppos: position in the @u output buffer
*
- * This function initializes debugging-related data for the file system.
- * Returns zero in case of success and a negative error code in case of
+ * This is a simple helper function which stores the @val boolean value in the
+ * user buffer when the user reads one of the UBIFS debugfs files. Returns the
+ * number of bytes written to @u in case of success and a negative error code
+ * in case of failure.
*/
-int ubifs_debugging_init(struct ubifs_info *c)
+static int provide_user_output(int val, char __user *u, size_t count,
+ loff_t *ppos)
{
- c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
- if (!c->dbg)
- return -ENOMEM;
+ char buf[3];
- failure_mode_init(c);
- return 0;
+ if (val)
+ buf[0] = '1';
+ else
+ buf[0] = '0';
+ buf[1] = '\n';
+ buf[2] = 0x00;
+
+ return simple_read_from_buffer(u, count, ppos, buf, 2);
}
-/**
- * ubifs_debugging_exit - free debugging data.
- * @c: UBIFS file-system description object
- */
-void ubifs_debugging_exit(struct ubifs_info *c)
+static ssize_t dfs_file_read(struct file *file, char __user *u, size_t count,
+ loff_t *ppos)
{
- failure_mode_exit(c);
- kfree(c->dbg);
-}
+ struct dentry *dent = file->f_path.dentry;
+ struct ubifs_info *c = file->private_data;
+ struct ubifs_debug_info *d = c->dbg;
+ int val;
+
+ if (dent == d->dfs_chk_gen)
+ val = d->chk_gen;
+ else if (dent == d->dfs_chk_index)
+ val = d->chk_index;
+ else if (dent == d->dfs_chk_orph)
+ val = d->chk_orph;
+ else if (dent == d->dfs_chk_lprops)
+ val = d->chk_lprops;
+ else if (dent == d->dfs_chk_fs)
+ val = d->chk_fs;
+ else if (dent == d->dfs_tst_rcvry)
+ val = d->tst_rcvry;
+ else
+ return -EINVAL;
-/*
- * Root directory for UBIFS stuff in debugfs. Contains sub-directories which
- * contain the stuff specific to particular file-system mounts.
- */
-static struct dentry *dfs_rootdir;
+ return provide_user_output(val, u, count, ppos);
+}
/**
- * dbg_debugfs_init - initialize debugfs file-system.
+ * interpret_user_input - interpret user debugfs file input.
+ * @u: user-provided buffer with the input
+ * @count: buffer size
*
- * UBIFS uses debugfs file-system to expose various debugging knobs to
- * user-space. This function creates "ubifs" directory in the debugfs
- * file-system. Returns zero in case of success and a negative error code in
- * case of failure.
+ * This is a helper function which interprets user input to a boolean UBIFS
+ * debugfs file. Returns %0 or %1 in case of success and a negative error code
+ * in case of failure.
*/
-int dbg_debugfs_init(void)
+static int interpret_user_input(const char __user *u, size_t count)
{
- dfs_rootdir = debugfs_create_dir("ubifs", NULL);
- if (IS_ERR(dfs_rootdir)) {
- int err = PTR_ERR(dfs_rootdir);
- ubifs_err("cannot create \"ubifs\" debugfs directory, "
- "error %d\n", err);
- return err;
- }
+ size_t buf_size;
+ char buf[8];
- return 0;
-}
+ buf_size = min_t(size_t, count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, u, buf_size))
+ return -EFAULT;
-/**
- * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system.
- */
-void dbg_debugfs_exit(void)
-{
- debugfs_remove(dfs_rootdir);
-}
+ if (buf[0] == '1')
+ return 1;
+ else if (buf[0] == '0')
+ return 0;
-static int open_debugfs_file(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return nonseekable_open(inode, file);
+ return -EINVAL;
}
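
From userspace the per-knob contract is plain: write a single '0' or '1', read back two bytes. A POSIX sketch; the path assumes debugfs is mounted at /sys/kernel/debug and that a volume ubi0_0 is mounted (the per-FS knob files are created later in this patch):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define KNOB "/sys/kernel/debug/ubifs/ubi0_0/chk_general"

	int main(void)
	{
		char buf[3] = "";
		int fd = open(KNOB, O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, "1", 1) != 1)	/* interpret_user_input(): '0'/'1' */
			perror("write");
		close(fd);

		fd = open(KNOB, O_RDONLY);	/* files are nonseekable; reopen */
		if (fd < 0)
			return 1;
		if (read(fd, buf, 2) == 2)	/* provide_user_output(): "1\n" */
			fputs(buf, stdout);
		close(fd);
		return 0;
	}
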
-static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t dfs_file_write(struct file *file, const char __user *u,
+ size_t count, loff_t *ppos)
{
struct ubifs_info *c = file->private_data;
struct ubifs_debug_info *d = c->dbg;
+ struct dentry *dent = file->f_path.dentry;
+ int val;
- if (file->f_path.dentry == d->dfs_dump_lprops)
+ /*
+ * TODO: this is racy - the file-system might have already been
+ * unmounted and we'd oops in this case. The plan is to fix it with
+ * help of 'iterate_supers_type()' which we should have in v3.0: when
+ * a debugfs file is opened, we remember the FS's UUID in
+ * file->private_data. Then, whenever we access the FS via a debugfs
+ * file, we iterate all UBIFS superblocks, find the one with the same
+ * UUID, and take the locking right.
+ *
+ * The other way to go suggested by Al Viro is to create a separate
+ * 'ubifs-debug' file-system instead.
+ */
+ if (file->f_path.dentry == d->dfs_dump_lprops) {
dbg_dump_lprops(c);
- else if (file->f_path.dentry == d->dfs_dump_budg)
+ return count;
+ }
+ if (file->f_path.dentry == d->dfs_dump_budg) {
dbg_dump_budg(c, &c->bi);
- else if (file->f_path.dentry == d->dfs_dump_tnc) {
+ return count;
+ }
+ if (file->f_path.dentry == d->dfs_dump_tnc) {
mutex_lock(&c->tnc_mutex);
dbg_dump_tnc(c);
mutex_unlock(&c->tnc_mutex);
- } else
+ return count;
+ }
+
+ val = interpret_user_input(u, count);
+ if (val < 0)
+ return val;
+
+ if (dent == d->dfs_chk_gen)
+ d->chk_gen = val;
+ else if (dent == d->dfs_chk_index)
+ d->chk_index = val;
+ else if (dent == d->dfs_chk_orph)
+ d->chk_orph = val;
+ else if (dent == d->dfs_chk_lprops)
+ d->chk_lprops = val;
+ else if (dent == d->dfs_chk_fs)
+ d->chk_fs = val;
+ else if (dent == d->dfs_tst_rcvry)
+ d->tst_rcvry = val;
+ else
return -EINVAL;
return count;
}
static const struct file_operations dfs_fops = {
- .open = open_debugfs_file,
- .write = write_debugfs_file,
+ .open = dfs_file_open,
+ .read = dfs_file_read,
+ .write = dfs_file_write,
.owner = THIS_MODULE,
.llseek = no_llseek,
};
@@ -2880,12 +2897,20 @@ static const struct file_operations dfs_fops = {
*/
int dbg_debugfs_init_fs(struct ubifs_info *c)
{
- int err;
+ int err, n;
const char *fname;
struct dentry *dent;
struct ubifs_debug_info *d = c->dbg;
- sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
+ n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
+ c->vi.ubi_num, c->vi.vol_id);
+ if (n == UBIFS_DFS_DIR_LEN) {
+ /* The array size is too small */
+ fname = UBIFS_DFS_DIR_NAME;
+ dent = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
fname = d->dfs_dir_name;
dent = debugfs_create_dir(fname, dfs_rootdir);
if (IS_ERR_OR_NULL(dent))
@@ -2910,13 +2935,55 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
goto out_remove;
d->dfs_dump_tnc = dent;
+ fname = "chk_general";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_gen = dent;
+
+ fname = "chk_index";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_index = dent;
+
+ fname = "chk_orphans";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_orph = dent;
+
+ fname = "chk_lprops";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_lprops = dent;
+
+ fname = "chk_fs";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_chk_fs = dent;
+
+ fname = "tst_recovery";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, d->dfs_dir, c,
+ &dfs_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ d->dfs_tst_rcvry = dent;
+
return 0;
out_remove:
debugfs_remove_recursive(d->dfs_dir);
out:
err = dent ? PTR_ERR(dent) : -ENODEV;
- ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
+ ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
fname, err);
return err;
}
@@ -2930,4 +2997,179 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c)
debugfs_remove_recursive(c->dbg->dfs_dir);
}
+struct ubifs_global_debug_info ubifs_dbg;
+
+static struct dentry *dfs_chk_gen;
+static struct dentry *dfs_chk_index;
+static struct dentry *dfs_chk_orph;
+static struct dentry *dfs_chk_lprops;
+static struct dentry *dfs_chk_fs;
+static struct dentry *dfs_tst_rcvry;
+
+static ssize_t dfs_global_file_read(struct file *file, char __user *u,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dent = file->f_path.dentry;
+ int val;
+
+ if (dent == dfs_chk_gen)
+ val = ubifs_dbg.chk_gen;
+ else if (dent == dfs_chk_index)
+ val = ubifs_dbg.chk_index;
+ else if (dent == dfs_chk_orph)
+ val = ubifs_dbg.chk_orph;
+ else if (dent == dfs_chk_lprops)
+ val = ubifs_dbg.chk_lprops;
+ else if (dent == dfs_chk_fs)
+ val = ubifs_dbg.chk_fs;
+ else if (dent == dfs_tst_rcvry)
+ val = ubifs_dbg.tst_rcvry;
+ else
+ return -EINVAL;
+
+ return provide_user_output(val, u, count, ppos);
+}
+
+static ssize_t dfs_global_file_write(struct file *file, const char __user *u,
+ size_t count, loff_t *ppos)
+{
+ struct dentry *dent = file->f_path.dentry;
+ int val;
+
+ val = interpret_user_input(u, count);
+ if (val < 0)
+ return val;
+
+ if (dent == dfs_chk_gen)
+ ubifs_dbg.chk_gen = val;
+ else if (dent == dfs_chk_index)
+ ubifs_dbg.chk_index = val;
+ else if (dent == dfs_chk_orph)
+ ubifs_dbg.chk_orph = val;
+ else if (dent == dfs_chk_lprops)
+ ubifs_dbg.chk_lprops = val;
+ else if (dent == dfs_chk_fs)
+ ubifs_dbg.chk_fs = val;
+ else if (dent == dfs_tst_rcvry)
+ ubifs_dbg.tst_rcvry = val;
+ else
+ return -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations dfs_global_fops = {
+ .read = dfs_global_file_read,
+ .write = dfs_global_file_write,
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+};
+
+/**
+ * dbg_debugfs_init - initialize debugfs file-system.
+ *
+ * UBIFS uses debugfs file-system to expose various debugging knobs to
+ * user-space. This function creates "ubifs" directory in the debugfs
+ * file-system. Returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int dbg_debugfs_init(void)
+{
+ int err;
+ const char *fname;
+ struct dentry *dent;
+
+ fname = "ubifs";
+ dent = debugfs_create_dir(fname, NULL);
+ if (IS_ERR_OR_NULL(dent))
+ goto out;
+ dfs_rootdir = dent;
+
+ fname = "chk_general";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
+ &dfs_global_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dfs_chk_gen = dent;
+
+ fname = "chk_index";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
+ &dfs_global_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dfs_chk_index = dent;
+
+ fname = "chk_orphans";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
+ &dfs_global_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dfs_chk_orph = dent;
+
+ fname = "chk_lprops";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
+ &dfs_global_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dfs_chk_lprops = dent;
+
+ fname = "chk_fs";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
+ &dfs_global_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dfs_chk_fs = dent;
+
+ fname = "tst_recovery";
+ dent = debugfs_create_file(fname, S_IRUSR | S_IWUSR, dfs_rootdir, NULL,
+ &dfs_global_fops);
+ if (IS_ERR_OR_NULL(dent))
+ goto out_remove;
+ dfs_tst_rcvry = dent;
+
+ return 0;
+
+out_remove:
+ debugfs_remove_recursive(dfs_rootdir);
+out:
+ err = dent ? PTR_ERR(dent) : -ENODEV;
+ ubifs_err("cannot create \"%s\" debugfs file or directory, error %d\n",
+ fname, err);
+ return err;
+}
+
+/**
+ * dbg_debugfs_exit - remove the "ubifs" directory from debugfs file-system.
+ */
+void dbg_debugfs_exit(void)
+{
+ debugfs_remove_recursive(dfs_rootdir);
+}
+
+/**
+ * ubifs_debugging_init - initialize UBIFS debugging.
+ * @c: UBIFS file-system description object
+ *
+ * This function initializes debugging-related data for the file system.
+ * Returns zero in case of success and a negative error code in case of
+ * failure.
+ */
+int ubifs_debugging_init(struct ubifs_info *c)
+{
+ c->dbg = kzalloc(sizeof(struct ubifs_debug_info), GFP_KERNEL);
+ if (!c->dbg)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * ubifs_debugging_exit - free debugging data.
+ * @c: UBIFS file-system description object
+ */
+void ubifs_debugging_exit(struct ubifs_info *c)
+{
+ kfree(c->dbg);
+}
+
#endif /* CONFIG_UBIFS_FS_DEBUG */
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index a811ac4a26b..45174b53437 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -31,18 +31,25 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c,
#ifdef CONFIG_UBIFS_FS_DEBUG
-#include <linux/random.h>
+/*
+ * The UBIFS debugfs directory name pattern and maximum name length (3 for "ubi"
+ * + 1 for "_" and plus 2x2 for 2 UBI numbers and 1 for the trailing zero byte.
+ */
+#define UBIFS_DFS_DIR_NAME "ubi%d_%d"
+#define UBIFS_DFS_DIR_LEN (3 + 1 + 2*2 + 1)
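
The length budget works out as long as both UBI numbers stay within two digits; dbg_debugfs_init_fs() later treats an snprintf() result equal to UBIFS_DFS_DIR_LEN as overflow. A standalone check of the arithmetic:

	#include <stdio.h>

	#define UBIFS_DFS_DIR_NAME "ubi%d_%d"
	#define UBIFS_DFS_DIR_LEN  (3 + 1 + 2*2 + 1)

	int main(void)
	{
		char name[UBIFS_DFS_DIR_LEN + 1];
		int n = snprintf(name, sizeof(name), UBIFS_DFS_DIR_NAME, 99, 99);

		/* "ubi99_99": n == 8 fits; ubi_num 100 would give n == 9,
		 * the overflow case the init code rejects. */
		printf("%s (n=%d, budget=%d)\n", name, n, UBIFS_DFS_DIR_LEN);
		return 0;
	}
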
/**
* ubifs_debug_info - per-FS debugging information.
* @old_zroot: old index root - used by 'dbg_check_old_index()'
* @old_zroot_level: old index root level - used by 'dbg_check_old_index()'
* @old_zroot_sqnum: old index root sqnum - used by 'dbg_check_old_index()'
- * @failure_mode: failure mode for recovery testing
- * @fail_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls
- * @fail_timeout: time in jiffies when delay of failure mode expires
- * @fail_cnt: current number of calls to failure mode I/O functions
- * @fail_cnt_max: number of calls by which to delay failure mode
+ *
+ * @pc_happened: non-zero if an emulated power cut happened
+ * @pc_delay: 0=>don't delay, 1=>delay a time, 2=>delay a number of calls
+ * @pc_timeout: time in jiffies when the emulated power cut happens
+ * @pc_cnt: current number of calls to the power cut emulation I/O helpers
+ * @pc_cnt_max: number of calls by which to delay the emulated power cut
+ *
* @chk_lpt_sz: used by LPT tree size checker
* @chk_lpt_sz2: used by LPT tree size checker
* @chk_lpt_wastage: used by LPT tree size checker
@@ -56,21 +63,36 @@ typedef int (*dbg_znode_callback)(struct ubifs_info *c,
* @saved_free: saved amount of free space
* @saved_idx_gc_cnt: saved value of @c->idx_gc_cnt
*
+ * @chk_gen: if general extra checks are enabled
+ * @chk_index: if index extra checks are enabled
+ * @chk_orph: if orphans extra checks are enabled
+ * @chk_lprops: if lprops extra checks are enabled
+ * @chk_fs: if UBIFS contents extra checks are enabled
+ * @tst_rcvry: if UBIFS recovery testing mode is enabled
+ *
* @dfs_dir_name: name of debugfs directory containing this file-system's files
* @dfs_dir: direntry object of the file-system debugfs directory
* @dfs_dump_lprops: "dump lprops" debugfs knob
* @dfs_dump_budg: "dump budgeting information" debugfs knob
* @dfs_dump_tnc: "dump TNC" debugfs knob
+ * @dfs_chk_gen: debugfs knob to enable UBIFS general extra checks
+ * @dfs_chk_index: debugfs knob to enable UBIFS index extra checks
+ * @dfs_chk_orph: debugfs knob to enable UBIFS orphans extra checks
+ * @dfs_chk_lprops: debugfs knob to enable UBIFS LEB properties extra checks
+ * @dfs_chk_fs: debugfs knob to enable UBIFS contents extra checks
+ * @dfs_tst_rcvry: debugfs knob to enable UBIFS recovery testing
*/
struct ubifs_debug_info {
struct ubifs_zbranch old_zroot;
int old_zroot_level;
unsigned long long old_zroot_sqnum;
- int failure_mode;
- int fail_delay;
- unsigned long fail_timeout;
- unsigned int fail_cnt;
- unsigned int fail_cnt_max;
+
+ int pc_happened;
+ int pc_delay;
+ unsigned long pc_timeout;
+ unsigned int pc_cnt;
+ unsigned int pc_cnt_max;
+
long long chk_lpt_sz;
long long chk_lpt_sz2;
long long chk_lpt_wastage;
@@ -84,11 +106,43 @@ struct ubifs_debug_info {
long long saved_free;
int saved_idx_gc_cnt;
- char dfs_dir_name[100];
+ unsigned int chk_gen:1;
+ unsigned int chk_index:1;
+ unsigned int chk_orph:1;
+ unsigned int chk_lprops:1;
+ unsigned int chk_fs:1;
+ unsigned int tst_rcvry:1;
+
+ char dfs_dir_name[UBIFS_DFS_DIR_LEN + 1];
struct dentry *dfs_dir;
struct dentry *dfs_dump_lprops;
struct dentry *dfs_dump_budg;
struct dentry *dfs_dump_tnc;
+ struct dentry *dfs_chk_gen;
+ struct dentry *dfs_chk_index;
+ struct dentry *dfs_chk_orph;
+ struct dentry *dfs_chk_lprops;
+ struct dentry *dfs_chk_fs;
+ struct dentry *dfs_tst_rcvry;
+};
+
+/**
+ * ubifs_global_debug_info - global (not per-FS) UBIFS debugging information.
+ *
+ * @chk_gen: if general extra checks are enabled
+ * @chk_index: if index xtra checks are enabled
+ * @chk_orph: if orphans extra checks are enabled
+ * @chk_lprops: if lprops extra checks are enabled
+ * @chk_fs: if UBIFS contents extra checks are enabled
+ * @tst_rcvry: if UBIFS recovery testing mode is enabled
+ */
+struct ubifs_global_debug_info {
+ unsigned int chk_gen:1;
+ unsigned int chk_index:1;
+ unsigned int chk_orph:1;
+ unsigned int chk_lprops:1;
+ unsigned int chk_fs:1;
+ unsigned int tst_rcvry:1;
};
#define ubifs_assert(expr) do { \
@@ -127,6 +181,8 @@ const char *dbg_key_str1(const struct ubifs_info *c,
#define DBGKEY(key) dbg_key_str0(c, (key))
#define DBGKEY1(key) dbg_key_str1(c, (key))
+extern spinlock_t dbg_lock;
+
#define ubifs_dbg_msg(type, fmt, ...) do { \
spin_lock(&dbg_lock); \
pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__); \
@@ -162,41 +218,36 @@ const char *dbg_key_str1(const struct ubifs_info *c,
/* Additional recovery messages */
#define dbg_rcvry(fmt, ...) ubifs_dbg_msg("rcvry", fmt, ##__VA_ARGS__)
-/*
- * Debugging check flags.
- *
- * UBIFS_CHK_GEN: general checks
- * UBIFS_CHK_TNC: check TNC
- * UBIFS_CHK_IDX_SZ: check index size
- * UBIFS_CHK_ORPH: check orphans
- * UBIFS_CHK_OLD_IDX: check the old index
- * UBIFS_CHK_LPROPS: check lprops
- * UBIFS_CHK_FS: check the file-system
- */
-enum {
- UBIFS_CHK_GEN = 0x1,
- UBIFS_CHK_TNC = 0x2,
- UBIFS_CHK_IDX_SZ = 0x4,
- UBIFS_CHK_ORPH = 0x8,
- UBIFS_CHK_OLD_IDX = 0x10,
- UBIFS_CHK_LPROPS = 0x20,
- UBIFS_CHK_FS = 0x40,
-};
-
-/*
- * Special testing flags.
- *
- * UBIFS_TST_RCVRY: failure mode for recovery testing
- */
-enum {
- UBIFS_TST_RCVRY = 0x4,
-};
-
-extern spinlock_t dbg_lock;
+extern struct ubifs_global_debug_info ubifs_dbg;
-extern unsigned int ubifs_msg_flags;
-extern unsigned int ubifs_chk_flags;
-extern unsigned int ubifs_tst_flags;
+static inline int dbg_is_chk_gen(const struct ubifs_info *c)
+{
+ return !!(ubifs_dbg.chk_gen || c->dbg->chk_gen);
+}
+static inline int dbg_is_chk_index(const struct ubifs_info *c)
+{
+ return !!(ubifs_dbg.chk_index || c->dbg->chk_index);
+}
+static inline int dbg_is_chk_orph(const struct ubifs_info *c)
+{
+ return !!(ubifs_dbg.chk_orph || c->dbg->chk_orph);
+}
+static inline int dbg_is_chk_lprops(const struct ubifs_info *c)
+{
+ return !!(ubifs_dbg.chk_lprops || c->dbg->chk_lprops);
+}
+static inline int dbg_is_chk_fs(const struct ubifs_info *c)
+{
+ return !!(ubifs_dbg.chk_fs || c->dbg->chk_fs);
+}
+static inline int dbg_is_tst_rcvry(const struct ubifs_info *c)
+{
+ return !!(ubifs_dbg.tst_rcvry || c->dbg->tst_rcvry);
+}
+static inline int dbg_is_power_cut(const struct ubifs_info *c)
+{
+ return !!c->dbg->pc_happened;
+}
int ubifs_debugging_init(struct ubifs_info *c);
void ubifs_debugging_exit(struct ubifs_info *c);
@@ -207,7 +258,7 @@ const char *dbg_cstate(int cmt_state);
const char *dbg_jhead(int jhead);
const char *dbg_get_key_dump(const struct ubifs_info *c,
const union ubifs_key *key);
-void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode);
+void dbg_dump_inode(struct ubifs_info *c, const struct inode *inode);
void dbg_dump_node(const struct ubifs_info *c, const void *node);
void dbg_dump_lpt_node(const struct ubifs_info *c, void *node, int lnum,
int offs);
@@ -240,8 +291,8 @@ int dbg_check_cats(struct ubifs_info *c);
int dbg_check_ltab(struct ubifs_info *c);
int dbg_chk_lpt_free_spc(struct ubifs_info *c);
int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len);
-int dbg_check_synced_i_size(struct inode *inode);
-int dbg_check_dir_size(struct ubifs_info *c, const struct inode *dir);
+int dbg_check_synced_i_size(const struct ubifs_info *c, struct inode *inode);
+int dbg_check_dir(struct ubifs_info *c, const struct inode *dir);
int dbg_check_tnc(struct ubifs_info *c, int extra);
int dbg_check_idx_size(struct ubifs_info *c, long long idx_size);
int dbg_check_filesystem(struct ubifs_info *c);
@@ -254,54 +305,12 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
int dbg_check_data_nodes_order(struct ubifs_info *c, struct list_head *head);
int dbg_check_nondata_nodes_order(struct ubifs_info *c, struct list_head *head);
-/* Force the use of in-the-gaps method for testing */
-static inline int dbg_force_in_the_gaps_enabled(void)
-{
- return ubifs_chk_flags & UBIFS_CHK_GEN;
-}
-int dbg_force_in_the_gaps(void);
-
-/* Failure mode for recovery testing */
-#define dbg_failure_mode (ubifs_tst_flags & UBIFS_TST_RCVRY)
-
-#ifndef UBIFS_DBG_PRESERVE_UBI
-#define ubi_leb_read dbg_leb_read
-#define ubi_leb_write dbg_leb_write
-#define ubi_leb_change dbg_leb_change
-#define ubi_leb_erase dbg_leb_erase
-#define ubi_leb_unmap dbg_leb_unmap
-#define ubi_is_mapped dbg_is_mapped
-#define ubi_leb_map dbg_leb_map
-#endif
-
-int dbg_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
- int len, int check);
-int dbg_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int offset, int len, int dtype);
-int dbg_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
- int len, int dtype);
-int dbg_leb_erase(struct ubi_volume_desc *desc, int lnum);
-int dbg_leb_unmap(struct ubi_volume_desc *desc, int lnum);
-int dbg_is_mapped(struct ubi_volume_desc *desc, int lnum);
-int dbg_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype);
-
-static inline int dbg_read(struct ubi_volume_desc *desc, int lnum, char *buf,
- int offset, int len)
-{
- return dbg_leb_read(desc, lnum, buf, offset, len, 0);
-}
-
-static inline int dbg_write(struct ubi_volume_desc *desc, int lnum,
- const void *buf, int offset, int len)
-{
- return dbg_leb_write(desc, lnum, buf, offset, len, UBI_UNKNOWN);
-}
-
-static inline int dbg_change(struct ubi_volume_desc *desc, int lnum,
- const void *buf, int len)
-{
- return dbg_leb_change(desc, lnum, buf, len, UBI_UNKNOWN);
-}
+int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
+ int len, int dtype);
+int dbg_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
+ int dtype);
+int dbg_leb_unmap(struct ubifs_info *c, int lnum);
+int dbg_leb_map(struct ubifs_info *c, int lnum, int dtype);
/* Debugfs-related stuff */
int dbg_debugfs_init(void);
@@ -313,7 +322,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
/* Use "if (0)" to make compiler check arguments even if debugging is off */
#define ubifs_assert(expr) do { \
- if (0 && (expr)) \
+ if (0) \
printk(KERN_CRIT "UBIFS assert failed in %s at %u (pid %d)\n", \
__func__, __LINE__, current->pid); \
} while (0)
@@ -323,6 +332,9 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
ubifs_err(fmt, ##__VA_ARGS__); \
} while (0)
+#define DBGKEY(key) ((char *)(key))
+#define DBGKEY1(key) ((char *)(key))
+
#define ubifs_dbg_msg(fmt, ...) do { \
if (0) \
pr_debug(fmt "\n", ##__VA_ARGS__); \
@@ -346,9 +358,6 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define dbg_scan(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
#define dbg_rcvry(fmt, ...) ubifs_dbg_msg(fmt, ##__VA_ARGS__)
-#define DBGKEY(key) ((char *)(key))
-#define DBGKEY1(key) ((char *)(key))
-
static inline int ubifs_debugging_init(struct ubifs_info *c) { return 0; }
static inline void ubifs_debugging_exit(struct ubifs_info *c) { return; }
static inline const char *dbg_ntype(int type) { return ""; }
@@ -357,7 +366,7 @@ static inline const char *dbg_jhead(int jhead) { return ""; }
static inline const char *
dbg_get_key_dump(const struct ubifs_info *c,
const union ubifs_key *key) { return ""; }
-static inline void dbg_dump_inode(const struct ubifs_info *c,
+static inline void dbg_dump_inode(struct ubifs_info *c,
const struct inode *inode) { return; }
static inline void dbg_dump_node(const struct ubifs_info *c,
const void *node) { return; }
@@ -409,9 +418,11 @@ static inline int dbg_check_ltab(struct ubifs_info *c) { return 0; }
static inline int dbg_chk_lpt_free_spc(struct ubifs_info *c) { return 0; }
static inline int dbg_chk_lpt_sz(struct ubifs_info *c,
int action, int len) { return 0; }
-static inline int dbg_check_synced_i_size(struct inode *inode) { return 0; }
-static inline int dbg_check_dir_size(struct ubifs_info *c,
- const struct inode *dir) { return 0; }
+static inline int
+dbg_check_synced_i_size(const struct ubifs_info *c,
+ struct inode *inode) { return 0; }
+static inline int dbg_check_dir(struct ubifs_info *c,
+ const struct inode *dir) { return 0; }
static inline int dbg_check_tnc(struct ubifs_info *c, int extra) { return 0; }
static inline int dbg_check_idx_size(struct ubifs_info *c,
long long idx_size) { return 0; }
@@ -431,9 +442,23 @@ static inline int
dbg_check_nondata_nodes_order(struct ubifs_info *c,
struct list_head *head) { return 0; }
-static inline int dbg_force_in_the_gaps(void) { return 0; }
-#define dbg_force_in_the_gaps_enabled() 0
-#define dbg_failure_mode 0
+static inline int dbg_leb_write(struct ubifs_info *c, int lnum,
+ const void *buf, int offset,
+ int len, int dtype) { return 0; }
+static inline int dbg_leb_change(struct ubifs_info *c, int lnum,
+ const void *buf, int len,
+ int dtype) { return 0; }
+static inline int dbg_leb_unmap(struct ubifs_info *c, int lnum) { return 0; }
+static inline int dbg_leb_map(struct ubifs_info *c, int lnum,
+ int dtype) { return 0; }
+
+static inline int dbg_is_chk_gen(const struct ubifs_info *c) { return 0; }
+static inline int dbg_is_chk_index(const struct ubifs_info *c) { return 0; }
+static inline int dbg_is_chk_orph(const struct ubifs_info *c) { return 0; }
+static inline int dbg_is_chk_lprops(const struct ubifs_info *c) { return 0; }
+static inline int dbg_is_chk_fs(const struct ubifs_info *c) { return 0; }
+static inline int dbg_is_tst_rcvry(const struct ubifs_info *c) { return 0; }
+static inline int dbg_is_power_cut(const struct ubifs_info *c) { return 0; }
static inline int dbg_debugfs_init(void) { return 0; }
static inline void dbg_debugfs_exit(void) { return; }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index ef5abd38f0b..68349204331 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -102,7 +102,7 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
* UBIFS has to fully control "clean <-> dirty" transitions of inodes
* to make budgeting work.
*/
- inode->i_flags |= (S_NOCMTIME);
+ inode->i_flags |= S_NOCMTIME;
inode_init_owner(inode, dir, mode);
inode->i_mtime = inode->i_atime = inode->i_ctime =
@@ -172,9 +172,11 @@ struct inode *ubifs_new_inode(struct ubifs_info *c, const struct inode *dir,
#ifdef CONFIG_UBIFS_FS_DEBUG
-static int dbg_check_name(struct ubifs_dent_node *dent, struct qstr *nm)
+static int dbg_check_name(const struct ubifs_info *c,
+ const struct ubifs_dent_node *dent,
+ const struct qstr *nm)
{
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
if (le16_to_cpu(dent->nlen) != nm->len)
return -EINVAL;
@@ -185,7 +187,7 @@ static int dbg_check_name(struct ubifs_dent_node *dent, struct qstr *nm)
#else
-#define dbg_check_name(dent, nm) 0
+#define dbg_check_name(c, dent, nm) 0
#endif
@@ -219,7 +221,7 @@ static struct dentry *ubifs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
- if (dbg_check_name(dent, &dentry->d_name)) {
+ if (dbg_check_name(c, dent, &dentry->d_name)) {
err = -EINVAL;
goto out;
}
@@ -522,7 +524,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
ubifs_assert(mutex_is_locked(&dir->i_mutex));
ubifs_assert(mutex_is_locked(&inode->i_mutex));
- err = dbg_check_synced_i_size(inode);
+ err = dbg_check_synced_i_size(c, inode);
if (err)
return err;
@@ -577,7 +579,7 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
inode->i_nlink, dir->i_ino);
ubifs_assert(mutex_is_locked(&dir->i_mutex));
ubifs_assert(mutex_is_locked(&inode->i_mutex));
- err = dbg_check_synced_i_size(inode);
+ err = dbg_check_synced_i_size(c, inode);
if (err)
return err;
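
The new dbg_check_name() above compares an on-disk, little-endian name length against the in-memory name before trusting a directory entry. A self-contained sketch of that shape, with simplified hypothetical types standing in for ubifs_dent_node and qstr:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the on-disk dent and the in-memory name. */
struct dent  { uint8_t nlen_le[2]; char name[32]; }; /* little-endian length */
struct qname { size_t len; const char *name; };

static uint16_t le16_to_host(const uint8_t le[2])
{
        return (uint16_t)(le[0] | (le[1] << 8));
}

/* Same shape as dbg_check_name(): check the length, then the bytes. */
static int check_name(const struct dent *d, const struct qname *nm)
{
        if (le16_to_host(d->nlen_le) != nm->len)
                return -1;              /* corrupt or mismatched entry */
        if (memcmp(d->name, nm->name, nm->len))
                return -1;
        return 0;
}

int main(void)
{
        struct dent d = { { 3, 0 }, "foo" };
        struct qname nm = { 3, "foo" };

        printf("match: %d\n", check_name(&d, &nm) == 0);
        return 0;
}
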
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 5e7fccfc4b2..7cf738a4544 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1263,7 +1263,7 @@ int ubifs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
return err;
- err = dbg_check_synced_i_size(inode);
+ err = dbg_check_synced_i_size(c, inode);
if (err)
return err;
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 3be645e012c..9228950a658 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -86,8 +86,125 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
c->no_chk_data_crc = 0;
c->vfs_sb->s_flags |= MS_RDONLY;
ubifs_warn("switched to read-only mode, error %d", err);
+ dump_stack();
+ }
+}
+
+/*
+ * Below are simple wrappers over UBI I/O functions which include some
+ * additional checks and UBIFS debugging stuff. See the corresponding UBI
+ * functions for more information.
+ */
+
+int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
+ int len, int even_ebadmsg)
+{
+ int err;
+
+ err = ubi_read(c->ubi, lnum, buf, offs, len);
+ /*
+ * In the case of %-EBADMSG, print the error message only if
+ * @even_ebadmsg is true.
+ */
+ if (err && (err != -EBADMSG || even_ebadmsg)) {
+ ubifs_err("reading %d bytes from LEB %d:%d failed, error %d",
+ len, lnum, offs, err);
+ dbg_dump_stack();
+ }
+ return err;
+}
+
+int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
+ int len, int dtype)
+{
+ int err;
+
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
+ return -EROFS;
+ if (!dbg_is_tst_rcvry(c))
+ err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
+ else
+ err = dbg_leb_write(c, lnum, buf, offs, len, dtype);
+ if (err) {
+ ubifs_err("writing %d bytes to LEB %d:%d failed, error %d",
+ len, lnum, offs, err);
+ ubifs_ro_mode(c, err);
+ dbg_dump_stack();
+ }
+ return err;
+}
+
+int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
+ int dtype)
+{
+ int err;
+
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
+ return -EROFS;
+ if (!dbg_is_tst_rcvry(c))
+ err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
+ else
+ err = dbg_leb_change(c, lnum, buf, len, dtype);
+ if (err) {
+ ubifs_err("changing %d bytes in LEB %d failed, error %d",
+ len, lnum, err);
+ ubifs_ro_mode(c, err);
+ dbg_dump_stack();
+ }
+ return err;
+}
+
+int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
+{
+ int err;
+
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
+ return -EROFS;
+ if (!dbg_is_tst_rcvry(c))
+ err = ubi_leb_unmap(c->ubi, lnum);
+ else
+ err = dbg_leb_unmap(c, lnum);
+ if (err) {
+ ubifs_err("unmap LEB %d failed, error %d", lnum, err);
+ ubifs_ro_mode(c, err);
+ dbg_dump_stack();
+ }
+ return err;
+}
+
+int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype)
+{
+ int err;
+
+ ubifs_assert(!c->ro_media && !c->ro_mount);
+ if (c->ro_error)
+ return -EROFS;
+ if (!dbg_is_tst_rcvry(c))
+ err = ubi_leb_map(c->ubi, lnum, dtype);
+ else
+ err = dbg_leb_map(c, lnum, dtype);
+ if (err) {
+ ubifs_err("mapping LEB %d failed, error %d", lnum, err);
+ ubifs_ro_mode(c, err);
+ dbg_dump_stack();
+ }
+ return err;
+}
+
+int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
+{
+ int err;
+
+ err = ubi_is_mapped(c->ubi, lnum);
+ if (err < 0) {
+ ubifs_err("ubi_is_mapped failed for LEB %d, error %d",
+ lnum, err);
dbg_dump_stack();
}
+ return err;
}
/**
@@ -406,14 +523,10 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
dirt = sync_len - wbuf->used;
if (dirt)
ubifs_pad(c, wbuf->buf + wbuf->used, dirt);
- err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
- sync_len, wbuf->dtype);
- if (err) {
- ubifs_err("cannot write %d bytes to LEB %d:%d",
- sync_len, wbuf->lnum, wbuf->offs);
- dbg_dump_stack();
+ err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len,
+ wbuf->dtype);
+ if (err)
return err;
- }
spin_lock(&wbuf->lock);
wbuf->offs += sync_len;
@@ -605,9 +718,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
if (aligned_len == wbuf->avail) {
dbg_io("flush jhead %s wbuf to LEB %d:%d",
dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
- err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
- wbuf->offs, wbuf->size,
- wbuf->dtype);
+ err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf,
+ wbuf->offs, wbuf->size,
+ wbuf->dtype);
if (err)
goto out;
@@ -642,8 +755,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
dbg_io("flush jhead %s wbuf to LEB %d:%d",
dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
- err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
- wbuf->size, wbuf->dtype);
+ err = ubifs_leb_write(c, wbuf->lnum, wbuf->buf, wbuf->offs,
+ wbuf->size, wbuf->dtype);
if (err)
goto out;
@@ -661,8 +774,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
*/
dbg_io("write %d bytes to LEB %d:%d",
wbuf->size, wbuf->lnum, wbuf->offs);
- err = ubi_leb_write(c->ubi, wbuf->lnum, buf, wbuf->offs,
- wbuf->size, wbuf->dtype);
+ err = ubifs_leb_write(c, wbuf->lnum, buf, wbuf->offs,
+ wbuf->size, wbuf->dtype);
if (err)
goto out;
@@ -683,8 +796,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
n <<= c->max_write_shift;
dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum,
wbuf->offs);
- err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written,
- wbuf->offs, n, wbuf->dtype);
+ err = ubifs_leb_write(c, wbuf->lnum, buf + written,
+ wbuf->offs, n, wbuf->dtype);
if (err)
goto out;
wbuf->offs += n;
@@ -766,13 +879,9 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
return -EROFS;
ubifs_prepare_node(c, buf, len, 1);
- err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype);
- if (err) {
- ubifs_err("cannot write %d bytes to LEB %d:%d, error %d",
- buf_len, lnum, offs, err);
+ err = ubifs_leb_write(c, lnum, buf, offs, buf_len, dtype);
+ if (err)
dbg_dump_node(c, buf);
- dbg_dump_stack();
- }
return err;
}
@@ -824,13 +933,9 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
if (rlen > 0) {
/* Read everything that goes before write-buffer */
- err = ubi_read(c->ubi, lnum, buf, offs, rlen);
- if (err && err != -EBADMSG) {
- ubifs_err("failed to read node %d from LEB %d:%d, "
- "error %d", type, lnum, offs, err);
- dbg_dump_stack();
+ err = ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
+ if (err && err != -EBADMSG)
return err;
- }
}
if (type != ch->node_type) {
@@ -885,12 +990,9 @@ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len,
ubifs_assert(!(offs & 7) && offs < c->leb_size);
ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
- err = ubi_read(c->ubi, lnum, buf, offs, len);
- if (err && err != -EBADMSG) {
- ubifs_err("cannot read node %d from LEB %d:%d, error %d",
- type, lnum, offs, err);
+ err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
+ if (err && err != -EBADMSG)
return err;
- }
if (type != ch->node_type) {
ubifs_err("bad node type (%d but expected %d)",
diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c
index affea9494ae..f9fd068d1ae 100644
--- a/fs/ubifs/log.c
+++ b/fs/ubifs/log.c
@@ -262,7 +262,7 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
* an unclean reboot, because the target LEB might have been
* unmapped, but not yet physically erased.
*/
- err = ubi_leb_map(c->ubi, bud->lnum, UBI_SHORTTERM);
+ err = ubifs_leb_map(c, bud->lnum, UBI_SHORTTERM);
if (err)
goto out_unlock;
}
@@ -283,8 +283,6 @@ int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
return 0;
out_unlock:
- if (err != -EAGAIN)
- ubifs_ro_mode(c, err);
mutex_unlock(&c->log_mutex);
kfree(ref);
kfree(bud);
@@ -752,7 +750,7 @@ static int dbg_check_bud_bytes(struct ubifs_info *c)
struct ubifs_bud *bud;
long long bud_bytes = 0;
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
spin_lock(&c->buds_lock);
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index 667884f4a61..f8a181e647c 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -504,7 +504,7 @@ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops)
pnode = (struct ubifs_pnode *)container_of(lprops - pos,
struct ubifs_pnode,
lprops[0]);
- return !test_bit(COW_ZNODE, &pnode->flags) &&
+ return !test_bit(COW_CNODE, &pnode->flags) &&
test_bit(DIRTY_CNODE, &pnode->flags);
}
@@ -860,7 +860,7 @@ int dbg_check_cats(struct ubifs_info *c)
struct list_head *pos;
int i, cat;
- if (!(ubifs_chk_flags & (UBIFS_CHK_GEN | UBIFS_CHK_LPROPS)))
+ if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c))
return 0;
list_for_each_entry(lprops, &c->empty_list, list) {
@@ -958,7 +958,7 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
{
int i = 0, j, err = 0;
- if (!(ubifs_chk_flags & (UBIFS_CHK_GEN | UBIFS_CHK_LPROPS)))
+ if (!dbg_is_chk_gen(c) && !dbg_is_chk_lprops(c))
return;
for (i = 0; i < heap->cnt; i++) {
@@ -1262,7 +1262,7 @@ int dbg_check_lprops(struct ubifs_info *c)
int i, err;
struct ubifs_lp_stats lst;
- if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
+ if (!dbg_is_chk_lprops(c))
return 0;
/*
diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c
index ef5155e109a..6189c74d97f 100644
--- a/fs/ubifs/lpt.c
+++ b/fs/ubifs/lpt.c
@@ -701,8 +701,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
alen = ALIGN(len, c->min_io_size);
set_ltab(c, lnum, c->leb_size - alen, alen - len);
memset(p, 0xff, alen - len);
- err = ubi_leb_change(c->ubi, lnum++, buf, alen,
- UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum++, buf, alen,
+ UBI_SHORTTERM);
if (err)
goto out;
p = buf;
@@ -732,8 +732,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
set_ltab(c, lnum, c->leb_size - alen,
alen - len);
memset(p, 0xff, alen - len);
- err = ubi_leb_change(c->ubi, lnum++, buf, alen,
- UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum++, buf, alen,
+ UBI_SHORTTERM);
if (err)
goto out;
p = buf;
@@ -780,8 +780,8 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
alen = ALIGN(len, c->min_io_size);
set_ltab(c, lnum, c->leb_size - alen, alen - len);
memset(p, 0xff, alen - len);
- err = ubi_leb_change(c->ubi, lnum++, buf, alen,
- UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum++, buf, alen,
+ UBI_SHORTTERM);
if (err)
goto out;
p = buf;
@@ -806,7 +806,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
alen = ALIGN(len, c->min_io_size);
set_ltab(c, lnum, c->leb_size - alen, alen - len);
memset(p, 0xff, alen - len);
- err = ubi_leb_change(c->ubi, lnum++, buf, alen, UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum++, buf, alen, UBI_SHORTTERM);
if (err)
goto out;
p = buf;
@@ -826,7 +826,7 @@ int ubifs_create_dflt_lpt(struct ubifs_info *c, int *main_lebs, int lpt_first,
/* Write remaining buffer */
memset(p, 0xff, alen - len);
- err = ubi_leb_change(c->ubi, lnum, buf, alen, UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum, buf, alen, UBI_SHORTTERM);
if (err)
goto out;
@@ -1222,7 +1222,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
if (c->big_lpt)
nnode->num = calc_nnode_num_from_parent(c, parent, iip);
} else {
- err = ubi_read(c->ubi, lnum, buf, offs, c->nnode_sz);
+ err = ubifs_leb_read(c, lnum, buf, offs, c->nnode_sz, 1);
if (err)
goto out;
err = ubifs_unpack_nnode(c, buf, nnode);
@@ -1247,6 +1247,7 @@ int ubifs_read_nnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
out:
ubifs_err("error %d reading nnode at %d:%d", err, lnum, offs);
+ dbg_dump_stack();
kfree(nnode);
return err;
}
@@ -1290,7 +1291,7 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
lprops->flags = ubifs_categorize_lprops(c, lprops);
}
} else {
- err = ubi_read(c->ubi, lnum, buf, offs, c->pnode_sz);
+ err = ubifs_leb_read(c, lnum, buf, offs, c->pnode_sz, 1);
if (err)
goto out;
err = unpack_pnode(c, buf, pnode);
@@ -1312,6 +1313,7 @@ static int read_pnode(struct ubifs_info *c, struct ubifs_nnode *parent, int iip)
out:
ubifs_err("error %d reading pnode at %d:%d", err, lnum, offs);
dbg_dump_pnode(c, pnode, parent, iip);
+ dbg_dump_stack();
dbg_msg("calc num: %d", calc_pnode_num_from_parent(c, parent, iip));
kfree(pnode);
return err;
@@ -1331,7 +1333,7 @@ static int read_ltab(struct ubifs_info *c)
buf = vmalloc(c->ltab_sz);
if (!buf)
return -ENOMEM;
- err = ubi_read(c->ubi, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz);
+ err = ubifs_leb_read(c, c->ltab_lnum, buf, c->ltab_offs, c->ltab_sz, 1);
if (err)
goto out;
err = unpack_ltab(c, buf);
@@ -1354,7 +1356,8 @@ static int read_lsave(struct ubifs_info *c)
buf = vmalloc(c->lsave_sz);
if (!buf)
return -ENOMEM;
- err = ubi_read(c->ubi, c->lsave_lnum, buf, c->lsave_offs, c->lsave_sz);
+ err = ubifs_leb_read(c, c->lsave_lnum, buf, c->lsave_offs,
+ c->lsave_sz, 1);
if (err)
goto out;
err = unpack_lsave(c, buf);
@@ -1814,8 +1817,8 @@ static struct ubifs_nnode *scan_get_nnode(struct ubifs_info *c,
if (c->big_lpt)
nnode->num = calc_nnode_num_from_parent(c, parent, iip);
} else {
- err = ubi_read(c->ubi, branch->lnum, buf, branch->offs,
- c->nnode_sz);
+ err = ubifs_leb_read(c, branch->lnum, buf, branch->offs,
+ c->nnode_sz, 1);
if (err)
return ERR_PTR(err);
err = ubifs_unpack_nnode(c, buf, nnode);
@@ -1883,8 +1886,8 @@ static struct ubifs_pnode *scan_get_pnode(struct ubifs_info *c,
ubifs_assert(branch->lnum >= c->lpt_first &&
branch->lnum <= c->lpt_last);
ubifs_assert(branch->offs >= 0 && branch->offs < c->leb_size);
- err = ubi_read(c->ubi, branch->lnum, buf, branch->offs,
- c->pnode_sz);
+ err = ubifs_leb_read(c, branch->lnum, buf, branch->offs,
+ c->pnode_sz, 1);
if (err)
return ERR_PTR(err);
err = unpack_pnode(c, buf, pnode);
@@ -2224,7 +2227,7 @@ int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
struct ubifs_cnode *cn;
int num, iip = 0, err;
- if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
+ if (!dbg_is_chk_lprops(c))
return 0;
while (cnode) {
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index dfcb5748a7d..cddd6bd214f 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -27,6 +27,7 @@
#include <linux/crc16.h>
#include <linux/slab.h>
+#include <linux/random.h>
#include "ubifs.h"
#ifdef CONFIG_UBIFS_FS_DEBUG
@@ -116,8 +117,8 @@ static int get_cnodes_to_commit(struct ubifs_info *c)
return 0;
cnt += 1;
while (1) {
- ubifs_assert(!test_bit(COW_ZNODE, &cnode->flags));
- __set_bit(COW_ZNODE, &cnode->flags);
+ ubifs_assert(!test_bit(COW_CNODE, &cnode->flags));
+ __set_bit(COW_CNODE, &cnode->flags);
cnext = next_dirty_cnode(cnode);
if (!cnext) {
cnode->cnext = c->lpt_cnext;
@@ -465,7 +466,7 @@ static int write_cnodes(struct ubifs_info *c)
*/
clear_bit(DIRTY_CNODE, &cnode->flags);
smp_mb__before_clear_bit();
- clear_bit(COW_ZNODE, &cnode->flags);
+ clear_bit(COW_CNODE, &cnode->flags);
smp_mb__after_clear_bit();
offs += len;
dbg_chk_lpt_sz(c, 1, len);
@@ -1160,11 +1161,11 @@ static int lpt_gc_lnum(struct ubifs_info *c, int lnum)
void *buf = c->lpt_buf;
dbg_lp("LEB %d", lnum);
- err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size);
- if (err) {
- ubifs_err("cannot read LEB %d, error %d", lnum, err);
+
+ err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
+ if (err)
return err;
- }
+
while (1) {
if (!is_a_node(c, buf, len)) {
int pad_len;
@@ -1640,7 +1641,7 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
int ret;
void *buf, *p;
- if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
+ if (!dbg_is_chk_lprops(c))
return 0;
buf = p = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
@@ -1650,11 +1651,11 @@ static int dbg_check_ltab_lnum(struct ubifs_info *c, int lnum)
}
dbg_lp("LEB %d", lnum);
- err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size);
- if (err) {
- dbg_msg("ubi_read failed, LEB %d, error %d", lnum, err);
+
+ err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
+ if (err)
goto out;
- }
+
while (1) {
if (!is_a_node(c, p, len)) {
int i, pad_len;
@@ -1711,7 +1712,7 @@ int dbg_check_ltab(struct ubifs_info *c)
{
int lnum, err, i, cnt;
- if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
+ if (!dbg_is_chk_lprops(c))
return 0;
/* Bring the entire tree into memory */
@@ -1754,7 +1755,7 @@ int dbg_chk_lpt_free_spc(struct ubifs_info *c)
long long free = 0;
int i;
- if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
+ if (!dbg_is_chk_lprops(c))
return 0;
for (i = 0; i < c->lpt_lebs; i++) {
@@ -1796,7 +1797,7 @@ int dbg_chk_lpt_sz(struct ubifs_info *c, int action, int len)
long long chk_lpt_sz, lpt_sz;
int err = 0;
- if (!(ubifs_chk_flags & UBIFS_CHK_LPROPS))
+ if (!dbg_is_chk_lprops(c))
return 0;
switch (action) {
@@ -1901,11 +1902,10 @@ static void dump_lpt_leb(const struct ubifs_info *c, int lnum)
return;
}
- err = ubi_read(c->ubi, lnum, buf, 0, c->leb_size);
- if (err) {
- ubifs_err("cannot read LEB %d, error %d", lnum, err);
+ err = ubifs_leb_read(c, lnum, buf, 0, c->leb_size, 1);
+ if (err)
goto out;
- }
+
while (1) {
offs = c->leb_size - len;
if (!is_a_node(c, p, len)) {
@@ -2019,7 +2019,7 @@ static int dbg_populate_lsave(struct ubifs_info *c)
struct ubifs_lpt_heap *heap;
int i;
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
if (random32() & 3)
return 0;
diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h
index 0b5296a9a4c..ee7cb5ebb6e 100644
--- a/fs/ubifs/misc.h
+++ b/fs/ubifs/misc.h
@@ -39,6 +39,29 @@ static inline int ubifs_zn_dirty(const struct ubifs_znode *znode)
}
/**
+ * ubifs_zn_obsolete - check if znode is obsolete.
+ * @znode: znode to check
+ *
+ * This helper function returns %1 if @znode is obsolete and %0 otherwise.
+ */
+static inline int ubifs_zn_obsolete(const struct ubifs_znode *znode)
+{
+ return !!test_bit(OBSOLETE_ZNODE, &znode->flags);
+}
+
+/**
+ * ubifs_zn_cow - check if znode has to be copied on write.
+ * @znode: znode to check
+ *
+ * This helper function returns %1 if @znode has the COW flag set and %0
+ * otherwise.
+ */
+static inline int ubifs_zn_cow(const struct ubifs_znode *znode)
+{
+ return !!test_bit(COW_ZNODE, &znode->flags);
+}
+
+/**
* ubifs_wake_up_bgt - wake up background thread.
* @c: UBIFS file-system description object
*/
@@ -122,86 +145,6 @@ static inline int ubifs_wbuf_sync(struct ubifs_wbuf *wbuf)
}
/**
- * ubifs_leb_unmap - unmap an LEB.
- * @c: UBIFS file-system description object
- * @lnum: LEB number to unmap
- *
- * This function returns %0 on success and a negative error code on failure.
- */
-static inline int ubifs_leb_unmap(const struct ubifs_info *c, int lnum)
-{
- int err;
-
- ubifs_assert(!c->ro_media && !c->ro_mount);
- if (c->ro_error)
- return -EROFS;
- err = ubi_leb_unmap(c->ubi, lnum);
- if (err) {
- ubifs_err("unmap LEB %d failed, error %d", lnum, err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ubifs_leb_write - write to a LEB.
- * @c: UBIFS file-system description object
- * @lnum: LEB number to write
- * @buf: buffer to write from
- * @offs: offset within LEB to write to
- * @len: length to write
- * @dtype: data type
- *
- * This function returns %0 on success and a negative error code on failure.
- */
-static inline int ubifs_leb_write(const struct ubifs_info *c, int lnum,
- const void *buf, int offs, int len, int dtype)
-{
- int err;
-
- ubifs_assert(!c->ro_media && !c->ro_mount);
- if (c->ro_error)
- return -EROFS;
- err = ubi_leb_write(c->ubi, lnum, buf, offs, len, dtype);
- if (err) {
- ubifs_err("writing %d bytes at %d:%d, error %d",
- len, lnum, offs, err);
- return err;
- }
-
- return 0;
-}
-
-/**
- * ubifs_leb_change - atomic LEB change.
- * @c: UBIFS file-system description object
- * @lnum: LEB number to write
- * @buf: buffer to write from
- * @len: length to write
- * @dtype: data type
- *
- * This function returns %0 on success and a negative error code on failure.
- */
-static inline int ubifs_leb_change(const struct ubifs_info *c, int lnum,
- const void *buf, int len, int dtype)
-{
- int err;
-
- ubifs_assert(!c->ro_media && !c->ro_mount);
- if (c->ro_error)
- return -EROFS;
- err = ubi_leb_change(c->ubi, lnum, buf, len, dtype);
- if (err) {
- ubifs_err("changing %d bytes in LEB %d, error %d",
- len, lnum, err);
- return err;
- }
-
- return 0;
-}
-
-/**
* ubifs_encode_dev - encode device node IDs.
* @dev: UBIFS device node information
* @rdev: device IDs to encode
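
The new ubifs_zn_obsolete()/ubifs_zn_cow() helpers wrap raw test_bit() calls behind named predicates, which is what lets the later assertions read as ubifs_assert(!ubifs_zn_obsolete(znode)). A minimal sketch of the pattern (hypothetical test_bit stand-in, single-threaded only):

enum { DIRTY_ZNODE, COW_ZNODE, OBSOLETE_ZNODE };

struct znode { unsigned long flags; };

static inline int test_bit_sketch(int nr, const unsigned long *addr)
{
        return (int)((*addr >> nr) & 1UL);
}

/* Same shape as the new helpers: name the test, normalize to 0/1. */
static inline int zn_obsolete(const struct znode *z)
{
        return !!test_bit_sketch(OBSOLETE_ZNODE, &z->flags);
}

static inline int zn_cow(const struct znode *z)
{
        return !!test_bit_sketch(COW_ZNODE, &z->flags);
}

int main(void)
{
        struct znode z = { .flags = 1UL << COW_ZNODE };

        return (zn_cow(&z) && !zn_obsolete(&z)) ? 0 : 1;
}
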
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index a5422fffbd6..c542c73cfa3 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -929,7 +929,7 @@ static int dbg_check_orphans(struct ubifs_info *c)
struct check_info ci;
int err;
- if (!(ubifs_chk_flags & UBIFS_CHK_ORPH))
+ if (!dbg_is_chk_orph(c))
return 0;
ci.last_ino = 0;
diff --git a/fs/ubifs/recovery.c b/fs/ubifs/recovery.c
index 783d8e0beb7..af02790d932 100644
--- a/fs/ubifs/recovery.c
+++ b/fs/ubifs/recovery.c
@@ -117,7 +117,7 @@ static int get_master_node(const struct ubifs_info *c, int lnum, void **pbuf,
if (!sbuf)
return -ENOMEM;
- err = ubi_read(c->ubi, lnum, sbuf, 0, c->leb_size);
+ err = ubifs_leb_read(c, lnum, sbuf, 0, c->leb_size, 0);
if (err && err != -EBADMSG)
goto out_free;
@@ -213,10 +213,10 @@ static int write_rcvrd_mst_node(struct ubifs_info *c,
mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY);
ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1);
- err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum, mst, sz, UBI_SHORTTERM);
if (err)
goto out;
- err = ubi_leb_change(c->ubi, lnum + 1, mst, sz, UBI_SHORTTERM);
+ err = ubifs_leb_change(c, lnum + 1, mst, sz, UBI_SHORTTERM);
if (err)
goto out;
out:
@@ -274,7 +274,8 @@ int ubifs_recover_master_node(struct ubifs_info *c)
if (cor1)
goto out_err;
mst = mst1;
- } else if (offs1 == 0 && offs2 + sz >= c->leb_size) {
+ } else if (offs1 == 0 &&
+ c->leb_size - offs2 - sz < sz) {
/* 1st LEB was unmapped and written, 2nd not */
if (cor1)
goto out_err;
@@ -539,8 +540,8 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
int len = ALIGN(endpt, c->min_io_size);
if (start) {
- err = ubi_read(c->ubi, lnum, sleb->buf, 0,
- start);
+ err = ubifs_leb_read(c, lnum, sleb->buf, 0,
+ start, 1);
if (err)
return err;
}
@@ -554,8 +555,8 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ubifs_pad(c, buf, pad_len);
}
}
- err = ubi_leb_change(c->ubi, lnum, sleb->buf, len,
- UBI_UNKNOWN);
+ err = ubifs_leb_change(c, lnum, sleb->buf, len,
+ UBI_UNKNOWN);
if (err)
return err;
}
@@ -819,7 +820,8 @@ static int get_cs_sqnum(struct ubifs_info *c, int lnum, int offs,
return -ENOMEM;
if (c->leb_size - offs < UBIFS_CS_NODE_SZ)
goto out_err;
- err = ubi_read(c->ubi, lnum, (void *)cs_node, offs, UBIFS_CS_NODE_SZ);
+ err = ubifs_leb_read(c, lnum, (void *)cs_node, offs,
+ UBIFS_CS_NODE_SZ, 0);
if (err && err != -EBADMSG)
goto out_free;
ret = ubifs_scan_a_node(c, cs_node, UBIFS_CS_NODE_SZ, lnum, offs, 0);
@@ -919,8 +921,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
*
* This function returns %0 on success and a negative error code on failure.
*/
-static int recover_head(const struct ubifs_info *c, int lnum, int offs,
- void *sbuf)
+static int recover_head(struct ubifs_info *c, int lnum, int offs, void *sbuf)
{
int len = c->max_write_size, err;
@@ -931,15 +932,15 @@ static int recover_head(const struct ubifs_info *c, int lnum, int offs,
return 0;
/* Read at the head location and check it is empty flash */
- err = ubi_read(c->ubi, lnum, sbuf, offs, len);
+ err = ubifs_leb_read(c, lnum, sbuf, offs, len, 1);
if (err || !is_empty(sbuf, len)) {
dbg_rcvry("cleaning head at %d:%d", lnum, offs);
if (offs == 0)
return ubifs_leb_unmap(c, lnum);
- err = ubi_read(c->ubi, lnum, sbuf, 0, offs);
+ err = ubifs_leb_read(c, lnum, sbuf, 0, offs, 1);
if (err)
return err;
- return ubi_leb_change(c->ubi, lnum, sbuf, offs, UBI_UNKNOWN);
+ return ubifs_leb_change(c, lnum, sbuf, offs, UBI_UNKNOWN);
}
return 0;
@@ -962,7 +963,7 @@ static int recover_head(const struct ubifs_info *c, int lnum, int offs,
*
* This function returns %0 on success and a negative error code on failure.
*/
-int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
+int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf)
{
int err;
@@ -993,7 +994,7 @@ int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf)
*
* This function returns %0 on success and a negative error code on failure.
*/
-static int clean_an_unclean_leb(const struct ubifs_info *c,
+static int clean_an_unclean_leb(struct ubifs_info *c,
struct ubifs_unclean_leb *ucleb, void *sbuf)
{
int err, lnum = ucleb->lnum, offs = 0, len = ucleb->endpt, quiet = 1;
@@ -1009,7 +1010,7 @@ static int clean_an_unclean_leb(const struct ubifs_info *c,
return 0;
}
- err = ubi_read(c->ubi, lnum, buf, offs, len);
+ err = ubifs_leb_read(c, lnum, buf, offs, len, 0);
if (err && err != -EBADMSG)
return err;
@@ -1069,7 +1070,7 @@ static int clean_an_unclean_leb(const struct ubifs_info *c,
}
/* Write back the LEB atomically */
- err = ubi_leb_change(c->ubi, lnum, sbuf, len, UBI_UNKNOWN);
+ err = ubifs_leb_change(c, lnum, sbuf, len, UBI_UNKNOWN);
if (err)
return err;
@@ -1089,7 +1090,7 @@ static int clean_an_unclean_leb(const struct ubifs_info *c,
*
* This function returns %0 on success and a negative error code on failure.
*/
-int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf)
+int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf)
{
dbg_rcvry("recovery");
while (!list_empty(&c->unclean_leb_list)) {
@@ -1454,7 +1455,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
if (i_size >= e->d_size)
return 0;
/* Read the LEB */
- err = ubi_read(c->ubi, lnum, c->sbuf, 0, c->leb_size);
+ err = ubifs_leb_read(c, lnum, c->sbuf, 0, c->leb_size, 1);
if (err)
goto out;
/* Change the size field and recalculate the CRC */
@@ -1470,7 +1471,7 @@ static int fix_size_in_place(struct ubifs_info *c, struct size_entry *e)
len -= 1;
len = ALIGN(len + 1, c->min_io_size);
/* Atomically write the fixed LEB back again */
- err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
+ err = ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN);
if (err)
goto out;
dbg_rcvry("inode %lu at %d:%d size %lld -> %lld",
diff --git a/fs/ubifs/replay.c b/fs/ubifs/replay.c
index 5e97161ce4d..ccabaf1164b 100644
--- a/fs/ubifs/replay.c
+++ b/fs/ubifs/replay.c
@@ -523,8 +523,7 @@ static int is_last_bud(struct ubifs_info *c, struct ubifs_bud *bud)
if (!list_is_last(&next->list, &jh->buds_list))
return 0;
- err = ubi_read(c->ubi, next->lnum, (char *)&data,
- next->start, 4);
+ err = ubifs_leb_read(c, next->lnum, (char *)&data, next->start, 4, 1);
if (err)
return 0;
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index c606f010e8d..93d938ad3d2 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -674,15 +674,15 @@ static int fixup_leb(struct ubifs_info *c, int lnum, int len)
if (len == 0) {
dbg_mnt("unmap empty LEB %d", lnum);
- return ubi_leb_unmap(c->ubi, lnum);
+ return ubifs_leb_unmap(c, lnum);
}
dbg_mnt("fixup LEB %d, data len %d", lnum, len);
- err = ubi_read(c->ubi, lnum, c->sbuf, 0, len);
+ err = ubifs_leb_read(c, lnum, c->sbuf, 0, len, 1);
if (err)
return err;
- return ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN);
+ return ubifs_leb_change(c, lnum, c->sbuf, len, UBI_UNKNOWN);
}
/**
diff --git a/fs/ubifs/scan.c b/fs/ubifs/scan.c
index 36216b46f77..37383e8011b 100644
--- a/fs/ubifs/scan.c
+++ b/fs/ubifs/scan.c
@@ -148,7 +148,7 @@ struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum,
INIT_LIST_HEAD(&sleb->nodes);
sleb->buf = sbuf;
- err = ubi_read(c->ubi, lnum, sbuf + offs, offs, c->leb_size - offs);
+ err = ubifs_leb_read(c, lnum, sbuf + offs, offs, c->leb_size - offs, 0);
if (err && err != -EBADMSG) {
ubifs_err("cannot read %d bytes from LEB %d:%d,"
" error %d", c->leb_size - offs, lnum, offs, err);
@@ -240,7 +240,7 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
int len;
ubifs_err("corruption at LEB %d:%d", lnum, offs);
- if (dbg_failure_mode)
+ if (dbg_is_tst_rcvry(c))
return;
len = c->leb_size - offs;
if (len > 8192)
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 529be058202..b28121278d4 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -85,7 +85,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
if (ui->data_len < 0 || ui->data_len > UBIFS_MAX_INO_DATA)
return 4;
- if (ui->xattr && (inode->i_mode & S_IFMT) != S_IFREG)
+ if (ui->xattr && !S_ISREG(inode->i_mode))
return 5;
if (!ubifs_compr_present(ui->compr_type)) {
@@ -94,7 +94,7 @@ static int validate_inode(struct ubifs_info *c, const struct inode *inode)
ubifs_compr_name(ui->compr_type));
}
- err = dbg_check_dir_size(c, inode);
+ err = dbg_check_dir(c, inode);
return err;
}
@@ -914,7 +914,7 @@ static int check_volume_empty(struct ubifs_info *c)
c->empty = 1;
for (lnum = 0; lnum < c->leb_cnt; lnum++) {
- err = ubi_is_mapped(c->ubi, lnum);
+ err = ubifs_is_mapped(c, lnum);
if (unlikely(err < 0))
return err;
if (err == 1) {
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 91b4213dde8..06673864768 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -223,7 +223,7 @@ static struct ubifs_znode *copy_znode(struct ubifs_info *c,
__set_bit(DIRTY_ZNODE, &zn->flags);
__clear_bit(COW_ZNODE, &zn->flags);
- ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
+ ubifs_assert(!ubifs_zn_obsolete(znode));
__set_bit(OBSOLETE_ZNODE, &znode->flags);
if (znode->level != 0) {
@@ -271,7 +271,7 @@ static struct ubifs_znode *dirty_cow_znode(struct ubifs_info *c,
struct ubifs_znode *zn;
int err;
- if (!test_bit(COW_ZNODE, &znode->flags)) {
+ if (!ubifs_zn_cow(znode)) {
/* znode is not being committed */
if (!test_and_set_bit(DIRTY_ZNODE, &znode->flags)) {
atomic_long_inc(&c->dirty_zn_cnt);
@@ -462,7 +462,7 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len);
- err = ubi_read(c->ubi, lnum, buf, offs, len);
+ err = ubifs_leb_read(c, lnum, buf, offs, len, 1);
if (err) {
ubifs_err("cannot read node type %d from LEB %d:%d, error %d",
type, lnum, offs, err);
@@ -1666,7 +1666,7 @@ static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
if (!overlap) {
/* We may safely unlock the write-buffer and read the data */
spin_unlock(&wbuf->lock);
- return ubi_read(c->ubi, lnum, buf, offs, len);
+ return ubifs_leb_read(c, lnum, buf, offs, len, 0);
}
/* Don't read under wbuf */
@@ -1680,7 +1680,7 @@ static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
if (rlen > 0)
/* Read everything that goes before write-buffer */
- return ubi_read(c->ubi, lnum, buf, offs, rlen);
+ return ubifs_leb_read(c, lnum, buf, offs, rlen, 0);
return 0;
}
@@ -1767,7 +1767,7 @@ int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
if (wbuf)
err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
else
- err = ubi_read(c->ubi, lnum, bu->buf, offs, len);
+ err = ubifs_leb_read(c, lnum, bu->buf, offs, len, 0);
/* Check for a race with GC */
if (maybe_leb_gced(c, lnum, bu->gc_seq))
@@ -2423,7 +2423,7 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
*/
do {
- ubifs_assert(!test_bit(OBSOLETE_ZNODE, &znode->flags));
+ ubifs_assert(!ubifs_zn_obsolete(znode));
ubifs_assert(ubifs_zn_dirty(znode));
zp = znode->parent;
@@ -2479,9 +2479,8 @@ static int tnc_delete(struct ubifs_info *c, struct ubifs_znode *znode, int n)
c->zroot.offs = zbr->offs;
c->zroot.len = zbr->len;
c->zroot.znode = znode;
- ubifs_assert(!test_bit(OBSOLETE_ZNODE,
- &zp->flags));
- ubifs_assert(test_bit(DIRTY_ZNODE, &zp->flags));
+ ubifs_assert(!ubifs_zn_obsolete(zp));
+ ubifs_assert(ubifs_zn_dirty(zp));
atomic_long_dec(&c->dirty_zn_cnt);
if (zp->cnext) {
@@ -2865,7 +2864,7 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
struct ubifs_znode *znode = cnext;
cnext = cnext->cnext;
- if (test_bit(OBSOLETE_ZNODE, &znode->flags))
+ if (ubifs_zn_obsolete(znode))
kfree(znode);
} while (cnext && cnext != c->cnext);
}
@@ -3301,7 +3300,7 @@ int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
if (!S_ISREG(inode->i_mode))
return 0;
- if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+ if (!dbg_is_chk_gen(c))
return 0;
block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
@@ -3337,9 +3336,10 @@ out_dump:
ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
"(data key %s)", (unsigned long)inode->i_ino, size,
((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
+ mutex_unlock(&c->tnc_mutex);
dbg_dump_inode(c, inode);
dbg_dump_stack();
- err = -EINVAL;
+ return -EINVAL;
out_unlock:
mutex_unlock(&c->tnc_mutex);
diff --git a/fs/ubifs/tnc_commit.c b/fs/ubifs/tnc_commit.c
index 41920f357bb..4c15f07a8bb 100644
--- a/fs/ubifs/tnc_commit.c
+++ b/fs/ubifs/tnc_commit.c
@@ -22,6 +22,7 @@
/* This file implements TNC functions for committing */
+#include <linux/random.h>
#include "ubifs.h"
/**
@@ -87,8 +88,12 @@ static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
atomic_long_dec(&c->dirty_zn_cnt);
ubifs_assert(ubifs_zn_dirty(znode));
- ubifs_assert(test_bit(COW_ZNODE, &znode->flags));
+ ubifs_assert(ubifs_zn_cow(znode));
+ /*
+ * Note, unlike 'write_index()' we do not add memory barriers here
+ * because this function is called with @c->tnc_mutex locked.
+ */
__clear_bit(DIRTY_ZNODE, &znode->flags);
__clear_bit(COW_ZNODE, &znode->flags);
@@ -377,7 +382,7 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
c->gap_lebs = NULL;
return err;
}
- if (dbg_force_in_the_gaps_enabled()) {
+ if (!dbg_is_chk_index(c)) {
/*
* Do not print scary warnings if the debugging
* option which forces in-the-gaps is enabled.
@@ -491,25 +496,6 @@ static int layout_in_empty_space(struct ubifs_info *c)
else
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
- if (c->min_io_size == 1) {
- buf_offs += ALIGN(len, 8);
- if (next_len) {
- if (buf_offs + next_len <= c->leb_size)
- continue;
- err = ubifs_update_one_lp(c, lnum, 0,
- c->leb_size - buf_offs, 0, 0);
- if (err)
- return err;
- lnum = -1;
- continue;
- }
- err = ubifs_update_one_lp(c, lnum,
- c->leb_size - buf_offs, 0, 0, 0);
- if (err)
- return err;
- break;
- }
-
/* Update buffer positions */
wlen = used + len;
used += ALIGN(len, 8);
@@ -658,7 +644,7 @@ static int get_znodes_to_commit(struct ubifs_info *c)
}
cnt += 1;
while (1) {
- ubifs_assert(!test_bit(COW_ZNODE, &znode->flags));
+ ubifs_assert(!ubifs_zn_cow(znode));
__set_bit(COW_ZNODE, &znode->flags);
znode->alt = 0;
cnext = find_next_dirty(znode);
@@ -704,7 +690,7 @@ static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
c->ilebs[c->ileb_cnt++] = lnum;
dbg_cmt("LEB %d", lnum);
}
- if (dbg_force_in_the_gaps())
+ if (dbg_is_chk_index(c) && !(random32() & 7))
return -ENOSPC;
return 0;
}
@@ -830,7 +816,7 @@ static int write_index(struct ubifs_info *c)
struct ubifs_idx_node *idx;
struct ubifs_znode *znode, *cnext;
int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
- int avail, wlen, err, lnum_pos = 0;
+ int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;
cnext = c->enext;
if (!cnext)
@@ -907,7 +893,7 @@ static int write_index(struct ubifs_info *c)
cnext = znode->cnext;
ubifs_assert(ubifs_zn_dirty(znode));
- ubifs_assert(test_bit(COW_ZNODE, &znode->flags));
+ ubifs_assert(ubifs_zn_cow(znode));
/*
* It is important that other threads should see %DIRTY_ZNODE
@@ -922,6 +908,28 @@ static int write_index(struct ubifs_info *c)
clear_bit(COW_ZNODE, &znode->flags);
smp_mb__after_clear_bit();
+ /*
+ * We have marked the znode as clean but have not updated the
+ * @c->clean_zn_cnt counter. If this znode becomes dirty again
+ * before 'free_obsolete_znodes()' is called, then
+ * @c->clean_zn_cnt will be decremented before it gets
+ * incremented (resulting in 2 decrements for the same znode).
+ * This means that @c->clean_zn_cnt may become negative for a
+ * while.
+ *
+ * Q: why can we not increment @c->clean_zn_cnt?
+ * A: because we do not have the @c->tnc_mutex locked, and the
+ * following code would be racy and buggy:
+ *
+ * if (!ubifs_zn_obsolete(znode)) {
+ * atomic_long_inc(&c->clean_zn_cnt);
+ * atomic_long_inc(&ubifs_clean_zn_cnt);
+ * }
+ *
+ * Thus, we just delay the @c->clean_zn_cnt update until we
+ * have the mutex locked.
+ */
+
/* Do not access znode from this point on */
/* Update buffer positions */
@@ -938,65 +946,38 @@ static int write_index(struct ubifs_info *c)
else
next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
- if (c->min_io_size == 1) {
- /*
- * Write the prepared index node immediately if there is
- * no minimum IO size
- */
- err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs,
- wlen, UBI_SHORTTERM);
- if (err)
- return err;
- buf_offs += ALIGN(wlen, 8);
- if (next_len) {
- used = 0;
- avail = buf_len;
- if (buf_offs + next_len > c->leb_size) {
- err = ubifs_update_one_lp(c, lnum,
- LPROPS_NC, 0, 0, LPROPS_TAKEN);
- if (err)
- return err;
- lnum = -1;
- }
+ nxt_offs = buf_offs + used + next_len;
+ if (next_len && nxt_offs <= c->leb_size) {
+ if (avail > 0)
continue;
- }
+ else
+ blen = buf_len;
} else {
- int blen, nxt_offs = buf_offs + used + next_len;
-
- if (next_len && nxt_offs <= c->leb_size) {
- if (avail > 0)
- continue;
- else
- blen = buf_len;
- } else {
- wlen = ALIGN(wlen, 8);
- blen = ALIGN(wlen, c->min_io_size);
- ubifs_pad(c, c->cbuf + wlen, blen - wlen);
- }
- /*
- * The buffer is full or there are no more znodes
- * to do
- */
- err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs,
- blen, UBI_SHORTTERM);
- if (err)
- return err;
- buf_offs += blen;
- if (next_len) {
- if (nxt_offs > c->leb_size) {
- err = ubifs_update_one_lp(c, lnum,
- LPROPS_NC, 0, 0, LPROPS_TAKEN);
- if (err)
- return err;
- lnum = -1;
- }
- used -= blen;
- if (used < 0)
- used = 0;
- avail = buf_len - used;
- memmove(c->cbuf, c->cbuf + blen, used);
- continue;
+ wlen = ALIGN(wlen, 8);
+ blen = ALIGN(wlen, c->min_io_size);
+ ubifs_pad(c, c->cbuf + wlen, blen - wlen);
+ }
+
+ /* The buffer is full or there are no more znodes to do */
+ err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen,
+ UBI_SHORTTERM);
+ if (err)
+ return err;
+ buf_offs += blen;
+ if (next_len) {
+ if (nxt_offs > c->leb_size) {
+ err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
+ 0, LPROPS_TAKEN);
+ if (err)
+ return err;
+ lnum = -1;
}
+ used -= blen;
+ if (used < 0)
+ used = 0;
+ avail = buf_len - used;
+ memmove(c->cbuf, c->cbuf + blen, used);
+ continue;
}
break;
}
@@ -1029,7 +1010,7 @@ static void free_obsolete_znodes(struct ubifs_info *c)
do {
znode = cnext;
cnext = znode->cnext;
- if (test_bit(OBSOLETE_ZNODE, &znode->flags))
+ if (ubifs_zn_obsolete(znode))
kfree(znode);
else {
znode->cnext = NULL;
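
The rewritten write_index() loop above pads the write buffer twice: index nodes are kept 8-byte aligned, and the final write length is rounded up to the flash minimum I/O unit, with the gap filled by padding. A small arithmetic sketch with assumed sizes (ALIGN_UP requires power-of-two alignment, which both values here are):

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        int min_io_size = 512;          /* assumed flash write unit */
        int wlen = 1500;                /* bytes of index nodes prepared */

        int aligned = ALIGN_UP(wlen, 8);            /* node alignment */
        int blen = ALIGN_UP(aligned, min_io_size);  /* write-unit alignment */

        /* blen - aligned bytes are filled with padding before the write. */
        printf("wlen=%d aligned=%d blen=%d pad=%d\n",
               wlen, aligned, blen, blen - aligned);
        return 0;
}

This prints wlen=1500 aligned=1504 blen=1536 pad=32, i.e. 32 padding bytes are written so the buffer ends on a 512-byte boundary.
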
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index f79983d6f86..702b79258e3 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -230,14 +230,14 @@ enum {
* LPT cnode flag bits.
*
* DIRTY_CNODE: cnode is dirty
- * COW_CNODE: cnode is being committed and must be copied before writing
* OBSOLETE_CNODE: cnode is being committed and has been copied (or deleted),
- * so it can (and must) be freed when the commit is finished
+ * so it can (and must) be freed when the commit is finished
+ * COW_CNODE: cnode is being committed and must be copied before writing
*/
enum {
DIRTY_CNODE = 0,
- COW_CNODE = 1,
- OBSOLETE_CNODE = 2,
+ OBSOLETE_CNODE = 1,
+ COW_CNODE = 2,
};
/*
@@ -1468,6 +1468,15 @@ extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
/* io.c */
void ubifs_ro_mode(struct ubifs_info *c, int err);
+int ubifs_leb_read(const struct ubifs_info *c, int lnum, void *buf, int offs,
+ int len, int even_ebadmsg);
+int ubifs_leb_write(struct ubifs_info *c, int lnum, const void *buf, int offs,
+ int len, int dtype);
+int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len,
+ int dtype);
+int ubifs_leb_unmap(struct ubifs_info *c, int lnum);
+int ubifs_leb_map(struct ubifs_info *c, int lnum, int dtype);
+int ubifs_is_mapped(const struct ubifs_info *c, int lnum);
int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len);
int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
int dtype);
@@ -1747,8 +1756,8 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf, int jhead);
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf);
-int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);
-int ubifs_clean_lebs(const struct ubifs_info *c, void *sbuf);
+int ubifs_recover_inl_heads(struct ubifs_info *c, void *sbuf);
+int ubifs_clean_lebs(struct ubifs_info *c, void *sbuf);
int ubifs_rcvry_gc_commit(struct ubifs_info *c);
int ubifs_recover_size_accum(struct ubifs_info *c, union ubifs_key *key,
int deletion, loff_t new_size);
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 284a7c89697..75bb316529d 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -88,8 +88,6 @@ xfs-y += xfs_alloc.o \
xfs_vnodeops.o \
xfs_rw.o
-xfs-$(CONFIG_XFS_TRACE) += xfs_btree_trace.o
-
# Objects in linux/
xfs-y += $(addprefix $(XFS_LINUX)/, \
kmem.o \
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 39f4f809bb6..115ac691953 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -264,7 +264,7 @@ xfs_set_mode(struct inode *inode, mode_t mode)
iattr.ia_mode = mode;
iattr.ia_ctime = current_fs_time(inode->i_sb);
- error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
+ error = -xfs_setattr_nonsize(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
return error;
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 79ce38be15a..26384fe3f26 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -181,6 +181,7 @@ xfs_setfilesize(
isize = xfs_ioend_new_eof(ioend);
if (isize) {
+ trace_xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
ip->i_d.di_size = isize;
xfs_mark_inode_dirty(ip);
}
@@ -894,11 +895,6 @@ out_invalidate:
* For unwritten space on the page we need to start the conversion to
* regular allocated space.
* For any other dirty buffer heads on the page we should flush them.
- *
- * If we detect that a transaction would be required to flush the page, we
- * have to check the process flags first, if we are already in a transaction
- * or disk I/O during allocations is off, we need to fail the writepage and
- * redirty the page.
*/
STATIC int
xfs_vm_writepage(
@@ -906,7 +902,6 @@ xfs_vm_writepage(
struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
- int delalloc, unwritten;
struct buffer_head *bh, *head;
struct xfs_bmbt_irec imap;
xfs_ioend_t *ioend = NULL, *iohead = NULL;
@@ -938,15 +933,10 @@ xfs_vm_writepage(
goto redirty;
/*
- * We need a transaction if there are delalloc or unwritten buffers
- * on the page.
- *
- * If we need a transaction and the process flags say we are already
- * in a transaction, or no IO is allowed then mark the page dirty
- * again and leave the page as is.
+ * Given that we do not allow direct reclaim to call us, we should
+ * never be called while in a filesystem transaction.
*/
- xfs_count_page_state(page, &delalloc, &unwritten);
- if ((current->flags & PF_FSTRANS) && (delalloc || unwritten))
+ if (WARN_ON(current->flags & PF_FSTRANS))
goto redirty;
/* Is this page beyond the end of the file? */
@@ -970,7 +960,7 @@ xfs_vm_writepage(
offset = page_offset(page);
type = IO_OVERWRITE;
- if (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking)
+ if (wbc->sync_mode == WB_SYNC_NONE)
nonblocking = 1;
do {
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5e68099db2a..b2b41198559 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -499,16 +499,14 @@ found:
spin_unlock(&pag->pag_buf_lock);
xfs_perag_put(pag);
- if (xfs_buf_cond_lock(bp)) {
- /* failed, so wait for the lock if requested. */
- if (!(flags & XBF_TRYLOCK)) {
- xfs_buf_lock(bp);
- XFS_STATS_INC(xb_get_locked_waited);
- } else {
+ if (!xfs_buf_trylock(bp)) {
+ if (flags & XBF_TRYLOCK) {
xfs_buf_rele(bp);
XFS_STATS_INC(xb_busy_locked);
return NULL;
}
+ xfs_buf_lock(bp);
+ XFS_STATS_INC(xb_get_locked_waited);
}
/*
@@ -594,10 +592,8 @@ _xfs_buf_read(
ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
- bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
- XBF_READ_AHEAD | _XBF_RUN_QUEUES);
- bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
- XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+ bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
+ bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
status = xfs_buf_iorequest(bp);
if (status || XFS_BUF_ISERROR(bp) || (flags & XBF_ASYNC))
@@ -681,7 +677,6 @@ xfs_buf_read_uncached(
return NULL;
/* set up the buffer for a read IO */
- xfs_buf_lock(bp);
XFS_BUF_SET_ADDR(bp, daddr);
XFS_BUF_READ(bp);
XFS_BUF_BUSY(bp);
@@ -816,8 +811,6 @@ xfs_buf_get_uncached(
goto fail_free_mem;
}
- xfs_buf_unlock(bp);
-
trace_xfs_buf_get_uncached(bp, _RET_IP_);
return bp;
@@ -896,8 +889,8 @@ xfs_buf_rele(
* to push on stale inode buffers.
*/
int
-xfs_buf_cond_lock(
- xfs_buf_t *bp)
+xfs_buf_trylock(
+ struct xfs_buf *bp)
{
int locked;
@@ -907,15 +900,8 @@ xfs_buf_cond_lock(
else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
xfs_log_force(bp->b_target->bt_mount, 0);
- trace_xfs_buf_cond_lock(bp, _RET_IP_);
- return locked ? 0 : -EBUSY;
-}
-
-int
-xfs_buf_lock_value(
- xfs_buf_t *bp)
-{
- return bp->b_sema.count;
+ trace_xfs_buf_trylock(bp, _RET_IP_);
+ return locked;
}
/*
@@ -929,7 +915,7 @@ xfs_buf_lock_value(
*/
void
xfs_buf_lock(
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
trace_xfs_buf_lock(bp, _RET_IP_);
@@ -950,7 +936,7 @@ xfs_buf_lock(
*/
void
xfs_buf_unlock(
- xfs_buf_t *bp)
+ struct xfs_buf *bp)
{
if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
atomic_inc(&bp->b_hold);
@@ -1121,7 +1107,7 @@ xfs_bioerror_relse(
XFS_BUF_UNDELAYWRITE(bp);
XFS_BUF_DONE(bp);
XFS_BUF_STALE(bp);
- XFS_BUF_CLR_IODONE_FUNC(bp);
+ bp->b_iodone = NULL;
if (!(fl & XBF_ASYNC)) {
/*
* Mark b_error and B_ERROR _both_.
@@ -1223,23 +1209,21 @@ _xfs_buf_ioapply(
total_nr_pages = bp->b_page_count;
map_i = 0;
- if (bp->b_flags & XBF_ORDERED) {
- ASSERT(!(bp->b_flags & XBF_READ));
- rw = WRITE_FLUSH_FUA;
- } else if (bp->b_flags & XBF_LOG_BUFFER) {
- ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
- bp->b_flags &= ~_XBF_RUN_QUEUES;
- rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
- } else if (bp->b_flags & _XBF_RUN_QUEUES) {
- ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
- bp->b_flags &= ~_XBF_RUN_QUEUES;
- rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
+ if (bp->b_flags & XBF_WRITE) {
+ if (bp->b_flags & XBF_SYNCIO)
+ rw = WRITE_SYNC;
+ else
+ rw = WRITE;
+ if (bp->b_flags & XBF_FUA)
+ rw |= REQ_FUA;
+ if (bp->b_flags & XBF_FLUSH)
+ rw |= REQ_FLUSH;
+ } else if (bp->b_flags & XBF_READ_AHEAD) {
+ rw = READA;
} else {
- rw = (bp->b_flags & XBF_WRITE) ? WRITE :
- (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
+ rw = READ;
}
-
next_chunk:
atomic_inc(&bp->b_io_remaining);
nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
@@ -1694,15 +1678,14 @@ xfs_buf_delwri_split(
list_for_each_entry_safe(bp, n, dwq, b_list) {
ASSERT(bp->b_flags & XBF_DELWRI);
- if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
+ if (!XFS_BUF_ISPINNED(bp) && xfs_buf_trylock(bp)) {
if (!force &&
time_before(jiffies, bp->b_queuetime + age)) {
xfs_buf_unlock(bp);
break;
}
- bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
- _XBF_RUN_QUEUES);
+ bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
bp->b_flags |= XBF_WRITE;
list_move_tail(&bp->b_list, list);
trace_xfs_buf_delwri_split(bp, _RET_IP_);
@@ -1738,14 +1721,6 @@ xfs_buf_cmp(
return 0;
}
-void
-xfs_buf_delwri_sort(
- xfs_buftarg_t *target,
- struct list_head *list)
-{
- list_sort(NULL, list, xfs_buf_cmp);
-}
-
STATIC int
xfsbufd(
void *data)
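
The xfs_buf_cond_lock() to xfs_buf_trylock() rename above also flips the return convention: the old helper returned 0 on success and -EBUSY on failure, while the new one returns non-zero when the lock was taken, so call sites read naturally as if (!xfs_buf_trylock(bp)). A userspace analogue of that convention (a pthread mutex standing in for the buffer semaphore):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns non-zero if we got the lock, zero if the buffer is busy. */
static int buf_trylock(void)
{
        return pthread_mutex_trylock(&buf_lock) == 0;
}

int main(void)
{
        if (!buf_trylock()) {
                printf("buffer busy: back off or block in buf_lock()\n");
                return 1;
        }
        printf("got the buffer lock\n");
        pthread_mutex_unlock(&buf_lock);
        return 0;
}
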
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 50a7d5fb3b7..6a83b46b4bc 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -46,43 +46,46 @@ typedef enum {
#define XBF_READ (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
-#define XBF_MAPPED (1 << 2) /* buffer mapped (b_addr valid) */
+#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
+#define XBF_MAPPED (1 << 3) /* buffer mapped (b_addr valid) */
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_DELWRI (1 << 6) /* buffer has dirty pages */
#define XBF_STALE (1 << 7) /* buffer has been staled, do not find it */
-#define XBF_ORDERED (1 << 11)/* use ordered writes */
-#define XBF_READ_AHEAD (1 << 12)/* asynchronous read-ahead */
-#define XBF_LOG_BUFFER (1 << 13)/* this is a buffer used for the log */
+
+/* I/O hints for the BIO layer */
+#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
+#define XBF_FUA (1 << 11)/* force cache write through mode */
+#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */
/* flags used only as arguments to access routines */
-#define XBF_LOCK (1 << 14)/* lock requested */
-#define XBF_TRYLOCK (1 << 15)/* lock requested, but do not wait */
-#define XBF_DONT_BLOCK (1 << 16)/* do not block in current thread */
+#define XBF_LOCK (1 << 15)/* lock requested */
+#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */
+#define XBF_DONT_BLOCK (1 << 17)/* do not block in current thread */
/* flags used only internally */
-#define _XBF_PAGES (1 << 18)/* backed by refcounted pages */
-#define _XBF_RUN_QUEUES (1 << 19)/* run block device task queue */
-#define _XBF_KMEM (1 << 20)/* backed by heap memory */
-#define _XBF_DELWRI_Q (1 << 21)/* buffer on delwri queue */
+#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */
+#define _XBF_KMEM (1 << 21)/* backed by heap memory */
+#define _XBF_DELWRI_Q (1 << 22)/* buffer on delwri queue */
typedef unsigned int xfs_buf_flags_t;
#define XFS_BUF_FLAGS \
{ XBF_READ, "READ" }, \
{ XBF_WRITE, "WRITE" }, \
+ { XBF_READ_AHEAD, "READ_AHEAD" }, \
{ XBF_MAPPED, "MAPPED" }, \
{ XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \
{ XBF_DELWRI, "DELWRI" }, \
{ XBF_STALE, "STALE" }, \
- { XBF_ORDERED, "ORDERED" }, \
- { XBF_READ_AHEAD, "READ_AHEAD" }, \
+ { XBF_SYNCIO, "SYNCIO" }, \
+ { XBF_FUA, "FUA" }, \
+ { XBF_FLUSH, "FLUSH" }, \
{ XBF_LOCK, "LOCK" }, /* should never be set */\
{ XBF_TRYLOCK, "TRYLOCK" }, /* ditto */\
{ XBF_DONT_BLOCK, "DONT_BLOCK" }, /* ditto */\
{ _XBF_PAGES, "PAGES" }, \
- { _XBF_RUN_QUEUES, "RUN_QUEUES" }, \
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }
@@ -91,11 +94,6 @@ typedef enum {
XBT_FORCE_FLUSH = 1,
} xfs_buftarg_flags_t;
-typedef struct xfs_bufhash {
- struct list_head bh_list;
- spinlock_t bh_lock;
-} xfs_bufhash_t;
-
typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
@@ -151,7 +149,7 @@ typedef struct xfs_buf {
xfs_buf_iodone_t b_iodone; /* I/O completion function */
struct completion b_iowait; /* queue for I/O waiters */
void *b_fspriv;
- void *b_fspriv2;
+ struct xfs_trans *b_transp;
struct page **b_pages; /* array of page pointers */
struct page *b_page_array[XB_PAGES]; /* inline pages */
unsigned long b_queuetime; /* time buffer was queued */
@@ -192,10 +190,11 @@ extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);
/* Locking and Unlocking Buffers */
-extern int xfs_buf_cond_lock(xfs_buf_t *);
-extern int xfs_buf_lock_value(xfs_buf_t *);
+extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
+#define xfs_buf_islocked(bp) \
+ ((bp)->b_sema.count <= 0)
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
@@ -234,8 +233,9 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
-#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
- ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
+#define XFS_BUF_ZEROFLAGS(bp) \
+ ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI| \
+ XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_STALE(bp) xfs_buf_stale(bp);
@@ -267,10 +267,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNASYNC(bp) ((bp)->b_flags &= ~XBF_ASYNC)
#define XFS_BUF_ISASYNC(bp) ((bp)->b_flags & XBF_ASYNC)
-#define XFS_BUF_ORDERED(bp) ((bp)->b_flags |= XBF_ORDERED)
-#define XFS_BUF_UNORDERED(bp) ((bp)->b_flags &= ~XBF_ORDERED)
-#define XFS_BUF_ISORDERED(bp) ((bp)->b_flags & XBF_ORDERED)
-
#define XFS_BUF_HOLD(bp) xfs_buf_hold(bp)
#define XFS_BUF_READ(bp) ((bp)->b_flags |= XBF_READ)
#define XFS_BUF_UNREAD(bp) ((bp)->b_flags &= ~XBF_READ)
@@ -280,14 +276,6 @@ void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNWRITE(bp) ((bp)->b_flags &= ~XBF_WRITE)
#define XFS_BUF_ISWRITE(bp) ((bp)->b_flags & XBF_WRITE)
-#define XFS_BUF_IODONE_FUNC(bp) ((bp)->b_iodone)
-#define XFS_BUF_SET_IODONE_FUNC(bp, func) ((bp)->b_iodone = (func))
-#define XFS_BUF_CLR_IODONE_FUNC(bp) ((bp)->b_iodone = NULL)
-
-#define XFS_BUF_FSPRIVATE(bp, type) ((type)(bp)->b_fspriv)
-#define XFS_BUF_SET_FSPRIVATE(bp, val) ((bp)->b_fspriv = (void*)(val))
-#define XFS_BUF_FSPRIVATE2(bp, type) ((type)(bp)->b_fspriv2)
-#define XFS_BUF_SET_FSPRIVATE2(bp, val) ((bp)->b_fspriv2 = (void*)(val))
#define XFS_BUF_SET_START(bp) do { } while (0)
#define XFS_BUF_PTR(bp) (xfs_caddr_t)((bp)->b_addr)
@@ -313,10 +301,6 @@ xfs_buf_set_ref(
#define XFS_BUF_ISPINNED(bp) atomic_read(&((bp)->b_pin_count))
-#define XFS_BUF_VALUSEMA(bp) xfs_buf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp) (xfs_buf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp) xfs_buf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x) xfs_buf_lock(bp)
#define XFS_BUF_FINISH_IOWAIT(bp) complete(&bp->b_iowait);
#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
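
xfs_buf_cond_lock()/xfs_buf_lock_value() give way to xfs_buf_trylock() and the
xfs_buf_islocked() macro above. A minimal sketch of the intended calling
pattern, assuming the new helper follows the down_trylock() convention of
returning nonzero once the lock is held (illustrative, not part of the patch):

	if (xfs_buf_trylock(bp)) {
		/* lock held: the buffer semaphore count is now <= 0 */
		ASSERT(xfs_buf_islocked(bp));
		/* ... modify the buffer ... */
		xfs_buf_unlock(bp);
	}
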
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index f4f878fc008..75e5d322e48 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -151,14 +151,14 @@ xfs_nfs_get_inode(
* We don't use ESTALE directly down the chain to not
* confuse applications using bulkstat that expect EINVAL.
*/
- if (error == EINVAL)
+ if (error == EINVAL || error == ENOENT)
error = ESTALE;
return ERR_PTR(-error);
}
if (ip->i_d.di_gen != generation) {
IRELE(ip);
- return ERR_PTR(-ENOENT);
+ return ERR_PTR(-ESTALE);
}
return VFS_I(ip);
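
Every way a handle can fail to resolve (malformed inode number: EINVAL; freed
inode: ENOENT; reused inode with a stale generation) now surfaces as ESTALE,
which NFS clients treat as a cue to re-look up the handle. A self-contained
userspace model of that policy (nfs_handle_errno is a hypothetical name):

	#include <errno.h>
	#include <stdio.h>

	/* Condensed, hypothetical model of the lookup error policy above:
	 * every unresolvable-handle case collapses to ESTALE. */
	static int nfs_handle_errno(int error, int gen_matches)
	{
		if (error == EINVAL || error == ENOENT)
			return ESTALE;		/* handle cannot be resolved */
		if (!error && !gen_matches)
			return ESTALE;		/* inode number was reused */
		return error;
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       nfs_handle_errno(EINVAL, 1),	/* ESTALE */
		       nfs_handle_errno(0, 0),		/* ESTALE */
		       nfs_handle_errno(0, 1));		/* 0 */
		return 0;
	}
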
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 7f782af286b..8073f61efb8 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -944,7 +944,7 @@ xfs_file_fallocate(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = new_size;
- error = -xfs_setattr(ip, &iattr, XFS_ATTR_NOLOCK);
+ error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
}
out_unlock:
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index d44d92cd12b..501e4f63054 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -39,6 +39,7 @@
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_vnodeops.h"
+#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include <linux/capability.h>
@@ -497,12 +498,442 @@ xfs_vn_getattr(
return 0;
}
+int
+xfs_setattr_nonsize(
+ struct xfs_inode *ip,
+ struct iattr *iattr,
+ int flags)
+{
+ xfs_mount_t *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ int mask = iattr->ia_valid;
+ xfs_trans_t *tp;
+ int error;
+ uid_t uid = 0, iuid = 0;
+ gid_t gid = 0, igid = 0;
+ struct xfs_dquot *udqp = NULL, *gdqp = NULL;
+ struct xfs_dquot *olddquot1 = NULL, *olddquot2 = NULL;
+
+ trace_xfs_setattr(ip);
+
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return XFS_ERROR(EROFS);
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ error = -inode_change_ok(inode, iattr);
+ if (error)
+ return XFS_ERROR(error);
+
+ ASSERT((mask & ATTR_SIZE) == 0);
+
+ /*
+	 * If disk quotas are on, we make sure that the dquots exist on disk
+	 * before we start any other transactions.  Trying to do this later
+	 * is messy.  We don't bother taking a read lock to look at the IDs
+	 * in the inode here, because we can't hold it across the trans_reserve.
+ * If the IDs do change before we take the ilock, we're covered
+ * because the i_*dquot fields will get updated anyway.
+ */
+ if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
+ uint qflags = 0;
+
+ if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
+ uid = iattr->ia_uid;
+ qflags |= XFS_QMOPT_UQUOTA;
+ } else {
+ uid = ip->i_d.di_uid;
+ }
+ if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
+ gid = iattr->ia_gid;
+ qflags |= XFS_QMOPT_GQUOTA;
+ } else {
+ gid = ip->i_d.di_gid;
+ }
+
+ /*
+ * We take a reference when we initialize udqp and gdqp,
+ * so it is important that we never blindly double trip on
+ * the same variable. See xfs_create() for an example.
+ */
+ ASSERT(udqp == NULL);
+ ASSERT(gdqp == NULL);
+ error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
+ qflags, &udqp, &gdqp);
+ if (error)
+ return error;
+ }
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
+ if (error)
+ goto out_dqrele;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	/*
+	 * Reserve quota for a prospective ownership change.  The actual
+	 * change is made further below, once the inode has joined the
+	 * transaction.
+	 */
+ if (mask & (ATTR_UID|ATTR_GID)) {
+ /*
+ * These IDs could have changed since we last looked at them.
+		 * But we're assured that if the ownership did change
+		 * while we didn't have the inode locked, the inode's
+		 * dquot(s) would have changed also.
+ */
+ iuid = ip->i_d.di_uid;
+ igid = ip->i_d.di_gid;
+ gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
+ uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
+
+ /*
+ * Do a quota reservation only if uid/gid is actually
+ * going to change.
+ */
+ if (XFS_IS_QUOTA_RUNNING(mp) &&
+ ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
+ (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
+ ASSERT(tp);
+ error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
+ capable(CAP_FOWNER) ?
+ XFS_QMOPT_FORCE_RES : 0);
+ if (error) /* out of quota */
+ goto out_trans_cancel;
+ }
+ }
+
+ xfs_trans_ijoin(tp, ip);
+
+ /*
+ * Change file ownership. Must be the owner or privileged.
+ */
+ if (mask & (ATTR_UID|ATTR_GID)) {
+ /*
+ * CAP_FSETID overrides the following restrictions:
+ *
+ * The set-user-ID and set-group-ID bits of a file will be
+ * cleared upon successful return from chown()
+ */
+ if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+ !capable(CAP_FSETID))
+ ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+
+ /*
+ * Change the ownerships and register quota modifications
+ * in the transaction.
+ */
+ if (iuid != uid) {
+ if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
+ ASSERT(mask & ATTR_UID);
+ ASSERT(udqp);
+ olddquot1 = xfs_qm_vop_chown(tp, ip,
+ &ip->i_udquot, udqp);
+ }
+ ip->i_d.di_uid = uid;
+ inode->i_uid = uid;
+ }
+ if (igid != gid) {
+ if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
+ ASSERT(!XFS_IS_PQUOTA_ON(mp));
+ ASSERT(mask & ATTR_GID);
+ ASSERT(gdqp);
+ olddquot2 = xfs_qm_vop_chown(tp, ip,
+ &ip->i_gdquot, gdqp);
+ }
+ ip->i_d.di_gid = gid;
+ inode->i_gid = gid;
+ }
+ }
+
+ /*
+ * Change file access modes.
+ */
+ if (mask & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+
+ if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
+ mode &= ~S_ISGID;
+
+ ip->i_d.di_mode &= S_IFMT;
+ ip->i_d.di_mode |= mode & ~S_IFMT;
+
+ inode->i_mode &= S_IFMT;
+ inode->i_mode |= mode & ~S_IFMT;
+ }
+
+ /*
+ * Change file access or modified times.
+ */
+ if (mask & ATTR_ATIME) {
+ inode->i_atime = iattr->ia_atime;
+ ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+ ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+ ip->i_update_core = 1;
+ }
+ if (mask & ATTR_CTIME) {
+ inode->i_ctime = iattr->ia_ctime;
+ ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
+ ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
+ ip->i_update_core = 1;
+ }
+ if (mask & ATTR_MTIME) {
+ inode->i_mtime = iattr->ia_mtime;
+ ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+ ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+ ip->i_update_core = 1;
+ }
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ XFS_STATS_INC(xs_ig_attrchg);
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0);
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ /*
+ * Release any dquot(s) the inode had kept before chown.
+ */
+ xfs_qm_dqrele(olddquot1);
+ xfs_qm_dqrele(olddquot2);
+ xfs_qm_dqrele(udqp);
+ xfs_qm_dqrele(gdqp);
+
+ if (error)
+ return XFS_ERROR(error);
+
+ /*
+ * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
+ * update. We could avoid this with linked transactions
+ * and passing down the transaction pointer all the way
+ * to attr_set. No previous user of the generic
+ * Posix ACL code seems to care about this issue either.
+ */
+ if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
+ error = -xfs_acl_chmod(inode);
+ if (error)
+ return XFS_ERROR(error);
+ }
+
+ return 0;
+
+out_trans_cancel:
+ xfs_trans_cancel(tp, 0);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+out_dqrele:
+ xfs_qm_dqrele(udqp);
+ xfs_qm_dqrele(gdqp);
+ return error;
+}
+
+/*
+ * Truncate file. Must have write permission and not be a directory.
+ */
+int
+xfs_setattr_size(
+ struct xfs_inode *ip,
+ struct iattr *iattr,
+ int flags)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ int mask = iattr->ia_valid;
+ struct xfs_trans *tp;
+ int error;
+ uint lock_flags;
+ uint commit_flags = 0;
+
+ trace_xfs_setattr(ip);
+
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return XFS_ERROR(EROFS);
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return XFS_ERROR(EIO);
+
+ error = -inode_change_ok(inode, iattr);
+ if (error)
+ return XFS_ERROR(error);
+
+ ASSERT(S_ISREG(ip->i_d.di_mode));
+ ASSERT((mask & (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+ ATTR_MTIME_SET|ATTR_KILL_SUID|ATTR_KILL_SGID|
+ ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+
+ lock_flags = XFS_ILOCK_EXCL;
+ if (!(flags & XFS_ATTR_NOLOCK))
+ lock_flags |= XFS_IOLOCK_EXCL;
+ xfs_ilock(ip, lock_flags);
+
+ /*
+ * Short circuit the truncate case for zero length files.
+ */
+ if (iattr->ia_size == 0 &&
+ ip->i_size == 0 && ip->i_d.di_nextents == 0) {
+ if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
+ goto out_unlock;
+
+ /*
+ * Use the regular setattr path to update the timestamps.
+ */
+ xfs_iunlock(ip, lock_flags);
+ iattr->ia_valid &= ~ATTR_SIZE;
+ return xfs_setattr_nonsize(ip, iattr, 0);
+ }
+
+ /*
+ * Make sure that the dquots are attached to the inode.
+ */
+ error = xfs_qm_dqattach_locked(ip, 0);
+ if (error)
+ goto out_unlock;
+
+	/*
+	 * Now we can make the changes.  Take care of the part of the
+	 * truncation that must be done without the inode lock before
+	 * joining the inode to the transaction, because the inode cannot
+	 * be unlocked once it is part of the transaction.
+	 */
+ if (iattr->ia_size > ip->i_size) {
+ /*
+ * Do the first part of growing a file: zero any data in the
+ * last block that is beyond the old EOF. We need to do this
+ * before the inode is joined to the transaction to modify
+ * i_size.
+ */
+ error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
+ if (error)
+ goto out_unlock;
+ }
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ lock_flags &= ~XFS_ILOCK_EXCL;
+
+ /*
+	 * We are going to log the inode size change in this transaction, so
+	 * any previous writes that lie beyond the on-disk EOF but below the
+	 * new EOF and have not been written out need to be written here.  If
+	 * we do not write the data out, we expose ourselves to the null
+	 * files problem.
+	 *
+	 * Only flush from the on-disk size to the smaller of the in-memory
+	 * file size or the new size, as that's the range we really care
+	 * about here; it also avoids waiting on data outside that range.
+ */
+ if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) {
+ error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size,
+ XBF_ASYNC, FI_NONE);
+ if (error)
+ goto out_unlock;
+ }
+
+ /*
+ * Wait for all I/O to complete.
+ */
+ xfs_ioend_wait(ip);
+
+ error = -block_truncate_page(inode->i_mapping, iattr->ia_size,
+ xfs_get_blocks);
+ if (error)
+ goto out_unlock;
+
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
+ error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
+ XFS_TRANS_PERM_LOG_RES,
+ XFS_ITRUNCATE_LOG_COUNT);
+ if (error)
+ goto out_trans_cancel;
+
+ truncate_setsize(inode, iattr->ia_size);
+
+ commit_flags = XFS_TRANS_RELEASE_LOG_RES;
+ lock_flags |= XFS_ILOCK_EXCL;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ xfs_trans_ijoin(tp, ip);
+
+ /*
+ * Only change the c/mtime if we are changing the size or we are
+ * explicitly asked to change it. This handles the semantic difference
+ * between truncate() and ftruncate() as implemented in the VFS.
+ *
+ * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
+ * special case where we need to update the times despite not having
+	 * these flags set.  For all other operations the VFS sets these flags
+ * explicitly if it wants a timestamp update.
+ */
+ if (iattr->ia_size != ip->i_size &&
+ (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+ iattr->ia_ctime = iattr->ia_mtime =
+ current_fs_time(inode->i_sb);
+ mask |= ATTR_CTIME | ATTR_MTIME;
+ }
+
+ if (iattr->ia_size > ip->i_size) {
+ ip->i_d.di_size = iattr->ia_size;
+ ip->i_size = iattr->ia_size;
+ } else if (iattr->ia_size <= ip->i_size ||
+ (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
+ error = xfs_itruncate_data(&tp, ip, iattr->ia_size);
+ if (error)
+ goto out_trans_abort;
+
+ /*
+ * Truncated "down", so we're removing references to old data
+ * here - if we delay flushing for a long time, we expose
+ * ourselves unduly to the notorious NULL files problem. So,
+ * we mark this inode and flush it when the file is closed,
+ * and do not wait the usual (long) time for writeout.
+ */
+ xfs_iflags_set(ip, XFS_ITRUNCATED);
+ }
+
+ if (mask & ATTR_CTIME) {
+ inode->i_ctime = iattr->ia_ctime;
+ ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
+ ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
+ ip->i_update_core = 1;
+ }
+ if (mask & ATTR_MTIME) {
+ inode->i_mtime = iattr->ia_mtime;
+ ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+ ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+ ip->i_update_core = 1;
+ }
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+ XFS_STATS_INC(xs_ig_attrchg);
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
+
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+out_unlock:
+ if (lock_flags)
+ xfs_iunlock(ip, lock_flags);
+ return error;
+
+out_trans_abort:
+ commit_flags |= XFS_TRANS_ABORT;
+out_trans_cancel:
+ xfs_trans_cancel(tp, commit_flags);
+ goto out_unlock;
+}
+
STATIC int
xfs_vn_setattr(
struct dentry *dentry,
struct iattr *iattr)
{
- return -xfs_setattr(XFS_I(dentry->d_inode), iattr, 0);
+ if (iattr->ia_valid & ATTR_SIZE)
+ return -xfs_setattr_size(XFS_I(dentry->d_inode), iattr, 0);
+ return -xfs_setattr_nonsize(XFS_I(dentry->d_inode), iattr, 0);
}
#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
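
The sign flips at this layer are deliberate: core XFS functions return
positive errno values and the VFS expects negative ones, so xfs_vn_setattr()
and friends negate exactly once at the boundary. A self-contained sketch of
that convention (xfs_style_op/vfs_style_op are hypothetical stand-ins):

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for an XFS internal: returns 0 or a positive errno. */
	static int xfs_style_op(int fail)
	{
		return fail ? EROFS : 0;
	}

	/* Stand-in for a VFS entry point: returns 0 or a negative errno. */
	static int vfs_style_op(int fail)
	{
		return -xfs_style_op(fail);	/* negate once, at the boundary */
	}

	int main(void)
	{
		printf("%d %d\n", vfs_style_op(0), vfs_style_op(1));
		return 0;
	}
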
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 8633521b3b2..d42f814e4d3 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -33,7 +33,6 @@
#endif
#include <xfs_types.h>
-#include <xfs_arch.h>
#include <kmem.h>
#include <mrlock.h>
@@ -88,6 +87,12 @@
#include <xfs_buf.h>
#include <xfs_message.h>
+#ifdef __BIG_ENDIAN
+#define XFS_NATIVE_HOST 1
+#else
+#undef XFS_NATIVE_HOST
+#endif
+
/*
* Feature macros (disable/enable)
*/
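
With xfs_arch.h gone (it is removed later in this patch), the XFS_NATIVE_HOST
test lives here: it marks big-endian hosts, where on-disk (big-endian)
metadata is already in native byte order. A hedged sketch of the kind of
shortcut it enables (xfs_example_to_cpu is a hypothetical macro; the kernel
proper uses the generic be32_to_cpu() helpers):

	#ifdef XFS_NATIVE_HOST
	# define xfs_example_to_cpu(x)	(x)			/* already native */
	#else
	# define xfs_example_to_cpu(x)	__builtin_bswap32(x)	/* swap needed */
	#endif
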
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index a1a881e68a9..25fd2cd6c8b 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -33,7 +33,6 @@
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
@@ -1412,37 +1411,35 @@ xfs_fs_fill_super(
sb->s_time_gran = 1;
set_posix_acl_flag(sb);
- error = xfs_syncd_init(mp);
- if (error)
- goto out_filestream_unmount;
-
xfs_inode_shrinker_register(mp);
error = xfs_mountfs(mp);
if (error)
- goto out_syncd_stop;
+ goto out_filestream_unmount;
+
+ error = xfs_syncd_init(mp);
+ if (error)
+ goto out_unmount;
root = igrab(VFS_I(mp->m_rootip));
if (!root) {
error = ENOENT;
- goto fail_unmount;
+ goto out_syncd_stop;
}
if (is_bad_inode(root)) {
error = EINVAL;
- goto fail_vnrele;
+ goto out_syncd_stop;
}
sb->s_root = d_alloc_root(root);
if (!sb->s_root) {
error = ENOMEM;
- goto fail_vnrele;
+ goto out_iput;
}
return 0;
- out_syncd_stop:
- xfs_inode_shrinker_unregister(mp);
- xfs_syncd_stop(mp);
out_filestream_unmount:
+ xfs_inode_shrinker_unregister(mp);
xfs_filestream_unmount(mp);
out_free_sb:
xfs_freesb(mp);
@@ -1456,17 +1453,12 @@ xfs_fs_fill_super(
out:
return -error;
- fail_vnrele:
- if (sb->s_root) {
- dput(sb->s_root);
- sb->s_root = NULL;
- } else {
- iput(root);
- }
-
- fail_unmount:
- xfs_inode_shrinker_unregister(mp);
+ out_iput:
+ iput(root);
+ out_syncd_stop:
xfs_syncd_stop(mp);
+ out_unmount:
+ xfs_inode_shrinker_unregister(mp);
/*
* Blow away any referenced inode in the filestreams cache.
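
The reordering above restores the usual fill_super error-handling shape: each
label undoes only what was set up before the failing step, so teardown runs in
the reverse order of setup. A generic sketch of that shape (init_*/undo_* are
hypothetical stand-ins for the shrinker, mountfs and syncd steps):

	static int init_a(void) { return 0; }
	static int init_b(void) { return 0; }
	static int init_c(void) { return 0; }
	static void undo_a(void) { }
	static void undo_b(void) { }

	static int fill_super_shape(void)
	{
		int error;

		error = init_a();		/* e.g. shrinker registration */
		if (error)
			goto out;
		error = init_b();		/* e.g. xfs_mountfs() */
		if (error)
			goto out_undo_a;
		error = init_c();		/* e.g. xfs_syncd_init() */
		if (error)
			goto out_undo_b;
		return 0;

	out_undo_b:
		undo_b();
	out_undo_a:
		undo_a();
	out:
		return error;
	}
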
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 8ecad5ff9f9..5cc158e52d4 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -359,14 +359,12 @@ xfs_quiesce_data(
{
int error, error2 = 0;
- /* push non-blocking */
- xfs_sync_data(mp, 0);
xfs_qm_sync(mp, SYNC_TRYLOCK);
-
- /* push and block till complete */
- xfs_sync_data(mp, SYNC_WAIT);
xfs_qm_sync(mp, SYNC_WAIT);
+ /* force out the newly dirtied log buffers */
+ xfs_log_force(mp, XFS_LOG_SYNC);
+
/* write superblock and hoover up shutdown errors */
error = xfs_sync_fsdata(mp);
@@ -436,7 +434,7 @@ xfs_quiesce_attr(
WARN_ON(atomic_read(&mp->m_active_trans) != 0);
/* Push the superblock and write an unmount record */
- error = xfs_log_sbcount(mp, 1);
+ error = xfs_log_sbcount(mp);
if (error)
xfs_warn(mp, "xfs_attr_quiesce: failed to log sb changes. "
"Frozen image may not be consistent.");
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index e3a6ad27415..e914fd62174 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -21,14 +21,6 @@
struct xfs_mount;
struct xfs_perag;
-typedef struct xfs_sync_work {
- struct list_head w_list;
- struct xfs_mount *w_mount;
- void *w_data; /* syncer routine argument */
- void (*w_syncer)(struct xfs_mount *, void *);
- struct completion *w_completion;
-} xfs_sync_work_t;
-
#define SYNC_WAIT 0x0001 /* wait for i/o to complete */
#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index d48b7a579ae..fda0708ef2e 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -293,7 +293,7 @@ DECLARE_EVENT_CLASS(xfs_buf_class,
__entry->buffer_length = bp->b_buffer_length;
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
- __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->lockval = bp->b_sema.count;
__entry->flags = bp->b_flags;
__entry->caller_ip = caller_ip;
),
@@ -323,7 +323,7 @@ DEFINE_BUF_EVENT(xfs_buf_bawrite);
DEFINE_BUF_EVENT(xfs_buf_bdwrite);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
-DEFINE_BUF_EVENT(xfs_buf_cond_lock);
+DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
DEFINE_BUF_EVENT(xfs_buf_iowait_done);
@@ -366,7 +366,7 @@ DECLARE_EVENT_CLASS(xfs_buf_flags_class,
__entry->flags = flags;
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
- __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->lockval = bp->b_sema.count;
__entry->caller_ip = caller_ip;
),
TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
@@ -409,7 +409,7 @@ TRACE_EVENT(xfs_buf_ioerror,
__entry->buffer_length = bp->b_buffer_length;
__entry->hold = atomic_read(&bp->b_hold);
__entry->pincount = atomic_read(&bp->b_pin_count);
- __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->lockval = bp->b_sema.count;
__entry->error = error;
__entry->flags = bp->b_flags;
__entry->caller_ip = caller_ip;
@@ -454,7 +454,7 @@ DECLARE_EVENT_CLASS(xfs_buf_item_class,
__entry->buf_flags = bip->bli_buf->b_flags;
__entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
__entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
- __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
+ __entry->buf_lockval = bip->bli_buf->b_sema.count;
__entry->li_desc = bip->bli_item.li_desc;
__entry->li_flags = bip->bli_item.li_flags;
),
@@ -998,7 +998,8 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
- __field(loff_t, size)
+ __field(loff_t, isize)
+ __field(loff_t, disize)
__field(loff_t, new_size)
__field(loff_t, offset)
__field(size_t, count)
@@ -1006,16 +1007,18 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev;
__entry->ino = ip->i_ino;
- __entry->size = ip->i_d.di_size;
+ __entry->isize = ip->i_size;
+ __entry->disize = ip->i_d.di_size;
__entry->new_size = ip->i_new_size;
__entry->offset = offset;
__entry->count = count;
),
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
+ TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx "
"offset 0x%llx count %zd",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
- __entry->size,
+ __entry->isize,
+ __entry->disize,
__entry->new_size,
__entry->offset,
__entry->count)
@@ -1028,40 +1031,7 @@ DEFINE_EVENT(xfs_simple_io_class, name, \
DEFINE_SIMPLE_IO_EVENT(xfs_delalloc_enospc);
DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
-
-
-TRACE_EVENT(xfs_itruncate_start,
- TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size, int flag,
- xfs_off_t toss_start, xfs_off_t toss_finish),
- TP_ARGS(ip, new_size, flag, toss_start, toss_finish),
- TP_STRUCT__entry(
- __field(dev_t, dev)
- __field(xfs_ino_t, ino)
- __field(xfs_fsize_t, size)
- __field(xfs_fsize_t, new_size)
- __field(xfs_off_t, toss_start)
- __field(xfs_off_t, toss_finish)
- __field(int, flag)
- ),
- TP_fast_assign(
- __entry->dev = VFS_I(ip)->i_sb->s_dev;
- __entry->ino = ip->i_ino;
- __entry->size = ip->i_d.di_size;
- __entry->new_size = new_size;
- __entry->toss_start = toss_start;
- __entry->toss_finish = toss_finish;
- __entry->flag = flag;
- ),
- TP_printk("dev %d:%d ino 0x%llx %s size 0x%llx new_size 0x%llx "
- "toss start 0x%llx toss finish 0x%llx",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino,
- __print_flags(__entry->flag, "|", XFS_ITRUNC_FLAGS),
- __entry->size,
- __entry->new_size,
- __entry->toss_start,
- __entry->toss_finish)
-);
+DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
DECLARE_EVENT_CLASS(xfs_itrunc_class,
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
@@ -1089,8 +1059,8 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class,
DEFINE_EVENT(xfs_itrunc_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
TP_ARGS(ip, new_size))
-DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
-DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end);
TRACE_EVENT(xfs_pagecache_inval,
TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 6fa21460381..837f31158d4 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -220,7 +220,7 @@ xfs_qm_adjust_dqtimers(
{
ASSERT(d->d_id);
-#ifdef QUOTADEBUG
+#ifdef DEBUG
if (d->d_blk_hardlimit)
ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
be64_to_cpu(d->d_blk_hardlimit));
@@ -231,6 +231,7 @@ xfs_qm_adjust_dqtimers(
ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
be64_to_cpu(d->d_rtb_hardlimit));
#endif
+
if (!d->d_btimer) {
if ((d->d_blk_softlimit &&
(be64_to_cpu(d->d_bcount) >=
@@ -318,7 +319,7 @@ xfs_qm_init_dquot_blk(
ASSERT(tp);
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
d = (xfs_dqblk_t *)XFS_BUF_PTR(bp);
@@ -534,7 +535,7 @@ xfs_qm_dqtobp(
}
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
/*
* calculate the location of the dquot inside the buffer.
@@ -622,7 +623,7 @@ xfs_qm_dqread(
* brelse it because we have the changes incore.
*/
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
xfs_trans_brelse(tp, bp);
return (error);
@@ -1423,45 +1424,6 @@ xfs_qm_dqpurge(
}
-#ifdef QUOTADEBUG
-void
-xfs_qm_dqprint(xfs_dquot_t *dqp)
-{
- struct xfs_mount *mp = dqp->q_mount;
-
- xfs_debug(mp, "-----------KERNEL DQUOT----------------");
- xfs_debug(mp, "---- dquotID = %d",
- (int)be32_to_cpu(dqp->q_core.d_id));
- xfs_debug(mp, "---- type = %s", DQFLAGTO_TYPESTR(dqp));
- xfs_debug(mp, "---- fs = 0x%p", dqp->q_mount);
- xfs_debug(mp, "---- blkno = 0x%x", (int) dqp->q_blkno);
- xfs_debug(mp, "---- boffset = 0x%x", (int) dqp->q_bufoffset);
- xfs_debug(mp, "---- blkhlimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_blk_hardlimit),
- (int)be64_to_cpu(dqp->q_core.d_blk_hardlimit));
- xfs_debug(mp, "---- blkslimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_blk_softlimit),
- (int)be64_to_cpu(dqp->q_core.d_blk_softlimit));
- xfs_debug(mp, "---- inohlimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_ino_hardlimit),
- (int)be64_to_cpu(dqp->q_core.d_ino_hardlimit));
- xfs_debug(mp, "---- inoslimit = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_ino_softlimit),
- (int)be64_to_cpu(dqp->q_core.d_ino_softlimit));
- xfs_debug(mp, "---- bcount = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_bcount),
- (int)be64_to_cpu(dqp->q_core.d_bcount));
- xfs_debug(mp, "---- icount = %Lu (0x%x)",
- be64_to_cpu(dqp->q_core.d_icount),
- (int)be64_to_cpu(dqp->q_core.d_icount));
- xfs_debug(mp, "---- btimer = %d",
- (int)be32_to_cpu(dqp->q_core.d_btimer));
- xfs_debug(mp, "---- itimer = %d",
- (int)be32_to_cpu(dqp->q_core.d_itimer));
- xfs_debug(mp, "---------------------------");
-}
-#endif
-
/*
* Give the buffer a little push if it is incore and
* wait on the flush lock.
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 5da3a23b820..34b7e945dbf 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -116,12 +116,6 @@ static inline void xfs_dqfunlock(xfs_dquot_t *dqp)
(XFS_IS_UQUOTA_ON((d)->q_mount)) : \
(XFS_IS_OQUOTA_ON((d)->q_mount))))
-#ifdef QUOTADEBUG
-extern void xfs_qm_dqprint(xfs_dquot_t *);
-#else
-#define xfs_qm_dqprint(a)
-#endif
-
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
extern int xfs_qm_dqpurge(xfs_dquot_t *);
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index b94dace4e78..46e54ad9a2d 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -67,32 +67,6 @@ static struct shrinker xfs_qm_shaker = {
.seeks = DEFAULT_SEEKS,
};
-#ifdef DEBUG
-extern struct mutex qcheck_lock;
-#endif
-
-#ifdef QUOTADEBUG
-static void
-xfs_qm_dquot_list_print(
- struct xfs_mount *mp)
-{
- xfs_dquot_t *dqp;
- int i = 0;
-
- list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist_lock, qi_mplist) {
- xfs_debug(mp, " %d. \"%d (%s)\" "
- "bcnt = %lld, icnt = %lld, refs = %d",
- i++, be32_to_cpu(dqp->q_core.d_id),
- DQFLAGTO_TYPESTR(dqp),
- (long long)be64_to_cpu(dqp->q_core.d_bcount),
- (long long)be64_to_cpu(dqp->q_core.d_icount),
- dqp->q_nrefs);
- }
-}
-#else
-static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
-#endif
-
/*
* Initialize the XQM structure.
* Note that there is not one quota manager per file system.
@@ -165,9 +139,6 @@ xfs_Gqm_init(void)
atomic_set(&xqm->qm_totaldquots, 0);
xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
xqm->qm_nrefs = 0;
-#ifdef DEBUG
- mutex_init(&qcheck_lock);
-#endif
return xqm;
out_free_udqhash:
@@ -204,9 +175,6 @@ xfs_qm_destroy(
mutex_lock(&xqm->qm_dqfrlist_lock);
list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
xfs_dqlock(dqp);
-#ifdef QUOTADEBUG
- xfs_debug(dqp->q_mount, "FREELIST destroy 0x%p", dqp);
-#endif
list_del_init(&dqp->q_freelist);
xfs_Gqm->qm_dqfrlist_cnt--;
xfs_dqunlock(dqp);
@@ -214,9 +182,6 @@ xfs_qm_destroy(
}
mutex_unlock(&xqm->qm_dqfrlist_lock);
mutex_destroy(&xqm->qm_dqfrlist_lock);
-#ifdef DEBUG
- mutex_destroy(&qcheck_lock);
-#endif
kmem_free(xqm);
}
@@ -409,11 +374,6 @@ xfs_qm_mount_quotas(
xfs_warn(mp, "Failed to initialize disk quotas.");
return;
}
-
-#ifdef QUOTADEBUG
- if (XFS_IS_QUOTA_ON(mp))
- xfs_qm_internalqcheck(mp);
-#endif
}
/*
@@ -866,8 +826,8 @@ xfs_qm_dqattach_locked(
}
done:
-#ifdef QUOTADEBUG
- if (! error) {
+#ifdef DEBUG
+ if (!error) {
if (XFS_IS_UQUOTA_ON(mp))
ASSERT(ip->i_udquot);
if (XFS_IS_OQUOTA_ON(mp))
@@ -1733,8 +1693,6 @@ xfs_qm_quotacheck(
mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
mp->m_qflags |= flags;
- xfs_qm_dquot_list_print(mp);
-
error_return:
if (error) {
xfs_warn(mp,
@@ -2096,9 +2054,6 @@ xfs_qm_write_sb_changes(
xfs_trans_t *tp;
int error;
-#ifdef QUOTADEBUG
- xfs_notice(mp, "Writing superblock quota changes");
-#endif
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
if ((error = xfs_trans_reserve(tp, 0,
mp->m_sb.sb_sectsize + 128, 0,
diff --git a/fs/xfs/quota/xfs_qm.h b/fs/xfs/quota/xfs_qm.h
index 567b29b9f1b..43b9abe1052 100644
--- a/fs/xfs/quota/xfs_qm.h
+++ b/fs/xfs/quota/xfs_qm.h
@@ -163,10 +163,4 @@ extern int xfs_qm_scall_getqstat(xfs_mount_t *, fs_quota_stat_t *);
extern int xfs_qm_scall_quotaon(xfs_mount_t *, uint);
extern int xfs_qm_scall_quotaoff(xfs_mount_t *, uint);
-#ifdef DEBUG
-extern int xfs_qm_internalqcheck(xfs_mount_t *);
-#else
-#define xfs_qm_internalqcheck(mp) (0)
-#endif
-
#endif /* __XFS_QM_H__ */
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 2dadb15d5ca..609246f42e6 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -263,7 +263,7 @@ xfs_qm_scall_trunc_qfile(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip);
- error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK, 1);
+ error = xfs_itruncate_data(&tp, ip, 0);
if (error) {
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT);
@@ -622,7 +622,6 @@ xfs_qm_scall_setqlim(
xfs_trans_log_dquot(tp, dqp);
error = xfs_trans_commit(tp, 0);
- xfs_qm_dqprint(dqp);
xfs_qm_dqrele(dqp);
out_unlock:
@@ -657,7 +656,6 @@ xfs_qm_scall_getquota(
xfs_qm_dqput(dqp);
return XFS_ERROR(ENOENT);
}
- /* xfs_qm_dqprint(dqp); */
/*
* Convert the disk dquot to the exportable format
*/
@@ -906,354 +904,3 @@ xfs_qm_dqrele_all_inodes(
ASSERT(mp->m_quotainfo);
xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
}
-
-/*------------------------------------------------------------------------*/
-#ifdef DEBUG
-/*
- * This contains all the test functions for XFS disk quotas.
- * Currently it does a quota accounting check. ie. it walks through
- * all inodes in the file system, calculating the dquot accounting fields,
- * and prints out any inconsistencies.
- */
-xfs_dqhash_t *qmtest_udqtab;
-xfs_dqhash_t *qmtest_gdqtab;
-int qmtest_hashmask;
-int qmtest_nfails;
-struct mutex qcheck_lock;
-
-#define DQTEST_HASHVAL(mp, id) (((__psunsigned_t)(mp) + \
- (__psunsigned_t)(id)) & \
- (qmtest_hashmask - 1))
-
-#define DQTEST_HASH(mp, id, type) ((type & XFS_DQ_USER) ? \
- (qmtest_udqtab + \
- DQTEST_HASHVAL(mp, id)) : \
- (qmtest_gdqtab + \
- DQTEST_HASHVAL(mp, id)))
-
-#define DQTEST_LIST_PRINT(l, NXT, title) \
-{ \
- xfs_dqtest_t *dqp; int i = 0;\
- xfs_debug(NULL, "%s (#%d)", title, (int) (l)->qh_nelems); \
- for (dqp = (xfs_dqtest_t *)(l)->qh_next; dqp != NULL; \
- dqp = (xfs_dqtest_t *)dqp->NXT) { \
- xfs_debug(dqp->q_mount, \
- " %d. \"%d (%s)\" bcnt = %d, icnt = %d", \
- ++i, dqp->d_id, DQFLAGTO_TYPESTR(dqp), \
- dqp->d_bcount, dqp->d_icount); } \
-}
-
-typedef struct dqtest {
- uint dq_flags; /* various flags (XFS_DQ_*) */
- struct list_head q_hashlist;
- xfs_dqhash_t *q_hash; /* the hashchain header */
- xfs_mount_t *q_mount; /* filesystem this relates to */
- xfs_dqid_t d_id; /* user id or group id */
- xfs_qcnt_t d_bcount; /* # disk blocks owned by the user */
- xfs_qcnt_t d_icount; /* # inodes owned by the user */
-} xfs_dqtest_t;
-
-STATIC void
-xfs_qm_hashinsert(xfs_dqhash_t *h, xfs_dqtest_t *dqp)
-{
- list_add(&dqp->q_hashlist, &h->qh_list);
- h->qh_version++;
- h->qh_nelems++;
-}
-STATIC void
-xfs_qm_dqtest_print(
- struct xfs_mount *mp,
- struct dqtest *d)
-{
- xfs_debug(mp, "-----------DQTEST DQUOT----------------");
- xfs_debug(mp, "---- dquot ID = %d", d->d_id);
- xfs_debug(mp, "---- fs = 0x%p", d->q_mount);
- xfs_debug(mp, "---- bcount = %Lu (0x%x)",
- d->d_bcount, (int)d->d_bcount);
- xfs_debug(mp, "---- icount = %Lu (0x%x)",
- d->d_icount, (int)d->d_icount);
- xfs_debug(mp, "---------------------------");
-}
-
-STATIC void
-xfs_qm_dqtest_failed(
- xfs_dqtest_t *d,
- xfs_dquot_t *dqp,
- char *reason,
- xfs_qcnt_t a,
- xfs_qcnt_t b,
- int error)
-{
- qmtest_nfails++;
- if (error)
- xfs_debug(dqp->q_mount,
- "quotacheck failed id=%d, err=%d\nreason: %s",
- d->d_id, error, reason);
- else
- xfs_debug(dqp->q_mount,
- "quotacheck failed id=%d (%s) [%d != %d]",
- d->d_id, reason, (int)a, (int)b);
- xfs_qm_dqtest_print(dqp->q_mount, d);
- if (dqp)
- xfs_qm_dqprint(dqp);
-}
-
-STATIC int
-xfs_dqtest_cmp2(
- xfs_dqtest_t *d,
- xfs_dquot_t *dqp)
-{
- int err = 0;
- if (be64_to_cpu(dqp->q_core.d_icount) != d->d_icount) {
- xfs_qm_dqtest_failed(d, dqp, "icount mismatch",
- be64_to_cpu(dqp->q_core.d_icount),
- d->d_icount, 0);
- err++;
- }
- if (be64_to_cpu(dqp->q_core.d_bcount) != d->d_bcount) {
- xfs_qm_dqtest_failed(d, dqp, "bcount mismatch",
- be64_to_cpu(dqp->q_core.d_bcount),
- d->d_bcount, 0);
- err++;
- }
- if (dqp->q_core.d_blk_softlimit &&
- be64_to_cpu(dqp->q_core.d_bcount) >=
- be64_to_cpu(dqp->q_core.d_blk_softlimit)) {
- if (!dqp->q_core.d_btimer && dqp->q_core.d_id) {
- xfs_debug(dqp->q_mount,
- "%d [%s] BLK TIMER NOT STARTED",
- d->d_id, DQFLAGTO_TYPESTR(d));
- err++;
- }
- }
- if (dqp->q_core.d_ino_softlimit &&
- be64_to_cpu(dqp->q_core.d_icount) >=
- be64_to_cpu(dqp->q_core.d_ino_softlimit)) {
- if (!dqp->q_core.d_itimer && dqp->q_core.d_id) {
- xfs_debug(dqp->q_mount,
- "%d [%s] INO TIMER NOT STARTED",
- d->d_id, DQFLAGTO_TYPESTR(d));
- err++;
- }
- }
-#ifdef QUOTADEBUG
- if (!err) {
- xfs_debug(dqp->q_mount, "%d [%s] qchecked",
- d->d_id, DQFLAGTO_TYPESTR(d));
- }
-#endif
- return (err);
-}
-
-STATIC void
-xfs_dqtest_cmp(
- xfs_dqtest_t *d)
-{
- xfs_dquot_t *dqp;
- int error;
-
- /* xfs_qm_dqtest_print(d); */
- if ((error = xfs_qm_dqget(d->q_mount, NULL, d->d_id, d->dq_flags, 0,
- &dqp))) {
- xfs_qm_dqtest_failed(d, NULL, "dqget failed", 0, 0, error);
- return;
- }
- xfs_dqtest_cmp2(d, dqp);
- xfs_qm_dqput(dqp);
-}
-
-STATIC int
-xfs_qm_internalqcheck_dqget(
- xfs_mount_t *mp,
- xfs_dqid_t id,
- uint type,
- xfs_dqtest_t **O_dq)
-{
- xfs_dqtest_t *d;
- xfs_dqhash_t *h;
-
- h = DQTEST_HASH(mp, id, type);
- list_for_each_entry(d, &h->qh_list, q_hashlist) {
- if (d->d_id == id && mp == d->q_mount) {
- *O_dq = d;
- return (0);
- }
- }
- d = kmem_zalloc(sizeof(xfs_dqtest_t), KM_SLEEP);
- d->dq_flags = type;
- d->d_id = id;
- d->q_mount = mp;
- d->q_hash = h;
- INIT_LIST_HEAD(&d->q_hashlist);
- xfs_qm_hashinsert(h, d);
- *O_dq = d;
- return (0);
-}
-
-STATIC void
-xfs_qm_internalqcheck_get_dquots(
- xfs_mount_t *mp,
- xfs_dqid_t uid,
- xfs_dqid_t projid,
- xfs_dqid_t gid,
- xfs_dqtest_t **ud,
- xfs_dqtest_t **gd)
-{
- if (XFS_IS_UQUOTA_ON(mp))
- xfs_qm_internalqcheck_dqget(mp, uid, XFS_DQ_USER, ud);
- if (XFS_IS_GQUOTA_ON(mp))
- xfs_qm_internalqcheck_dqget(mp, gid, XFS_DQ_GROUP, gd);
- else if (XFS_IS_PQUOTA_ON(mp))
- xfs_qm_internalqcheck_dqget(mp, projid, XFS_DQ_PROJ, gd);
-}
-
-
-STATIC void
-xfs_qm_internalqcheck_dqadjust(
- xfs_inode_t *ip,
- xfs_dqtest_t *d)
-{
- d->d_icount++;
- d->d_bcount += (xfs_qcnt_t)ip->i_d.di_nblocks;
-}
-
-STATIC int
-xfs_qm_internalqcheck_adjust(
- xfs_mount_t *mp, /* mount point for filesystem */
- xfs_ino_t ino, /* inode number to get data for */
- void __user *buffer, /* not used */
- int ubsize, /* not used */
- int *ubused, /* not used */
- int *res) /* bulkstat result code */
-{
- xfs_inode_t *ip;
- xfs_dqtest_t *ud, *gd;
- uint lock_flags;
- boolean_t ipreleased;
- int error;
-
- ASSERT(XFS_IS_QUOTA_RUNNING(mp));
-
- if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
- *res = BULKSTAT_RV_NOTHING;
- xfs_debug(mp, "%s: ino=%llu, uqino=%llu, gqino=%llu\n",
- __func__, (unsigned long long) ino,
- (unsigned long long) mp->m_sb.sb_uquotino,
- (unsigned long long) mp->m_sb.sb_gquotino);
- return XFS_ERROR(EINVAL);
- }
- ipreleased = B_FALSE;
- again:
- lock_flags = XFS_ILOCK_SHARED;
- if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip))) {
- *res = BULKSTAT_RV_NOTHING;
- return (error);
- }
-
- /*
- * This inode can have blocks after eof which can get released
- * when we send it to inactive. Since we don't check the dquot
- * until the after all our calculations are done, we must get rid
- * of those now.
- */
- if (! ipreleased) {
- xfs_iunlock(ip, lock_flags);
- IRELE(ip);
- ipreleased = B_TRUE;
- goto again;
- }
- xfs_qm_internalqcheck_get_dquots(mp,
- (xfs_dqid_t) ip->i_d.di_uid,
- (xfs_dqid_t) xfs_get_projid(ip),
- (xfs_dqid_t) ip->i_d.di_gid,
- &ud, &gd);
- if (XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(ud);
- xfs_qm_internalqcheck_dqadjust(ip, ud);
- }
- if (XFS_IS_OQUOTA_ON(mp)) {
- ASSERT(gd);
- xfs_qm_internalqcheck_dqadjust(ip, gd);
- }
- xfs_iunlock(ip, lock_flags);
- IRELE(ip);
- *res = BULKSTAT_RV_DIDONE;
- return (0);
-}
-
-
-/* PRIVATE, debugging */
-int
-xfs_qm_internalqcheck(
- xfs_mount_t *mp)
-{
- xfs_ino_t lastino;
- int done, count;
- int i;
- int error;
-
- lastino = 0;
- qmtest_hashmask = 32;
- count = 5;
- done = 0;
- qmtest_nfails = 0;
-
- if (! XFS_IS_QUOTA_ON(mp))
- return XFS_ERROR(ESRCH);
-
- xfs_log_force(mp, XFS_LOG_SYNC);
- XFS_bflush(mp->m_ddev_targp);
- xfs_log_force(mp, XFS_LOG_SYNC);
- XFS_bflush(mp->m_ddev_targp);
-
- mutex_lock(&qcheck_lock);
- /* There should be absolutely no quota activity while this
- is going on. */
- qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
- sizeof(xfs_dqhash_t), KM_SLEEP);
- qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
- sizeof(xfs_dqhash_t), KM_SLEEP);
- do {
- /*
- * Iterate thru all the inodes in the file system,
- * adjusting the corresponding dquot counters
- */
- error = xfs_bulkstat(mp, &lastino, &count,
- xfs_qm_internalqcheck_adjust,
- 0, NULL, &done);
- if (error) {
- xfs_debug(mp, "Bulkstat returned error 0x%x", error);
- break;
- }
- } while (!done);
-
- xfs_debug(mp, "Checking results against system dquots");
- for (i = 0; i < qmtest_hashmask; i++) {
- xfs_dqtest_t *d, *n;
- xfs_dqhash_t *h;
-
- h = &qmtest_udqtab[i];
- list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
- xfs_dqtest_cmp(d);
- kmem_free(d);
- }
- h = &qmtest_gdqtab[i];
- list_for_each_entry_safe(d, n, &h->qh_list, q_hashlist) {
- xfs_dqtest_cmp(d);
- kmem_free(d);
- }
- }
-
- if (qmtest_nfails) {
- xfs_debug(mp, "******** quotacheck failed ********");
- xfs_debug(mp, "failures = %d", qmtest_nfails);
- } else {
- xfs_debug(mp, "******** quotacheck successful! ********");
- }
- kmem_free(qmtest_udqtab);
- kmem_free(qmtest_gdqtab);
- mutex_unlock(&qcheck_lock);
- return (qmtest_nfails);
-}
-
-#endif /* DEBUG */
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 2a364873133..4d00ee67792 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -59,7 +59,7 @@ xfs_trans_dqjoin(
xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);
/*
- * Initialize i_transp so we can later determine if this dquot is
+	 * Initialize q_transp so we can later determine if this dquot is
* associated with this transaction.
*/
dqp->q_transp = tp;
@@ -387,18 +387,18 @@ xfs_trans_apply_dquot_deltas(
qtrx->qt_delbcnt_delta;
totalrtbdelta = qtrx->qt_rtbcount_delta +
qtrx->qt_delrtb_delta;
-#ifdef QUOTADEBUG
+#ifdef DEBUG
if (totalbdelta < 0)
ASSERT(be64_to_cpu(d->d_bcount) >=
- (xfs_qcnt_t) -totalbdelta);
+ -totalbdelta);
if (totalrtbdelta < 0)
ASSERT(be64_to_cpu(d->d_rtbcount) >=
- (xfs_qcnt_t) -totalrtbdelta);
+ -totalrtbdelta);
if (qtrx->qt_icount_delta < 0)
ASSERT(be64_to_cpu(d->d_icount) >=
- (xfs_qcnt_t) -qtrx->qt_icount_delta);
+ -qtrx->qt_icount_delta);
#endif
if (totalbdelta)
be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta);
@@ -642,11 +642,6 @@ xfs_trans_dqresv(
((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) ||
(XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) &&
(XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) {
-#ifdef QUOTADEBUG
- xfs_debug(mp,
- "BLK Res: nblks=%ld + resbcount=%Ld > hardlimit=%Ld?",
- nblks, *resbcountp, hardlimit);
-#endif
if (nblks > 0) {
/*
* dquot is locked already. See if we'd go over the
diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h
index 5ad8ad3a1dc..53ec3ea9a62 100644
--- a/fs/xfs/xfs.h
+++ b/fs/xfs/xfs.h
@@ -22,7 +22,6 @@
#define STATIC
#define DEBUG 1
#define XFS_BUF_LOCK_TRACKING 1
-/* #define QUOTADEBUG 1 */
#endif
#include <linux-2.6/xfs_linux.h>
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 95862bbff56..1e00b3ef627 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -570,9 +570,7 @@ xfs_alloc_ag_vextent_exact(
xfs_agblock_t tbno; /* start block of trimmed extent */
xfs_extlen_t tlen; /* length of trimmed extent */
xfs_agblock_t tend; /* end block of trimmed extent */
- xfs_agblock_t end; /* end of allocated extent */
int i; /* success/failure of operation */
- xfs_extlen_t rlen; /* length of returned extent */
ASSERT(args->alignment == 1);
@@ -625,18 +623,16 @@ xfs_alloc_ag_vextent_exact(
*
* Fix the length according to mod and prod if given.
*/
- end = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen);
- args->len = end - args->agbno;
+ args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
+ - args->agbno;
xfs_alloc_fix_len(args);
if (!xfs_alloc_fix_minleft(args))
goto not_found;
- rlen = args->len;
- ASSERT(args->agbno + rlen <= tend);
- end = args->agbno + rlen;
+ ASSERT(args->agbno + args->len <= tend);
/*
- * We are allocating agbno for rlen [agbno .. end]
+	 * We are allocating args->len blocks starting at agbno.
* Allocate/initialize a cursor for the by-size btree.
*/
cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
@@ -2127,7 +2123,7 @@ xfs_read_agf(
* Validate the magic number of the agf block.
*/
agf_ok =
- be32_to_cpu(agf->agf_magicnum) == XFS_AGF_MAGIC &&
+ agf->agf_magicnum == cpu_to_be32(XFS_AGF_MAGIC) &&
XFS_AGF_GOOD_VERSION(be32_to_cpu(agf->agf_versionnum)) &&
be32_to_cpu(agf->agf_freeblks) <= be32_to_cpu(agf->agf_length) &&
be32_to_cpu(agf->agf_flfirst) < XFS_AGFL_SIZE(mp) &&
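
The rewritten magic-number check (the same pattern recurs through the
xfs_attr hunks below) swaps the constant rather than the on-disk field,
letting the compiler fold the byte swap at build time. A self-contained
illustration, using htons() as a userspace stand-in for cpu_to_be16():

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	#define EXAMPLE_MAGIC	0xfbee	/* hypothetical on-disk magic value */

	/* Swaps the on-disk field on every call. */
	static int check_swapping_field(uint16_t disk_val)
	{
		return ntohs(disk_val) == EXAMPLE_MAGIC;
	}

	/* Swaps the constant instead; the compiler folds htons(EXAMPLE_MAGIC)
	 * to a literal, so no runtime swap is needed. */
	static int check_swapping_constant(uint16_t disk_val)
	{
		return disk_val == htons(EXAMPLE_MAGIC);
	}

	int main(void)
	{
		uint16_t on_disk = htons(EXAMPLE_MAGIC);

		printf("%d %d\n", check_swapping_field(on_disk),
		       check_swapping_constant(on_disk));	/* prints "1 1" */
		return 0;
	}
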
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 2b3518826a6..ffb3386e45c 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -31,7 +31,6 @@
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -311,72 +310,6 @@ xfs_allocbt_recs_inorder(
}
#endif /* DEBUG */
-#ifdef XFS_BTREE_TRACE
-ktrace_t *xfs_allocbt_trace_buf;
-
-STATIC void
-xfs_allocbt_trace_enter(
- struct xfs_btree_cur *cur,
- const char *func,
- char *s,
- int type,
- int line,
- __psunsigned_t a0,
- __psunsigned_t a1,
- __psunsigned_t a2,
- __psunsigned_t a3,
- __psunsigned_t a4,
- __psunsigned_t a5,
- __psunsigned_t a6,
- __psunsigned_t a7,
- __psunsigned_t a8,
- __psunsigned_t a9,
- __psunsigned_t a10)
-{
- ktrace_enter(xfs_allocbt_trace_buf, (void *)(__psint_t)type,
- (void *)func, (void *)s, NULL, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
-}
-
-STATIC void
-xfs_allocbt_trace_cursor(
- struct xfs_btree_cur *cur,
- __uint32_t *s0,
- __uint64_t *l0,
- __uint64_t *l1)
-{
- *s0 = cur->bc_private.a.agno;
- *l0 = cur->bc_rec.a.ar_startblock;
- *l1 = cur->bc_rec.a.ar_blockcount;
-}
-
-STATIC void
-xfs_allocbt_trace_key(
- struct xfs_btree_cur *cur,
- union xfs_btree_key *key,
- __uint64_t *l0,
- __uint64_t *l1)
-{
- *l0 = be32_to_cpu(key->alloc.ar_startblock);
- *l1 = be32_to_cpu(key->alloc.ar_blockcount);
-}
-
-STATIC void
-xfs_allocbt_trace_record(
- struct xfs_btree_cur *cur,
- union xfs_btree_rec *rec,
- __uint64_t *l0,
- __uint64_t *l1,
- __uint64_t *l2)
-{
- *l0 = be32_to_cpu(rec->alloc.ar_startblock);
- *l1 = be32_to_cpu(rec->alloc.ar_blockcount);
- *l2 = 0;
-}
-#endif /* XFS_BTREE_TRACE */
-
static const struct xfs_btree_ops xfs_allocbt_ops = {
.rec_len = sizeof(xfs_alloc_rec_t),
.key_len = sizeof(xfs_alloc_key_t),
@@ -393,18 +326,10 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_allocbt_key_diff,
-
#ifdef DEBUG
.keys_inorder = xfs_allocbt_keys_inorder,
.recs_inorder = xfs_allocbt_recs_inorder,
#endif
-
-#ifdef XFS_BTREE_TRACE
- .trace_enter = xfs_allocbt_trace_enter,
- .trace_cursor = xfs_allocbt_trace_cursor,
- .trace_key = xfs_allocbt_trace_key,
- .trace_record = xfs_allocbt_trace_record,
-#endif
};
/*
@@ -427,13 +352,16 @@ xfs_allocbt_init_cursor(
cur->bc_tp = tp;
cur->bc_mp = mp;
- cur->bc_nlevels = be32_to_cpu(agf->agf_levels[btnum]);
cur->bc_btnum = btnum;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
-
cur->bc_ops = &xfs_allocbt_ops;
- if (btnum == XFS_BTNUM_CNT)
+
+ if (btnum == XFS_BTNUM_CNT) {
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
+ } else {
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
+ }
cur->bc_private.a.agbp = agbp;
cur->bc_private.a.agno = agno;
diff --git a/fs/xfs/xfs_arch.h b/fs/xfs/xfs_arch.h
deleted file mode 100644
index 0902249354a..00000000000
--- a/fs/xfs/xfs_arch.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_ARCH_H__
-#define __XFS_ARCH_H__
-
-#ifndef XFS_BIG_INUMS
-# error XFS_BIG_INUMS must be defined true or false
-#endif
-
-#ifdef __KERNEL__
-
-#include <asm/byteorder.h>
-
-#ifdef __BIG_ENDIAN
-#define XFS_NATIVE_HOST 1
-#else
-#undef XFS_NATIVE_HOST
-#endif
-
-#else /* __KERNEL__ */
-
-#if __BYTE_ORDER == __BIG_ENDIAN
-#define XFS_NATIVE_HOST 1
-#else
-#undef XFS_NATIVE_HOST
-#endif
-
-#ifdef XFS_NATIVE_HOST
-#define cpu_to_be16(val) ((__force __be16)(__u16)(val))
-#define cpu_to_be32(val) ((__force __be32)(__u32)(val))
-#define cpu_to_be64(val) ((__force __be64)(__u64)(val))
-#define be16_to_cpu(val) ((__force __u16)(__be16)(val))
-#define be32_to_cpu(val) ((__force __u32)(__be32)(val))
-#define be64_to_cpu(val) ((__force __u64)(__be64)(val))
-#else
-#define cpu_to_be16(val) ((__force __be16)__swab16((__u16)(val)))
-#define cpu_to_be32(val) ((__force __be32)__swab32((__u32)(val)))
-#define cpu_to_be64(val) ((__force __be64)__swab64((__u64)(val)))
-#define be16_to_cpu(val) (__swab16((__force __u16)(__be16)(val)))
-#define be32_to_cpu(val) (__swab32((__force __u32)(__be32)(val)))
-#define be64_to_cpu(val) (__swab64((__force __u64)(__be64)(val)))
-#endif
-
-static inline void be16_add_cpu(__be16 *a, __s16 b)
-{
- *a = cpu_to_be16(be16_to_cpu(*a) + b);
-}
-
-static inline void be32_add_cpu(__be32 *a, __s32 b)
-{
- *a = cpu_to_be32(be32_to_cpu(*a) + b);
-}
-
-static inline void be64_add_cpu(__be64 *a, __s64 b)
-{
- *a = cpu_to_be64(be64_to_cpu(*a) + b);
-}
-
-#endif /* __KERNEL__ */
-
-/*
- * get and set integers from potentially unaligned locations
- */
-
-#define INT_GET_UNALIGNED_16_BE(pointer) \
- ((__u16)((((__u8*)(pointer))[0] << 8) | (((__u8*)(pointer))[1])))
-#define INT_SET_UNALIGNED_16_BE(pointer,value) \
- { \
- ((__u8*)(pointer))[0] = (((value) >> 8) & 0xff); \
- ((__u8*)(pointer))[1] = (((value) ) & 0xff); \
- }
-
-/*
- * In directories inode numbers are stored as unaligned arrays of unsigned
- * 8bit integers on disk.
- *
- * For v1 directories or v2 directories that contain inode numbers that
- * do not fit into 32bit the array has eight members, but the first member
- * is always zero:
- *
- * |unused|48-55|40-47|32-39|24-31|16-23| 8-15| 0- 7|
- *
- * For v2 directories that only contain entries with inode numbers that fit
- * into 32bits a four-member array is used:
- *
- * |24-31|16-23| 8-15| 0- 7|
- */
-
-#define XFS_GET_DIR_INO4(di) \
- (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
-
-#define XFS_PUT_DIR_INO4(from, di) \
-do { \
- (di).i[0] = (((from) & 0xff000000ULL) >> 24); \
- (di).i[1] = (((from) & 0x00ff0000ULL) >> 16); \
- (di).i[2] = (((from) & 0x0000ff00ULL) >> 8); \
- (di).i[3] = ((from) & 0x000000ffULL); \
-} while (0)
-
-#define XFS_DI_HI(di) \
- (((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
-#define XFS_DI_LO(di) \
- (((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
-
-#define XFS_GET_DIR_INO8(di) \
- (((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
- ((xfs_ino_t)XFS_DI_HI(di) << 32))
-
-#define XFS_PUT_DIR_INO8(from, di) \
-do { \
- (di).i[0] = 0; \
- (di).i[1] = (((from) & 0x00ff000000000000ULL) >> 48); \
- (di).i[2] = (((from) & 0x0000ff0000000000ULL) >> 40); \
- (di).i[3] = (((from) & 0x000000ff00000000ULL) >> 32); \
- (di).i[4] = (((from) & 0x00000000ff000000ULL) >> 24); \
- (di).i[5] = (((from) & 0x0000000000ff0000ULL) >> 16); \
- (di).i[6] = (((from) & 0x000000000000ff00ULL) >> 8); \
- (di).i[7] = ((from) & 0x00000000000000ffULL); \
-} while (0)
-
-#endif /* __XFS_ARCH_H__ */
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 01d2072fb6d..cbae424fe1b 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -822,17 +822,21 @@ xfs_attr_inactive(xfs_inode_t *dp)
error = xfs_attr_root_inactive(&trans, dp);
if (error)
goto out;
+
/*
- * signal synchronous inactive transactions unless this
- * is a synchronous mount filesystem in which case we
- * know that we're here because we've been called out of
- * xfs_inactive which means that the last reference is gone
- * and the unlink transaction has already hit the disk so
- * async inactive transactions are safe.
+	 * Signal synchronous inactive transactions unless this is a
+	 * synchronous mount filesystem.  In that case we know we're here
+	 * because we've been called out of xfs_inactive, which means the
+	 * last reference is gone and the unlink transaction has already
+	 * hit the disk, so async inactive transactions are safe.
*/
- if ((error = xfs_itruncate_finish(&trans, dp, 0LL, XFS_ATTR_FORK,
- (!(mp->m_flags & XFS_MOUNT_WSYNC)
- ? 1 : 0))))
+ if (!(mp->m_flags & XFS_MOUNT_WSYNC)) {
+ if (dp->i_d.di_anextents > 0)
+ xfs_trans_set_sync(trans);
+ }
+
+ error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+ if (error)
goto out;
/*
@@ -1199,7 +1203,7 @@ xfs_attr_leaf_list(xfs_attr_list_context_t *context)
return XFS_ERROR(error);
ASSERT(bp != NULL);
leaf = bp->data;
- if (unlikely(be16_to_cpu(leaf->hdr.info.magic) != XFS_ATTR_LEAF_MAGIC)) {
+ if (unlikely(leaf->hdr.info.magic != cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_leaf_list", XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
xfs_da_brelse(NULL, bp);
@@ -1606,9 +1610,8 @@ xfs_attr_node_removename(xfs_da_args_t *args)
XFS_ATTR_FORK);
if (error)
goto out;
- ASSERT(be16_to_cpu(((xfs_attr_leafblock_t *)
- bp->data)->hdr.info.magic)
- == XFS_ATTR_LEAF_MAGIC);
+ ASSERT((((xfs_attr_leafblock_t *)bp->data)->hdr.info.magic) ==
+ cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
xfs_bmap_init(args->flist, args->firstblock);
@@ -1873,11 +1876,11 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
return(XFS_ERROR(EFSCORRUPTED));
}
node = bp->data;
- if (be16_to_cpu(node->hdr.info.magic)
- == XFS_ATTR_LEAF_MAGIC)
+ if (node->hdr.info.magic ==
+ cpu_to_be16(XFS_ATTR_LEAF_MAGIC))
break;
- if (unlikely(be16_to_cpu(node->hdr.info.magic)
- != XFS_DA_NODE_MAGIC)) {
+ if (unlikely(node->hdr.info.magic !=
+ cpu_to_be16(XFS_DA_NODE_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
XFS_ERRLEVEL_LOW,
context->dp->i_mount,
@@ -1912,8 +1915,8 @@ xfs_attr_node_list(xfs_attr_list_context_t *context)
*/
for (;;) {
leaf = bp->data;
- if (unlikely(be16_to_cpu(leaf->hdr.info.magic)
- != XFS_ATTR_LEAF_MAGIC)) {
+ if (unlikely(leaf->hdr.info.magic !=
+ cpu_to_be16(XFS_ATTR_LEAF_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_attr_node_list(4)",
XFS_ERRLEVEL_LOW,
context->dp->i_mount, leaf);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 71e90dc2aeb..8fad9602542 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -731,7 +731,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
int bytes, i;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
entry = &leaf->entries[0];
bytes = sizeof(struct xfs_attr_sf_hdr);
@@ -777,7 +777,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
ASSERT(bp != NULL);
memcpy(tmpbuffer, bp->data, XFS_LBSIZE(dp->i_mount));
leaf = (xfs_attr_leafblock_t *)tmpbuffer;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
memset(bp->data, 0, XFS_LBSIZE(dp->i_mount));
/*
@@ -872,7 +872,7 @@ xfs_attr_leaf_to_node(xfs_da_args_t *args)
goto out;
node = bp1->data;
leaf = bp2->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
/* both on-disk, don't endian-flip twice */
node->btree[0].hashval =
leaf->entries[be16_to_cpu(leaf->hdr.count)-1 ].hashval;
@@ -997,7 +997,7 @@ xfs_attr_leaf_add(xfs_dabuf_t *bp, xfs_da_args_t *args)
int tablesize, entsize, sum, tmp, i;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT((args->index >= 0)
&& (args->index <= be16_to_cpu(leaf->hdr.count)));
hdr = &leaf->hdr;
@@ -1070,7 +1070,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
int tmp, i;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
hdr = &leaf->hdr;
ASSERT((mapindex >= 0) && (mapindex < XFS_ATTR_LEAF_MAPSIZE));
ASSERT((args->index >= 0) && (args->index <= be16_to_cpu(hdr->count)));
@@ -1256,8 +1256,8 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
leaf1 = blk1->bp->data;
leaf2 = blk2->bp->data;
- ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+ ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
args = state->args;
/*
@@ -1533,7 +1533,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
*/
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->data;
- ASSERT(be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
leaf = (xfs_attr_leafblock_t *)info;
count = be16_to_cpu(leaf->hdr.count);
bytes = sizeof(xfs_attr_leaf_hdr_t) +
@@ -1596,7 +1596,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
bytes = state->blocksize - (state->blocksize>>2);
bytes -= be16_to_cpu(leaf->hdr.usedbytes);
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
count += be16_to_cpu(leaf->hdr.count);
bytes -= be16_to_cpu(leaf->hdr.usedbytes);
bytes -= count * sizeof(xfs_attr_leaf_entry_t);
@@ -1650,7 +1650,7 @@ xfs_attr_leaf_remove(xfs_dabuf_t *bp, xfs_da_args_t *args)
xfs_mount_t *mp;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
hdr = &leaf->hdr;
mp = args->trans->t_mountp;
ASSERT((be16_to_cpu(hdr->count) > 0)
@@ -1813,8 +1813,8 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
ASSERT(save_blk->magic == XFS_ATTR_LEAF_MAGIC);
drop_leaf = drop_blk->bp->data;
save_leaf = save_blk->bp->data;
- ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+ ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
drop_hdr = &drop_leaf->hdr;
save_hdr = &save_leaf->hdr;
@@ -1915,7 +1915,7 @@ xfs_attr_leaf_lookup_int(xfs_dabuf_t *bp, xfs_da_args_t *args)
xfs_dahash_t hashval;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(be16_to_cpu(leaf->hdr.count)
< (XFS_LBSIZE(args->dp->i_mount)/8));
@@ -2019,7 +2019,7 @@ xfs_attr_leaf_getvalue(xfs_dabuf_t *bp, xfs_da_args_t *args)
xfs_attr_leaf_name_remote_t *name_rmt;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(be16_to_cpu(leaf->hdr.count)
< (XFS_LBSIZE(args->dp->i_mount)/8));
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
@@ -2087,8 +2087,8 @@ xfs_attr_leaf_moveents(xfs_attr_leafblock_t *leaf_s, int start_s,
/*
* Set up environment.
*/
- ASSERT(be16_to_cpu(leaf_s->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
- ASSERT(be16_to_cpu(leaf_d->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf_s->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+ ASSERT(leaf_d->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
hdr_s = &leaf_s->hdr;
hdr_d = &leaf_d->hdr;
ASSERT((be16_to_cpu(hdr_s->count) > 0) &&
@@ -2222,8 +2222,8 @@ xfs_attr_leaf_order(xfs_dabuf_t *leaf1_bp, xfs_dabuf_t *leaf2_bp)
leaf1 = leaf1_bp->data;
leaf2 = leaf2_bp->data;
- ASSERT((be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC) &&
- (be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC));
+ ASSERT((leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) &&
+ (leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)));
if ((be16_to_cpu(leaf1->hdr.count) > 0) &&
(be16_to_cpu(leaf2->hdr.count) > 0) &&
((be32_to_cpu(leaf2->entries[0].hashval) <
@@ -2246,7 +2246,7 @@ xfs_attr_leaf_lasthash(xfs_dabuf_t *bp, int *count)
xfs_attr_leafblock_t *leaf;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
if (count)
*count = be16_to_cpu(leaf->hdr.count);
if (!leaf->hdr.count)
@@ -2265,7 +2265,7 @@ xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
xfs_attr_leaf_name_remote_t *name_rmt;
int size;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
if (leaf->entries[index].flags & XFS_ATTR_LOCAL) {
name_loc = xfs_attr_leaf_name_local(leaf, index);
size = xfs_attr_leaf_entsize_local(name_loc->namelen,
@@ -2451,7 +2451,7 @@ xfs_attr_leaf_clearflag(xfs_da_args_t *args)
ASSERT(bp != NULL);
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
ASSERT(args->index >= 0);
entry = &leaf->entries[ args->index ];
@@ -2515,7 +2515,7 @@ xfs_attr_leaf_setflag(xfs_da_args_t *args)
ASSERT(bp != NULL);
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index < be16_to_cpu(leaf->hdr.count));
ASSERT(args->index >= 0);
entry = &leaf->entries[ args->index ];
@@ -2585,13 +2585,13 @@ xfs_attr_leaf_flipflags(xfs_da_args_t *args)
}
leaf1 = bp1->data;
- ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index < be16_to_cpu(leaf1->hdr.count));
ASSERT(args->index >= 0);
entry1 = &leaf1->entries[ args->index ];
leaf2 = bp2->data;
- ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
ASSERT(args->index2 < be16_to_cpu(leaf2->hdr.count));
ASSERT(args->index2 >= 0);
entry2 = &leaf2->entries[ args->index2 ];
@@ -2689,9 +2689,9 @@ xfs_attr_root_inactive(xfs_trans_t **trans, xfs_inode_t *dp)
* This is a depth-first traversal!
*/
info = bp->data;
- if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
+ if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
error = xfs_attr_node_inactive(trans, dp, bp, 1);
- } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
+ } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
error = xfs_attr_leaf_inactive(trans, dp, bp);
} else {
error = XFS_ERROR(EIO);
@@ -2739,7 +2739,7 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
}
node = bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
parent_blkno = xfs_da_blkno(bp); /* save for re-read later */
count = be16_to_cpu(node->hdr.count);
if (!count) {
@@ -2773,10 +2773,10 @@ xfs_attr_node_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp,
* Invalidate the subtree, however we have to.
*/
info = child_bp->data;
- if (be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC) {
+ if (info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
error = xfs_attr_node_inactive(trans, dp,
child_bp, level+1);
- } else if (be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC) {
+ } else if (info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC)) {
error = xfs_attr_leaf_inactive(trans, dp,
child_bp);
} else {
@@ -2836,7 +2836,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
int error, count, size, tmp, i;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
/*
* Count the number of "remote" value extents.
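The assertion rewrites throughout xfs_attr_leaf.c above all follow one pattern: instead of byte-swapping the on-disk big-endian field with be16_to_cpu() on every check, the compile-time constant is swapped once with cpu_to_be16() and compared in disk byte order. A minimal userspace sketch of the idea, not part of the patch, with htons()/ntohs() standing in for the kernel helpers and an invented magic value:

#include <assert.h>
#include <stdint.h>
#include <arpa/inet.h>  /* htons()/ntohs() stand in for cpu_to_be16()/be16_to_cpu() */

#define ATTR_LEAF_MAGIC 0xfbee          /* invented magic value for this sketch */

struct blk_hdr {
        uint16_t magic;                 /* stored big-endian on disk */
};

int main(void)
{
        struct blk_hdr hdr = { .magic = htons(ATTR_LEAF_MAGIC) };

        /* Old form: byte-swap the on-disk field for every comparison. */
        assert(ntohs(hdr.magic) == ATTR_LEAF_MAGIC);

        /* New form: swap the constant instead; htons() of a constant
         * folds at compile time, so the check needs no runtime swap. */
        assert(hdr.magic == htons(ATTR_LEAF_MAGIC));
        return 0;
}

Either form is correct on both endiannesses; the new form simply moves the conversion to the side the compiler can evaluate once.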
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index e546a33214c..c51a3f90363 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -29,15 +29,11 @@
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_mount.h"
#include "xfs_itable.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
@@ -94,6 +90,7 @@ xfs_bmap_add_attrfork_local(
*/
STATIC int /* error */
xfs_bmap_add_extent_delay_real(
+ struct xfs_trans *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t *idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
@@ -439,6 +436,7 @@ xfs_bmap_add_attrfork_local(
*/
STATIC int /* error */
xfs_bmap_add_extent(
+ struct xfs_trans *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t *idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
@@ -524,7 +522,7 @@ xfs_bmap_add_extent(
if (cur)
ASSERT(cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL);
- error = xfs_bmap_add_extent_delay_real(ip,
+ error = xfs_bmap_add_extent_delay_real(tp, ip,
idx, &cur, new, &da_new,
first, flist, &logflags);
} else {
@@ -561,7 +559,7 @@ xfs_bmap_add_extent(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip, first,
+ error = xfs_bmap_extents_to_btree(tp, ip, first,
flist, &cur, da_old > 0, &tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
@@ -604,6 +602,7 @@ done:
*/
STATIC int /* error */
xfs_bmap_add_extent_delay_real(
+ struct xfs_trans *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_extnum_t *idx, /* extent number to update/insert */
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
@@ -901,7 +900,7 @@ xfs_bmap_add_extent_delay_real(
}
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
ip->i_d.di_nextents > ip->i_df.if_ext_max) {
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
+ error = xfs_bmap_extents_to_btree(tp, ip,
first, flist, &cur, 1, &tmp_rval,
XFS_DATA_FORK);
rval |= tmp_rval;
@@ -984,7 +983,7 @@ xfs_bmap_add_extent_delay_real(
}
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
ip->i_d.di_nextents > ip->i_df.if_ext_max) {
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
+ error = xfs_bmap_extents_to_btree(tp, ip,
first, flist, &cur, 1, &tmp_rval,
XFS_DATA_FORK);
rval |= tmp_rval;
@@ -1052,7 +1051,7 @@ xfs_bmap_add_extent_delay_real(
}
if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
ip->i_d.di_nextents > ip->i_df.if_ext_max) {
- error = xfs_bmap_extents_to_btree(ip->i_transp, ip,
+ error = xfs_bmap_extents_to_btree(tp, ip,
first, flist, &cur, 1, &tmp_rval,
XFS_DATA_FORK);
rval |= tmp_rval;
@@ -2871,8 +2870,8 @@ xfs_bmap_del_extent(
len = del->br_blockcount;
do_div(bno, mp->m_sb.sb_rextsize);
do_div(len, mp->m_sb.sb_rextsize);
- if ((error = xfs_rtfree_extent(ip->i_transp, bno,
- (xfs_extlen_t)len)))
+ error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+ if (error)
goto done;
do_fx = 0;
nblks = len * mp->m_sb.sb_rextsize;
@@ -4080,7 +4079,7 @@ xfs_bmap_sanity_check(
{
struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
- if (be32_to_cpu(block->bb_magic) != XFS_BMAP_MAGIC ||
+ if (block->bb_magic != cpu_to_be32(XFS_BMAP_MAGIC) ||
be16_to_cpu(block->bb_level) != level ||
be16_to_cpu(block->bb_numrecs) == 0 ||
be16_to_cpu(block->bb_numrecs) > mp->m_bmap_dmxr[level != 0])
@@ -4662,7 +4661,7 @@ xfs_bmapi(
if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
got.br_state = XFS_EXT_UNWRITTEN;
}
- error = xfs_bmap_add_extent(ip, &lastx, &cur, &got,
+ error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &got,
firstblock, flist, &tmp_logflags,
whichfork);
logflags |= tmp_logflags;
@@ -4763,7 +4762,7 @@ xfs_bmapi(
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
? XFS_EXT_NORM
: XFS_EXT_UNWRITTEN;
- error = xfs_bmap_add_extent(ip, &lastx, &cur, mval,
+ error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, mval,
firstblock, flist, &tmp_logflags,
whichfork);
logflags |= tmp_logflags;
@@ -5117,7 +5116,7 @@ xfs_bunmapi(
del.br_blockcount = mod;
}
del.br_state = XFS_EXT_UNWRITTEN;
- error = xfs_bmap_add_extent(ip, &lastx, &cur, &del,
+ error = xfs_bmap_add_extent(tp, ip, &lastx, &cur, &del,
firstblock, flist, &logflags,
XFS_DATA_FORK);
if (error)
@@ -5175,18 +5174,18 @@ xfs_bunmapi(
}
prev.br_state = XFS_EXT_UNWRITTEN;
lastx--;
- error = xfs_bmap_add_extent(ip, &lastx, &cur,
- &prev, firstblock, flist, &logflags,
- XFS_DATA_FORK);
+ error = xfs_bmap_add_extent(tp, ip, &lastx,
+ &cur, &prev, firstblock, flist,
+ &logflags, XFS_DATA_FORK);
if (error)
goto error0;
goto nodelete;
} else {
ASSERT(del.br_state == XFS_EXT_NORM);
del.br_state = XFS_EXT_UNWRITTEN;
- error = xfs_bmap_add_extent(ip, &lastx, &cur,
- &del, firstblock, flist, &logflags,
- XFS_DATA_FORK);
+ error = xfs_bmap_add_extent(tp, ip, &lastx,
+ &cur, &del, firstblock, flist,
+ &logflags, XFS_DATA_FORK);
if (error)
goto error0;
goto nodelete;
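The other change running through xfs_bmap.c is threading a struct xfs_trans pointer through xfs_bmap_add_extent() and its callees instead of reading the cached ip->i_transp. A small before/after sketch of that shape, using stub types invented for illustration rather than the real kernel structures:

#include <stdio.h>

/* Stub types standing in for the kernel structures. */
struct xfs_trans { int t_id; };
struct xfs_inode { struct xfs_trans *i_transp; /* legacy cached pointer */ };

static int do_log(struct xfs_trans *tp, struct xfs_inode *ip)
{
        printf("logging inode %p in transaction %d\n", (void *)ip, tp->t_id);
        return 0;
}

/* Old shape: the callee digs the transaction out of the inode, so the
 * inode can only ever be tied to one in-flight transaction. */
static int add_extent_old(struct xfs_inode *ip)
{
        return do_log(ip->i_transp, ip);
}

/* New shape: the caller passes the transaction it holds, which lets
 * the cached i_transp field be removed later. */
static int add_extent_new(struct xfs_trans *tp, struct xfs_inode *ip)
{
        return do_log(tp, ip);
}

int main(void)
{
        struct xfs_trans tp = { .t_id = 1 };
        struct xfs_inode ip = { .i_transp = &tp };

        add_extent_old(&ip);
        add_extent_new(&tp, &ip);
        return 0;
}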
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 87d3c10b695..e2f5d59cbea 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -33,7 +33,6 @@
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
@@ -425,10 +424,10 @@ xfs_bmbt_to_bmdr(
xfs_bmbt_key_t *tkp;
__be64 *tpp;
- ASSERT(be32_to_cpu(rblock->bb_magic) == XFS_BMAP_MAGIC);
- ASSERT(be64_to_cpu(rblock->bb_u.l.bb_leftsib) == NULLDFSBNO);
- ASSERT(be64_to_cpu(rblock->bb_u.l.bb_rightsib) == NULLDFSBNO);
- ASSERT(be16_to_cpu(rblock->bb_level) > 0);
+ ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
+ ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO));
+ ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO));
+ ASSERT(rblock->bb_level != 0);
dblock->bb_level = rblock->bb_level;
dblock->bb_numrecs = rblock->bb_numrecs;
dmxr = xfs_bmdr_maxrecs(mp, dblocklen, 0);
@@ -732,95 +731,6 @@ xfs_bmbt_recs_inorder(
}
#endif /* DEBUG */
-#ifdef XFS_BTREE_TRACE
-ktrace_t *xfs_bmbt_trace_buf;
-
-STATIC void
-xfs_bmbt_trace_enter(
- struct xfs_btree_cur *cur,
- const char *func,
- char *s,
- int type,
- int line,
- __psunsigned_t a0,
- __psunsigned_t a1,
- __psunsigned_t a2,
- __psunsigned_t a3,
- __psunsigned_t a4,
- __psunsigned_t a5,
- __psunsigned_t a6,
- __psunsigned_t a7,
- __psunsigned_t a8,
- __psunsigned_t a9,
- __psunsigned_t a10)
-{
- struct xfs_inode *ip = cur->bc_private.b.ip;
- int whichfork = cur->bc_private.b.whichfork;
-
- ktrace_enter(xfs_bmbt_trace_buf,
- (void *)((__psint_t)type | (whichfork << 8) | (line << 16)),
- (void *)func, (void *)s, (void *)ip, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
-}
-
-STATIC void
-xfs_bmbt_trace_cursor(
- struct xfs_btree_cur *cur,
- __uint32_t *s0,
- __uint64_t *l0,
- __uint64_t *l1)
-{
- struct xfs_bmbt_rec_host r;
-
- xfs_bmbt_set_all(&r, &cur->bc_rec.b);
-
- *s0 = (cur->bc_nlevels << 24) |
- (cur->bc_private.b.flags << 16) |
- cur->bc_private.b.allocated;
- *l0 = r.l0;
- *l1 = r.l1;
-}
-
-STATIC void
-xfs_bmbt_trace_key(
- struct xfs_btree_cur *cur,
- union xfs_btree_key *key,
- __uint64_t *l0,
- __uint64_t *l1)
-{
- *l0 = be64_to_cpu(key->bmbt.br_startoff);
- *l1 = 0;
-}
-
-/* Endian flipping versions of the bmbt extraction functions */
-STATIC void
-xfs_bmbt_disk_get_all(
- xfs_bmbt_rec_t *r,
- xfs_bmbt_irec_t *s)
-{
- __xfs_bmbt_get_all(get_unaligned_be64(&r->l0),
- get_unaligned_be64(&r->l1), s);
-}
-
-STATIC void
-xfs_bmbt_trace_record(
- struct xfs_btree_cur *cur,
- union xfs_btree_rec *rec,
- __uint64_t *l0,
- __uint64_t *l1,
- __uint64_t *l2)
-{
- struct xfs_bmbt_irec irec;
-
- xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
- *l0 = irec.br_startoff;
- *l1 = irec.br_startblock;
- *l2 = irec.br_blockcount;
-}
-#endif /* XFS_BTREE_TRACE */
-
static const struct xfs_btree_ops xfs_bmbt_ops = {
.rec_len = sizeof(xfs_bmbt_rec_t),
.key_len = sizeof(xfs_bmbt_key_t),
@@ -837,18 +747,10 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
.key_diff = xfs_bmbt_key_diff,
-
#ifdef DEBUG
.keys_inorder = xfs_bmbt_keys_inorder,
.recs_inorder = xfs_bmbt_recs_inorder,
#endif
-
-#ifdef XFS_BTREE_TRACE
- .trace_enter = xfs_bmbt_trace_enter,
- .trace_cursor = xfs_bmbt_trace_cursor,
- .trace_key = xfs_bmbt_trace_key,
- .trace_record = xfs_bmbt_trace_record,
-#endif
};
/*
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 2f9e97c128a..cabf4b5604a 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -32,7 +32,6 @@
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -66,11 +65,11 @@ xfs_btree_check_lblock(
be16_to_cpu(block->bb_numrecs) <=
cur->bc_ops->get_maxrecs(cur, level) &&
block->bb_u.l.bb_leftsib &&
- (be64_to_cpu(block->bb_u.l.bb_leftsib) == NULLDFSBNO ||
+ (block->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO) ||
XFS_FSB_SANITY_CHECK(mp,
be64_to_cpu(block->bb_u.l.bb_leftsib))) &&
block->bb_u.l.bb_rightsib &&
- (be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO ||
+ (block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO) ||
XFS_FSB_SANITY_CHECK(mp,
be64_to_cpu(block->bb_u.l.bb_rightsib)));
if (unlikely(XFS_TEST_ERROR(!lblock_ok, mp,
@@ -105,10 +104,10 @@ xfs_btree_check_sblock(
be16_to_cpu(block->bb_level) == level &&
be16_to_cpu(block->bb_numrecs) <=
cur->bc_ops->get_maxrecs(cur, level) &&
- (be32_to_cpu(block->bb_u.s.bb_leftsib) == NULLAGBLOCK ||
+ (block->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) ||
be32_to_cpu(block->bb_u.s.bb_leftsib) < agflen) &&
block->bb_u.s.bb_leftsib &&
- (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK ||
+ (block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK) ||
be32_to_cpu(block->bb_u.s.bb_rightsib) < agflen) &&
block->bb_u.s.bb_rightsib;
if (unlikely(XFS_TEST_ERROR(!sblock_ok, cur->bc_mp,
@@ -511,9 +510,9 @@ xfs_btree_islastblock(
block = xfs_btree_get_block(cur, level, &bp);
xfs_btree_check_block(cur, block, level, bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- return be64_to_cpu(block->bb_u.l.bb_rightsib) == NULLDFSBNO;
+ return block->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO);
else
- return be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK;
+ return block->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK);
}
/*
@@ -777,14 +776,14 @@ xfs_btree_setbuf(
b = XFS_BUF_TO_BLOCK(bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- if (be64_to_cpu(b->bb_u.l.bb_leftsib) == NULLDFSBNO)
+ if (b->bb_u.l.bb_leftsib == cpu_to_be64(NULLDFSBNO))
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
- if (be64_to_cpu(b->bb_u.l.bb_rightsib) == NULLDFSBNO)
+ if (b->bb_u.l.bb_rightsib == cpu_to_be64(NULLDFSBNO))
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
} else {
- if (be32_to_cpu(b->bb_u.s.bb_leftsib) == NULLAGBLOCK)
+ if (b->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK))
cur->bc_ra[lev] |= XFS_BTCUR_LEFTRA;
- if (be32_to_cpu(b->bb_u.s.bb_rightsib) == NULLAGBLOCK)
+ if (b->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
cur->bc_ra[lev] |= XFS_BTCUR_RIGHTRA;
}
}
@@ -795,9 +794,9 @@ xfs_btree_ptr_is_null(
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- return be64_to_cpu(ptr->l) == NULLDFSBNO;
+ return ptr->l == cpu_to_be64(NULLDFSBNO);
else
- return be32_to_cpu(ptr->s) == NULLAGBLOCK;
+ return ptr->s == cpu_to_be32(NULLAGBLOCK);
}
STATIC void
@@ -923,12 +922,12 @@ xfs_btree_ptr_to_daddr(
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO);
+ ASSERT(ptr->l != cpu_to_be64(NULLDFSBNO));
return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
} else {
ASSERT(cur->bc_private.a.agno != NULLAGNUMBER);
- ASSERT(be32_to_cpu(ptr->s) != NULLAGBLOCK);
+ ASSERT(ptr->s != cpu_to_be32(NULLAGBLOCK));
return XFS_AGB_TO_DADDR(cur->bc_mp, cur->bc_private.a.agno,
be32_to_cpu(ptr->s));
diff --git a/fs/xfs/xfs_btree.h b/fs/xfs/xfs_btree.h
index 82fafc66bd1..8d05a6a46ce 100644
--- a/fs/xfs/xfs_btree.h
+++ b/fs/xfs/xfs_btree.h
@@ -199,25 +199,6 @@ struct xfs_btree_ops {
union xfs_btree_rec *r1,
union xfs_btree_rec *r2);
#endif
-
- /* btree tracing */
-#ifdef XFS_BTREE_TRACE
- void (*trace_enter)(struct xfs_btree_cur *, const char *,
- char *, int, int, __psunsigned_t,
- __psunsigned_t, __psunsigned_t,
- __psunsigned_t, __psunsigned_t,
- __psunsigned_t, __psunsigned_t,
- __psunsigned_t, __psunsigned_t,
- __psunsigned_t, __psunsigned_t);
- void (*trace_cursor)(struct xfs_btree_cur *, __uint32_t *,
- __uint64_t *, __uint64_t *);
- void (*trace_key)(struct xfs_btree_cur *,
- union xfs_btree_key *, __uint64_t *,
- __uint64_t *);
- void (*trace_record)(struct xfs_btree_cur *,
- union xfs_btree_rec *, __uint64_t *,
- __uint64_t *, __uint64_t *);
-#endif
};
/*
@@ -452,4 +433,23 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
(XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
+/*
+ * Trace hooks. Currently not implemented as they need to be ported
+ * over to the generic tracing functionality, which is some effort.
+ *
+ * i,j = integer (32 bit)
+ * b = btree block buffer (xfs_buf_t)
+ * p = btree ptr
+ * r = btree record
+ * k = btree key
+ */
+#define XFS_BTREE_TRACE_ARGBI(c, b, i)
+#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
+#define XFS_BTREE_TRACE_ARGI(c, i)
+#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
+#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
+#define XFS_BTREE_TRACE_ARGIK(c, i, k)
+#define XFS_BTREE_TRACE_ARGR(c, r)
+#define XFS_BTREE_TRACE_CURSOR(c, t)
+
#endif /* __XFS_BTREE_H__ */
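The empty XFS_BTREE_TRACE_* macros added above keep every call site compiling while expanding to nothing, so the dead ktrace backend can be deleted wholesale in the following hunks. A small standalone sketch of the same stub-macro pattern, with an invented TRACE_CURSOR macro:

#include <stdio.h>

#ifdef DEMO_TRACE
#define TRACE_CURSOR(cur, type) \
        printf("cursor %p event %d at %s:%d\n", \
               (void *)(cur), (type), __func__, __LINE__)
#else
#define TRACE_CURSOR(cur, type)         /* expands to nothing */
#endif

int main(void)
{
        int cursor = 42;

        /* With DEMO_TRACE unset this whole statement compiles away. */
        TRACE_CURSOR(&cursor, 1);
        (void)cursor;                   /* silence unused warning when tracing is off */
        return 0;
}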
diff --git a/fs/xfs/xfs_btree_trace.c b/fs/xfs/xfs_btree_trace.c
deleted file mode 100644
index 44ff942a0fd..00000000000
--- a/fs/xfs/xfs_btree_trace.c
+++ /dev/null
@@ -1,249 +0,0 @@
-/*
- * Copyright (c) 2008 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#include "xfs.h"
-#include "xfs_types.h"
-#include "xfs_inum.h"
-#include "xfs_bmap_btree.h"
-#include "xfs_alloc_btree.h"
-#include "xfs_ialloc_btree.h"
-#include "xfs_inode.h"
-#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
-
-STATIC void
-xfs_btree_trace_ptr(
- struct xfs_btree_cur *cur,
- union xfs_btree_ptr ptr,
- __psunsigned_t *high,
- __psunsigned_t *low)
-{
- if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- __u64 val = be64_to_cpu(ptr.l);
- *high = val >> 32;
- *low = (int)val;
- } else {
- *high = 0;
- *low = be32_to_cpu(ptr.s);
- }
-}
-
-/*
- * Add a trace buffer entry for arguments, for a buffer & 1 integer arg.
- */
-void
-xfs_btree_trace_argbi(
- const char *func,
- struct xfs_btree_cur *cur,
- struct xfs_buf *b,
- int i,
- int line)
-{
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBI,
- line, (__psunsigned_t)b, i, 0, 0, 0, 0, 0,
- 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for a buffer & 2 integer args.
- */
-void
-xfs_btree_trace_argbii(
- const char *func,
- struct xfs_btree_cur *cur,
- struct xfs_buf *b,
- int i0,
- int i1,
- int line)
-{
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGBII,
- line, (__psunsigned_t)b, i0, i1, 0, 0, 0, 0,
- 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for 3 block-length args
- * and an integer arg.
- */
-void
-xfs_btree_trace_argfffi(
- const char *func,
- struct xfs_btree_cur *cur,
- xfs_dfiloff_t o,
- xfs_dfsbno_t b,
- xfs_dfilblks_t i,
- int j,
- int line)
-{
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGFFFI,
- line,
- o >> 32, (int)o,
- b >> 32, (int)b,
- i >> 32, (int)i,
- (int)j, 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for one integer arg.
- */
-void
-xfs_btree_trace_argi(
- const char *func,
- struct xfs_btree_cur *cur,
- int i,
- int line)
-{
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGI,
- line, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for int, fsblock, key.
- */
-void
-xfs_btree_trace_argipk(
- const char *func,
- struct xfs_btree_cur *cur,
- int i,
- union xfs_btree_ptr ptr,
- union xfs_btree_key *key,
- int line)
-{
- __psunsigned_t high, low;
- __uint64_t l0, l1;
-
- xfs_btree_trace_ptr(cur, ptr, &high, &low);
- cur->bc_ops->trace_key(cur, key, &l0, &l1);
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPK,
- line, i, high, low,
- l0 >> 32, (int)l0,
- l1 >> 32, (int)l1,
- 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for int, fsblock, rec.
- */
-void
-xfs_btree_trace_argipr(
- const char *func,
- struct xfs_btree_cur *cur,
- int i,
- union xfs_btree_ptr ptr,
- union xfs_btree_rec *rec,
- int line)
-{
- __psunsigned_t high, low;
- __uint64_t l0, l1, l2;
-
- xfs_btree_trace_ptr(cur, ptr, &high, &low);
- cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2);
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIPR,
- line, i,
- high, low,
- l0 >> 32, (int)l0,
- l1 >> 32, (int)l1,
- l2 >> 32, (int)l2,
- 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for int, key.
- */
-void
-xfs_btree_trace_argik(
- const char *func,
- struct xfs_btree_cur *cur,
- int i,
- union xfs_btree_key *key,
- int line)
-{
- __uint64_t l0, l1;
-
- cur->bc_ops->trace_key(cur, key, &l0, &l1);
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGIK,
- line, i,
- l0 >> 32, (int)l0,
- l1 >> 32, (int)l1,
- 0, 0, 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for arguments, for record.
- */
-void
-xfs_btree_trace_argr(
- const char *func,
- struct xfs_btree_cur *cur,
- union xfs_btree_rec *rec,
- int line)
-{
- __uint64_t l0, l1, l2;
-
- cur->bc_ops->trace_record(cur, rec, &l0, &l1, &l2);
- cur->bc_ops->trace_enter(cur, func, XBT_ARGS, XFS_BTREE_KTRACE_ARGR,
- line,
- l0 >> 32, (int)l0,
- l1 >> 32, (int)l1,
- l2 >> 32, (int)l2,
- 0, 0, 0, 0, 0);
-}
-
-/*
- * Add a trace buffer entry for the cursor/operation.
- */
-void
-xfs_btree_trace_cursor(
- const char *func,
- struct xfs_btree_cur *cur,
- int type,
- int line)
-{
- __uint32_t s0;
- __uint64_t l0, l1;
- char *s;
-
- switch (type) {
- case XBT_ARGS:
- s = "args";
- break;
- case XBT_ENTRY:
- s = "entry";
- break;
- case XBT_ERROR:
- s = "error";
- break;
- case XBT_EXIT:
- s = "exit";
- break;
- default:
- s = "unknown";
- break;
- }
-
- cur->bc_ops->trace_cursor(cur, &s0, &l0, &l1);
- cur->bc_ops->trace_enter(cur, func, s, XFS_BTREE_KTRACE_CUR, line,
- s0,
- l0 >> 32, (int)l0,
- l1 >> 32, (int)l1,
- (__psunsigned_t)cur->bc_bufs[0],
- (__psunsigned_t)cur->bc_bufs[1],
- (__psunsigned_t)cur->bc_bufs[2],
- (__psunsigned_t)cur->bc_bufs[3],
- (cur->bc_ptrs[0] << 16) | cur->bc_ptrs[1],
- (cur->bc_ptrs[2] << 16) | cur->bc_ptrs[3]);
-}
diff --git a/fs/xfs/xfs_btree_trace.h b/fs/xfs/xfs_btree_trace.h
deleted file mode 100644
index 2d8a309873e..00000000000
--- a/fs/xfs/xfs_btree_trace.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2008 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_BTREE_TRACE_H__
-#define __XFS_BTREE_TRACE_H__
-
-struct xfs_btree_cur;
-struct xfs_buf;
-
-
-/*
- * Trace hooks.
- * i,j = integer (32 bit)
- * b = btree block buffer (xfs_buf_t)
- * p = btree ptr
- * r = btree record
- * k = btree key
- */
-
-#ifdef XFS_BTREE_TRACE
-
-/*
- * Trace buffer entry types.
- */
-#define XFS_BTREE_KTRACE_ARGBI 1
-#define XFS_BTREE_KTRACE_ARGBII 2
-#define XFS_BTREE_KTRACE_ARGFFFI 3
-#define XFS_BTREE_KTRACE_ARGI 4
-#define XFS_BTREE_KTRACE_ARGIPK 5
-#define XFS_BTREE_KTRACE_ARGIPR 6
-#define XFS_BTREE_KTRACE_ARGIK 7
-#define XFS_BTREE_KTRACE_ARGR 8
-#define XFS_BTREE_KTRACE_CUR 9
-
-/*
- * Sub-types for cursor traces.
- */
-#define XBT_ARGS 0
-#define XBT_ENTRY 1
-#define XBT_ERROR 2
-#define XBT_EXIT 3
-
-void xfs_btree_trace_argbi(const char *, struct xfs_btree_cur *,
- struct xfs_buf *, int, int);
-void xfs_btree_trace_argbii(const char *, struct xfs_btree_cur *,
- struct xfs_buf *, int, int, int);
-void xfs_btree_trace_argi(const char *, struct xfs_btree_cur *, int, int);
-void xfs_btree_trace_argipk(const char *, struct xfs_btree_cur *, int,
- union xfs_btree_ptr, union xfs_btree_key *, int);
-void xfs_btree_trace_argipr(const char *, struct xfs_btree_cur *, int,
- union xfs_btree_ptr, union xfs_btree_rec *, int);
-void xfs_btree_trace_argik(const char *, struct xfs_btree_cur *, int,
- union xfs_btree_key *, int);
-void xfs_btree_trace_argr(const char *, struct xfs_btree_cur *,
- union xfs_btree_rec *, int);
-void xfs_btree_trace_cursor(const char *, struct xfs_btree_cur *, int, int);
-
-#define XFS_BTREE_TRACE_ARGBI(c, b, i) \
- xfs_btree_trace_argbi(__func__, c, b, i, __LINE__)
-#define XFS_BTREE_TRACE_ARGBII(c, b, i, j) \
- xfs_btree_trace_argbii(__func__, c, b, i, j, __LINE__)
-#define XFS_BTREE_TRACE_ARGI(c, i) \
- xfs_btree_trace_argi(__func__, c, i, __LINE__)
-#define XFS_BTREE_TRACE_ARGIPK(c, i, p, k) \
- xfs_btree_trace_argipk(__func__, c, i, p, k, __LINE__)
-#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r) \
- xfs_btree_trace_argipr(__func__, c, i, p, r, __LINE__)
-#define XFS_BTREE_TRACE_ARGIK(c, i, k) \
- xfs_btree_trace_argik(__func__, c, i, k, __LINE__)
-#define XFS_BTREE_TRACE_ARGR(c, r) \
- xfs_btree_trace_argr(__func__, c, r, __LINE__)
-#define XFS_BTREE_TRACE_CURSOR(c, t) \
- xfs_btree_trace_cursor(__func__, c, t, __LINE__)
-#else
-#define XFS_BTREE_TRACE_ARGBI(c, b, i)
-#define XFS_BTREE_TRACE_ARGBII(c, b, i, j)
-#define XFS_BTREE_TRACE_ARGI(c, i)
-#define XFS_BTREE_TRACE_ARGIPK(c, i, p, s)
-#define XFS_BTREE_TRACE_ARGIPR(c, i, p, r)
-#define XFS_BTREE_TRACE_ARGIK(c, i, k)
-#define XFS_BTREE_TRACE_ARGR(c, r)
-#define XFS_BTREE_TRACE_CURSOR(c, t)
-#endif /* XFS_BTREE_TRACE */
-
-#endif /* __XFS_BTREE_TRACE_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 7b7e005e3dc..88492916c3d 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -90,13 +90,11 @@ xfs_buf_item_flush_log_debug(
uint first,
uint last)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
uint nbytes;
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
- if ((bip == NULL) || (bip->bli_item.li_type != XFS_LI_BUF)) {
+ if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
return;
- }
ASSERT(bip->bli_logged != NULL);
nbytes = last - first + 1;
@@ -408,7 +406,7 @@ xfs_buf_item_unpin(
int stale = bip->bli_flags & XFS_BLI_STALE;
int freed;
- ASSERT(XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *) == bip);
+ ASSERT(bp->b_fspriv == bip);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_buf_item_unpin(bip);
@@ -420,7 +418,7 @@ xfs_buf_item_unpin(
if (freed && stale) {
ASSERT(bip->bli_flags & XFS_BLI_STALE);
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
ASSERT(!(XFS_BUF_ISDELAYWRITE(bp)));
ASSERT(XFS_BUF_ISSTALE(bp));
ASSERT(bip->bli_format.blf_flags & XFS_BLF_CANCEL);
@@ -443,7 +441,7 @@ xfs_buf_item_unpin(
* Since the transaction no longer refers to the buffer,
* the buffer should no longer refer to the transaction.
*/
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ bp->b_transp = NULL;
}
/*
@@ -454,13 +452,13 @@ xfs_buf_item_unpin(
*/
if (bip->bli_flags & XFS_BLI_STALE_INODE) {
xfs_buf_do_callbacks(bp);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
+ bp->b_fspriv = NULL;
+ bp->b_iodone = NULL;
} else {
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, (xfs_log_item_t *)bip);
xfs_buf_item_relse(bp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) == NULL);
+ ASSERT(bp->b_fspriv == NULL);
}
xfs_buf_relse(bp);
}
@@ -483,7 +481,7 @@ xfs_buf_item_trylock(
if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
- if (!XFS_BUF_CPSEMA(bp))
+ if (!xfs_buf_trylock(bp))
return XFS_ITEM_LOCKED;
/* take a reference to the buffer. */
@@ -525,7 +523,7 @@ xfs_buf_item_unlock(
uint hold;
/* Clear the buffer's association with this transaction. */
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ bp->b_transp = NULL;
/*
* If this is a transaction abort, don't return early. Instead, allow
@@ -684,7 +682,7 @@ xfs_buf_item_init(
xfs_buf_t *bp,
xfs_mount_t *mp)
{
- xfs_log_item_t *lip;
+ xfs_log_item_t *lip = bp->b_fspriv;
xfs_buf_log_item_t *bip;
int chunks;
int map_size;
@@ -696,12 +694,8 @@ xfs_buf_item_init(
* nothing to do here so return.
*/
ASSERT(bp->b_target->bt_mount == mp);
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- if (lip->li_type == XFS_LI_BUF) {
- return;
- }
- }
+ if (lip != NULL && lip->li_type == XFS_LI_BUF)
+ return;
/*
* chunks is the number of XFS_BLF_CHUNK size pieces
@@ -740,11 +734,9 @@ xfs_buf_item_init(
* Put the buf item into the list of items attached to the
* buffer at the front.
*/
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- bip->bli_item.li_bio_list =
- XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- }
- XFS_BUF_SET_FSPRIVATE(bp, bip);
+ if (bp->b_fspriv)
+ bip->bli_item.li_bio_list = bp->b_fspriv;
+ bp->b_fspriv = bip;
}
@@ -876,12 +868,11 @@ xfs_buf_item_relse(
trace_xfs_buf_item_relse(bp, _RET_IP_);
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
- XFS_BUF_SET_FSPRIVATE(bp, bip->bli_item.li_bio_list);
- if ((XFS_BUF_FSPRIVATE(bp, void *) == NULL) &&
- (XFS_BUF_IODONE_FUNC(bp) != NULL)) {
- XFS_BUF_CLR_IODONE_FUNC(bp);
- }
+ bip = bp->b_fspriv;
+ bp->b_fspriv = bip->bli_item.li_bio_list;
+ if (bp->b_fspriv == NULL)
+ bp->b_iodone = NULL;
+
xfs_buf_rele(bp);
xfs_buf_item_free(bip);
}
@@ -905,20 +896,20 @@ xfs_buf_attach_iodone(
xfs_log_item_t *head_lip;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
lip->li_cb = cb;
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- head_lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ head_lip = bp->b_fspriv;
+ if (head_lip) {
lip->li_bio_list = head_lip->li_bio_list;
head_lip->li_bio_list = lip;
} else {
- XFS_BUF_SET_FSPRIVATE(bp, lip);
+ bp->b_fspriv = lip;
}
- ASSERT((XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks) ||
- (XFS_BUF_IODONE_FUNC(bp) == NULL));
- XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
+ ASSERT(bp->b_iodone == NULL ||
+ bp->b_iodone == xfs_buf_iodone_callbacks);
+ bp->b_iodone = xfs_buf_iodone_callbacks;
}
/*
@@ -939,8 +930,8 @@ xfs_buf_do_callbacks(
{
struct xfs_log_item *lip;
- while ((lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *)) != NULL) {
- XFS_BUF_SET_FSPRIVATE(bp, lip->li_bio_list);
+ while ((lip = bp->b_fspriv) != NULL) {
+ bp->b_fspriv = lip->li_bio_list;
ASSERT(lip->li_cb != NULL);
/*
* Clear the next pointer so we don't have any
@@ -1007,7 +998,7 @@ xfs_buf_iodone_callbacks(
XFS_BUF_DONE(bp);
XFS_BUF_SET_START(bp);
}
- ASSERT(XFS_BUF_IODONE_FUNC(bp));
+ ASSERT(bp->b_iodone != NULL);
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
xfs_buf_relse(bp);
return;
@@ -1026,8 +1017,8 @@ xfs_buf_iodone_callbacks(
do_callbacks:
xfs_buf_do_callbacks(bp);
- XFS_BUF_SET_FSPRIVATE(bp, NULL);
- XFS_BUF_CLR_IODONE_FUNC(bp);
+ bp->b_fspriv = NULL;
+ bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
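The xfs_buf_item.c hunks above replace the XFS_BUF_FSPRIVATE()/XFS_BUF_SET_FSPRIVATE() accessor macros with direct reads and writes of the b_fspriv, b_transp and b_iodone fields. A sketch of the difference, with stand-in types invented for illustration: the old macro buries a cast at every call site, while plain member access reads directly:

#include <stdio.h>

struct log_item { int li_type; };

struct buf {
        void *b_fspriv;                 /* first attached log item */
        void (*b_iodone)(struct buf *); /* I/O completion callback */
};

/* Old style: the accessor macro hides a cast at every call site. */
#define BUF_FSPRIVATE(bp, type)  ((type)(bp)->b_fspriv)

int main(void)
{
        struct buf bp = { 0 };
        struct log_item li = { .li_type = 0x123c };
        struct log_item *lip;

        /* New style: plain member access, nothing hidden. */
        bp.b_fspriv = &li;
        bp.b_iodone = NULL;

        lip = BUF_FSPRIVATE(&bp, struct log_item *);
        printf("li_type = 0x%x\n", lip->li_type);
        return 0;
}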
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 6102ac6d1df..2925726529f 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -24,11 +24,12 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
@@ -36,10 +37,6 @@
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
-#include "xfs_dir2_node.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -89,7 +86,7 @@ STATIC void xfs_da_node_unbalance(xfs_da_state_t *state,
*/
STATIC uint xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count);
STATIC int xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp);
-STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra);
+STATIC xfs_dabuf_t *xfs_da_buf_make(int nbuf, xfs_buf_t **bps);
STATIC int xfs_da_blk_unlink(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk);
@@ -321,11 +318,11 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
ASSERT(bp != NULL);
node = bp->data;
oldroot = blk1->bp->data;
- if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC) {
+ if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC)) {
size = (int)((char *)&oldroot->btree[be16_to_cpu(oldroot->hdr.count)] -
(char *)oldroot);
} else {
- ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
leaf = (xfs_dir2_leaf_t *)oldroot;
size = (int)((char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] -
(char *)leaf);
@@ -352,7 +349,7 @@ xfs_da_root_split(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
node->hdr.count = cpu_to_be16(2);
#ifdef DEBUG
- if (be16_to_cpu(oldroot->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC) {
+ if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
ASSERT(blk1->blkno >= mp->m_dirleafblk &&
blk1->blkno < mp->m_dirfreeblk);
ASSERT(blk2->blkno >= mp->m_dirleafblk &&
@@ -384,7 +381,7 @@ xfs_da_node_split(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
int useextra;
node = oldblk->bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
/*
* With V2 dirs the extra block is data or freespace.
@@ -483,8 +480,8 @@ xfs_da_node_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
node1 = node2;
node2 = tmpnode;
}
- ASSERT(be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC);
- ASSERT(be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
+ ASSERT(node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
count = (be16_to_cpu(node1->hdr.count) - be16_to_cpu(node2->hdr.count)) / 2;
if (count == 0)
return;
@@ -578,7 +575,7 @@ xfs_da_node_add(xfs_da_state_t *state, xfs_da_state_blk_t *oldblk,
int tmp;
node = oldblk->bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
ASSERT((oldblk->index >= 0) && (oldblk->index <= be16_to_cpu(node->hdr.count)));
ASSERT(newblk->blkno != 0);
if (state->args->whichfork == XFS_DATA_FORK)
@@ -714,7 +711,7 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
ASSERT(args != NULL);
ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
oldroot = root_blk->bp->data;
- ASSERT(be16_to_cpu(oldroot->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
ASSERT(!oldroot->hdr.info.forw);
ASSERT(!oldroot->hdr.info.back);
@@ -737,10 +734,10 @@ xfs_da_root_join(xfs_da_state_t *state, xfs_da_state_blk_t *root_blk)
ASSERT(bp != NULL);
blkinfo = bp->data;
if (be16_to_cpu(oldroot->hdr.level) == 1) {
- ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DIR2_LEAFN_MAGIC ||
- be16_to_cpu(blkinfo->magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(blkinfo->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
+ blkinfo->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
} else {
- ASSERT(be16_to_cpu(blkinfo->magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(blkinfo->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
}
ASSERT(!blkinfo->forw);
ASSERT(!blkinfo->back);
@@ -776,7 +773,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
*/
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->data;
- ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
node = (xfs_da_intnode_t *)info;
count = be16_to_cpu(node->hdr.count);
if (count > (state->node_ents >> 1)) {
@@ -836,7 +833,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
count -= state->node_ents >> 2;
count -= be16_to_cpu(node->hdr.count);
node = bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
count -= be16_to_cpu(node->hdr.count);
xfs_da_brelse(state->args->trans, bp);
if (count >= 0)
@@ -911,7 +908,7 @@ xfs_da_fixhashpath(xfs_da_state_t *state, xfs_da_state_path_t *path)
}
for (blk--, level--; level >= 0; blk--, level--) {
node = blk->bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
btree = &node->btree[ blk->index ];
if (be32_to_cpu(btree->hashval) == lasthash)
break;
@@ -979,8 +976,8 @@ xfs_da_node_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
drop_node = drop_blk->bp->data;
save_node = save_blk->bp->data;
- ASSERT(be16_to_cpu(drop_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
- ASSERT(be16_to_cpu(save_node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(drop_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
+ ASSERT(save_node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
tp = state->args->trans;
/*
@@ -1278,8 +1275,8 @@ xfs_da_node_order(xfs_dabuf_t *node1_bp, xfs_dabuf_t *node2_bp)
node1 = node1_bp->data;
node2 = node2_bp->data;
- ASSERT((be16_to_cpu(node1->hdr.info.magic) == XFS_DA_NODE_MAGIC) &&
- (be16_to_cpu(node2->hdr.info.magic) == XFS_DA_NODE_MAGIC));
+ ASSERT(node1->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) &&
+ node2->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
if ((be16_to_cpu(node1->hdr.count) > 0) && (be16_to_cpu(node2->hdr.count) > 0) &&
((be32_to_cpu(node2->btree[0].hashval) <
be32_to_cpu(node1->btree[0].hashval)) ||
@@ -1299,7 +1296,7 @@ xfs_da_node_lasthash(xfs_dabuf_t *bp, int *count)
xfs_da_intnode_t *node;
node = bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
if (count)
*count = be16_to_cpu(node->hdr.count);
if (!node->hdr.count)
@@ -1412,7 +1409,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
ASSERT(blk->bp != NULL);
node = blk->bp->data;
- ASSERT(be16_to_cpu(node->hdr.info.magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(node->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
if (forward && (blk->index < be16_to_cpu(node->hdr.count)-1)) {
blk->index++;
blkno = be32_to_cpu(node->btree[blk->index].before);
@@ -1451,9 +1448,9 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
return(error);
ASSERT(blk->bp != NULL);
info = blk->bp->data;
- ASSERT(be16_to_cpu(info->magic) == XFS_DA_NODE_MAGIC ||
- be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC ||
- be16_to_cpu(info->magic) == XFS_ATTR_LEAF_MAGIC);
+ ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
+ info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
+ info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
blk->magic = be16_to_cpu(info->magic);
if (blk->magic == XFS_DA_NODE_MAGIC) {
node = (xfs_da_intnode_t *)info;
@@ -1546,79 +1543,62 @@ const struct xfs_nameops xfs_default_nameops = {
.compname = xfs_da_compname
};
-/*
- * Add a block to the btree ahead of the file.
- * Return the new block number to the caller.
- */
int
-xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
+xfs_da_grow_inode_int(
+ struct xfs_da_args *args,
+ xfs_fileoff_t *bno,
+ int count)
{
- xfs_fileoff_t bno, b;
- xfs_bmbt_irec_t map;
- xfs_bmbt_irec_t *mapp;
- xfs_inode_t *dp;
- int nmap, error, w, count, c, got, i, mapi;
- xfs_trans_t *tp;
- xfs_mount_t *mp;
- xfs_drfsbno_t nblks;
+ struct xfs_trans *tp = args->trans;
+ struct xfs_inode *dp = args->dp;
+ int w = args->whichfork;
+ xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
+ struct xfs_bmbt_irec map, *mapp;
+ int nmap, error, got, i, mapi;
- dp = args->dp;
- mp = dp->i_mount;
- w = args->whichfork;
- tp = args->trans;
- nblks = dp->i_d.di_nblocks;
-
- /*
- * For new directories adjust the file offset and block count.
- */
- if (w == XFS_DATA_FORK) {
- bno = mp->m_dirleafblk;
- count = mp->m_dirblkfsbs;
- } else {
- bno = 0;
- count = 1;
- }
/*
* Find a spot in the file space to put the new block.
*/
- if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, w)))
+ error = xfs_bmap_first_unused(tp, dp, count, bno, w);
+ if (error)
return error;
- if (w == XFS_DATA_FORK)
- ASSERT(bno >= mp->m_dirleafblk && bno < mp->m_dirfreeblk);
+
/*
* Try mapping it in one filesystem block.
*/
nmap = 1;
ASSERT(args->firstblock != NULL);
- if ((error = xfs_bmapi(tp, dp, bno, count,
+ error = xfs_bmapi(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist))) {
+ args->flist);
+ if (error)
return error;
- }
+
ASSERT(nmap <= 1);
if (nmap == 1) {
mapp = &map;
mapi = 1;
- }
- /*
- * If we didn't get it and the block might work if fragmented,
- * try without the CONTIG flag. Loop until we get it all.
- */
- else if (nmap == 0 && count > 1) {
+ } else if (nmap == 0 && count > 1) {
+ xfs_fileoff_t b;
+ int c;
+
+ /*
+ * If we didn't get it and the block might work if fragmented,
+ * try without the CONTIG flag. Loop until we get it all.
+ */
mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
- for (b = bno, mapi = 0; b < bno + count; ) {
+ for (b = *bno, mapi = 0; b < *bno + count; ) {
nmap = MIN(XFS_BMAP_MAX_NMAP, count);
- c = (int)(bno + count - b);
- if ((error = xfs_bmapi(tp, dp, b, c,
+ c = (int)(*bno + count - b);
+ error = xfs_bmapi(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist))) {
- kmem_free(mapp);
- return error;
- }
+ &mapp[mapi], &nmap, args->flist);
+ if (error)
+ goto out_free_map;
if (nmap < 1)
break;
mapi += nmap;
@@ -1629,24 +1609,53 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
mapi = 0;
mapp = NULL;
}
+
/*
* Count the blocks we got, make sure it matches the total.
*/
for (i = 0, got = 0; i < mapi; i++)
got += mapp[i].br_blockcount;
- if (got != count || mapp[0].br_startoff != bno ||
+ if (got != count || mapp[0].br_startoff != *bno ||
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
- bno + count) {
- if (mapp != &map)
- kmem_free(mapp);
- return XFS_ERROR(ENOSPC);
+ *bno + count) {
+ error = XFS_ERROR(ENOSPC);
+ goto out_free_map;
}
- if (mapp != &map)
- kmem_free(mapp);
+
/* account for newly allocated blocks in reserved blocks total */
args->total -= dp->i_d.di_nblocks - nblks;
- *new_blkno = (xfs_dablk_t)bno;
- return 0;
+
+out_free_map:
+ if (mapp != &map)
+ kmem_free(mapp);
+ return error;
+}
+
+/*
+ * Add a block to the btree ahead of the file.
+ * Return the new block number to the caller.
+ */
+int
+xfs_da_grow_inode(
+ struct xfs_da_args *args,
+ xfs_dablk_t *new_blkno)
+{
+ xfs_fileoff_t bno;
+ int count;
+ int error;
+
+ if (args->whichfork == XFS_DATA_FORK) {
+ bno = args->dp->i_mount->m_dirleafblk;
+ count = args->dp->i_mount->m_dirblkfsbs;
+ } else {
+ bno = 0;
+ count = 1;
+ }
+
+ error = xfs_da_grow_inode_int(args, &bno, count);
+ if (!error)
+ *new_blkno = (xfs_dablk_t)bno;
+ return error;
}
/*
@@ -1704,12 +1713,12 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
/*
* Get values from the moved block.
*/
- if (be16_to_cpu(dead_info->magic) == XFS_DIR2_LEAFN_MAGIC) {
+ if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC)) {
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
dead_level = 0;
dead_hash = be32_to_cpu(dead_leaf2->ents[be16_to_cpu(dead_leaf2->hdr.count) - 1].hashval);
} else {
- ASSERT(be16_to_cpu(dead_info->magic) == XFS_DA_NODE_MAGIC);
+ ASSERT(dead_info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC));
dead_node = (xfs_da_intnode_t *)dead_info;
dead_level = be16_to_cpu(dead_node->hdr.level);
dead_hash = be32_to_cpu(dead_node->btree[be16_to_cpu(dead_node->hdr.count) - 1].hashval);
@@ -1768,8 +1777,8 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
if ((error = xfs_da_read_buf(tp, ip, par_blkno, -1, &par_buf, w)))
goto done;
par_node = par_buf->data;
- if (unlikely(
- be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC ||
+ if (unlikely(par_node->hdr.info.magic !=
+ cpu_to_be16(XFS_DA_NODE_MAGIC) ||
(level >= 0 && level != be16_to_cpu(par_node->hdr.level) + 1))) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
XFS_ERRLEVEL_LOW, mp);
@@ -1820,7 +1829,7 @@ xfs_da_swap_lastblock(xfs_da_args_t *args, xfs_dablk_t *dead_blknop,
par_node = par_buf->data;
if (unlikely(
be16_to_cpu(par_node->hdr.level) != level ||
- be16_to_cpu(par_node->hdr.info.magic) != XFS_DA_NODE_MAGIC)) {
+ par_node->hdr.info.magic != cpu_to_be16(XFS_DA_NODE_MAGIC))) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
@@ -1930,8 +1939,7 @@ xfs_da_do_buf(
xfs_daddr_t *mappedbnop,
xfs_dabuf_t **bpp,
int whichfork,
- int caller,
- inst_t *ra)
+ int caller)
{
xfs_buf_t *bp = NULL;
xfs_buf_t **bplist;
@@ -2070,25 +2078,22 @@ xfs_da_do_buf(
* Build a dabuf structure.
*/
if (bplist) {
- rbp = xfs_da_buf_make(nbplist, bplist, ra);
+ rbp = xfs_da_buf_make(nbplist, bplist);
} else if (bp)
- rbp = xfs_da_buf_make(1, &bp, ra);
+ rbp = xfs_da_buf_make(1, &bp);
else
rbp = NULL;
/*
* For read_buf, check the magic number.
*/
if (caller == 1) {
- xfs_dir2_data_t *data;
- xfs_dir2_free_t *free;
- xfs_da_blkinfo_t *info;
+ xfs_dir2_data_hdr_t *hdr = rbp->data;
+ xfs_dir2_free_t *free = rbp->data;
+ xfs_da_blkinfo_t *info = rbp->data;
uint magic, magic1;
- info = rbp->data;
- data = rbp->data;
- free = rbp->data;
magic = be16_to_cpu(info->magic);
- magic1 = be32_to_cpu(data->hdr.magic);
+ magic1 = be32_to_cpu(hdr->magic);
if (unlikely(
XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
(magic != XFS_ATTR_LEAF_MAGIC) &&
@@ -2096,7 +2101,7 @@ xfs_da_do_buf(
(magic != XFS_DIR2_LEAFN_MAGIC) &&
(magic1 != XFS_DIR2_BLOCK_MAGIC) &&
(magic1 != XFS_DIR2_DATA_MAGIC) &&
- (be32_to_cpu(free->hdr.magic) != XFS_DIR2_FREE_MAGIC),
+ (free->hdr.magic != cpu_to_be32(XFS_DIR2_FREE_MAGIC)),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
trace_xfs_da_btree_corrupt(rbp->bps[0], _RET_IP_);
@@ -2143,8 +2148,7 @@ xfs_da_get_buf(
xfs_dabuf_t **bpp,
int whichfork)
{
- return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0,
- (inst_t *)__return_address);
+ return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 0);
}
/*
@@ -2159,8 +2163,7 @@ xfs_da_read_buf(
xfs_dabuf_t **bpp,
int whichfork)
{
- return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1,
- (inst_t *)__return_address);
+ return xfs_da_do_buf(trans, dp, bno, &mappedbno, bpp, whichfork, 1);
}
/*
@@ -2176,8 +2179,7 @@ xfs_da_reada_buf(
xfs_daddr_t rval;
rval = -1;
- if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3,
- (inst_t *)__return_address))
+ if (xfs_da_do_buf(trans, dp, bno, &rval, NULL, whichfork, 3))
return -1;
else
return rval;
@@ -2235,17 +2237,12 @@ xfs_da_state_free(xfs_da_state_t *state)
kmem_zone_free(xfs_da_state_zone, state);
}
-#ifdef XFS_DABUF_DEBUG
-xfs_dabuf_t *xfs_dabuf_global_list;
-static DEFINE_SPINLOCK(xfs_dabuf_global_lock);
-#endif
-
/*
* Create a dabuf.
*/
/* ARGSUSED */
STATIC xfs_dabuf_t *
-xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
+xfs_da_buf_make(int nbuf, xfs_buf_t **bps)
{
xfs_buf_t *bp;
xfs_dabuf_t *dabuf;
@@ -2257,11 +2254,6 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
else
dabuf = kmem_alloc(XFS_DA_BUF_SIZE(nbuf), KM_NOFS);
dabuf->dirty = 0;
-#ifdef XFS_DABUF_DEBUG
- dabuf->ra = ra;
- dabuf->target = XFS_BUF_TARGET(bps[0]);
- dabuf->blkno = XFS_BUF_ADDR(bps[0]);
-#endif
if (nbuf == 1) {
dabuf->nbuf = 1;
bp = bps[0];
@@ -2281,23 +2273,6 @@ xfs_da_buf_make(int nbuf, xfs_buf_t **bps, inst_t *ra)
XFS_BUF_COUNT(bp));
}
}
-#ifdef XFS_DABUF_DEBUG
- {
- xfs_dabuf_t *p;
-
- spin_lock(&xfs_dabuf_global_lock);
- for (p = xfs_dabuf_global_list; p; p = p->next) {
- ASSERT(p->blkno != dabuf->blkno ||
- p->target != dabuf->target);
- }
- dabuf->prev = NULL;
- if (xfs_dabuf_global_list)
- xfs_dabuf_global_list->prev = dabuf;
- dabuf->next = xfs_dabuf_global_list;
- xfs_dabuf_global_list = dabuf;
- spin_unlock(&xfs_dabuf_global_lock);
- }
-#endif
return dabuf;
}
@@ -2333,25 +2308,12 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf)
ASSERT(dabuf->nbuf && dabuf->data && dabuf->bbcount && dabuf->bps[0]);
if (dabuf->dirty)
xfs_da_buf_clean(dabuf);
- if (dabuf->nbuf > 1)
+ if (dabuf->nbuf > 1) {
kmem_free(dabuf->data);
-#ifdef XFS_DABUF_DEBUG
- {
- spin_lock(&xfs_dabuf_global_lock);
- if (dabuf->prev)
- dabuf->prev->next = dabuf->next;
- else
- xfs_dabuf_global_list = dabuf->next;
- if (dabuf->next)
- dabuf->next->prev = dabuf->prev;
- spin_unlock(&xfs_dabuf_global_lock);
- }
- memset(dabuf, 0, XFS_DA_BUF_SIZE(dabuf->nbuf));
-#endif
- if (dabuf->nbuf == 1)
- kmem_zone_free(xfs_dabuf_zone, dabuf);
- else
kmem_free(dabuf);
+ } else {
+ kmem_zone_free(xfs_dabuf_zone, dabuf);
+ }
}
/*
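The xfs_da_grow_inode() refactor above pulls the block-mapping loop out into xfs_da_grow_inode_int() and converts its scattered early returns into a single out_free_map exit label. A compact standalone sketch of that cleanup pattern, with invented names and userspace allocation in place of kmem_alloc():

#include <stdlib.h>

struct map { long startoff; };

static int grow(int count)
{
        struct map single, *mapp = &single;
        int error = 0;

        /* A multi-block request needs a heap table; a single block uses
         * the on-stack map, like map/mapp in the code above. */
        if (count > 1) {
                mapp = malloc(sizeof(*mapp) * count);
                if (!mapp)
                        return -1;
        }

        if (count == 0) {               /* stand-in for the ENOSPC check */
                error = -2;
                goto out_free_map;
        }

        /* ... mapping work would go here ... */

out_free_map:
        if (mapp != &single)            /* free only what we allocated */
                free(mapp);
        return error;
}

int main(void)
{
        return grow(4) ? 1 : 0;
}

Every failure path now funnels through one label, so the "free the table unless it is the on-stack one" logic exists exactly once.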
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index fe9f5a8c1d2..dbf7c074ae7 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -145,22 +145,11 @@ typedef struct xfs_dabuf {
short dirty; /* data needs to be copied back */
short bbcount; /* how large is data in bbs */
void *data; /* pointer for buffers' data */
-#ifdef XFS_DABUF_DEBUG
- inst_t *ra; /* return address of caller to make */
- struct xfs_dabuf *next; /* next in global chain */
- struct xfs_dabuf *prev; /* previous in global chain */
- struct xfs_buftarg *target; /* device for buffer */
- xfs_daddr_t blkno; /* daddr first in bps[0] */
-#endif
struct xfs_buf *bps[1]; /* actually nbuf of these */
} xfs_dabuf_t;
#define XFS_DA_BUF_SIZE(n) \
(sizeof(xfs_dabuf_t) + sizeof(struct xfs_buf *) * ((n) - 1))
-#ifdef XFS_DABUF_DEBUG
-extern xfs_dabuf_t *xfs_dabuf_global_list;
-#endif
-
/*
* Storage for holding state during Btree searches and split/join ops.
*
@@ -248,6 +237,8 @@ int xfs_da_blk_link(xfs_da_state_t *state, xfs_da_state_blk_t *old_blk,
* Utility routines.
*/
int xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno);
+int xfs_da_grow_inode_int(struct xfs_da_args *args, xfs_fileoff_t *bno,
+ int count);
int xfs_da_get_buf(struct xfs_trans *trans, struct xfs_inode *dp,
xfs_dablk_t bno, xfs_daddr_t mappedbno,
xfs_dabuf_t **bp, int whichfork);
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index dba7a71cedf..4580ce00aeb 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -24,20 +24,17 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
-#include "xfs_dir2_node.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
@@ -122,15 +119,15 @@ int
xfs_dir_isempty(
xfs_inode_t *dp)
{
- xfs_dir2_sf_t *sfp;
+ xfs_dir2_sf_hdr_t *sfp;
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
if (dp->i_d.di_size == 0) /* might happen during shutdown. */
return 1;
if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
return 0;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- return !sfp->hdr.count;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ return !sfp->count;
}
/*
@@ -500,129 +497,34 @@ xfs_dir_canenter(
/*
* Add a block to the directory.
- * This routine is for data and free blocks, not leaf/node blocks
- * which are handled by xfs_da_grow_inode.
+ *
+ * This routine is for data and free blocks, not leaf/node blocks which are
+ * handled by xfs_da_grow_inode.
*/
int
xfs_dir2_grow_inode(
- xfs_da_args_t *args,
- int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */
- xfs_dir2_db_t *dbp) /* out: block number added */
+ struct xfs_da_args *args,
+ int space, /* v2 dir's space XFS_DIR2_xxx_SPACE */
+ xfs_dir2_db_t *dbp) /* out: block number added */
{
- xfs_fileoff_t bno; /* directory offset of new block */
- int count; /* count of filesystem blocks */
- xfs_inode_t *dp; /* incore directory inode */
- int error;
- int got; /* blocks actually mapped */
- int i;
- xfs_bmbt_irec_t map; /* single structure for bmap */
- int mapi; /* mapping index */
- xfs_bmbt_irec_t *mapp; /* bmap mapping structure(s) */
- xfs_mount_t *mp;
- int nmap; /* number of bmap entries */
- xfs_trans_t *tp;
- xfs_drfsbno_t nblks;
+ struct xfs_inode *dp = args->dp;
+ struct xfs_mount *mp = dp->i_mount;
+ xfs_fileoff_t bno; /* directory offset of new block */
+ int count; /* count of filesystem blocks */
+ int error;
trace_xfs_dir2_grow_inode(args, space);
- dp = args->dp;
- tp = args->trans;
- mp = dp->i_mount;
- nblks = dp->i_d.di_nblocks;
/*
* Set lowest possible block in the space requested.
*/
bno = XFS_B_TO_FSBT(mp, space * XFS_DIR2_SPACE_SIZE);
count = mp->m_dirblkfsbs;
- /*
- * Find the first hole for our block.
- */
- if ((error = xfs_bmap_first_unused(tp, dp, count, &bno, XFS_DATA_FORK)))
- return error;
- nmap = 1;
- ASSERT(args->firstblock != NULL);
- /*
- * Try mapping the new block contiguously (one extent).
- */
- if ((error = xfs_bmapi(tp, dp, bno, count,
- XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
- args->firstblock, args->total, &map, &nmap,
- args->flist)))
- return error;
- ASSERT(nmap <= 1);
- if (nmap == 1) {
- mapp = &map;
- mapi = 1;
- }
- /*
- * Didn't work and this is a multiple-fsb directory block.
- * Try again with contiguous flag turned on.
- */
- else if (nmap == 0 && count > 1) {
- xfs_fileoff_t b; /* current file offset */
- /*
- * Space for maximum number of mappings.
- */
- mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
- /*
- * Iterate until we get to the end of our block.
- */
- for (b = bno, mapi = 0; b < bno + count; ) {
- int c; /* current fsb count */
-
- /*
- * Can't map more than MAX_NMAP at once.
- */
- nmap = MIN(XFS_BMAP_MAX_NMAP, count);
- c = (int)(bno + count - b);
- if ((error = xfs_bmapi(tp, dp, b, c,
- XFS_BMAPI_WRITE|XFS_BMAPI_METADATA,
- args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist))) {
- kmem_free(mapp);
- return error;
- }
- if (nmap < 1)
- break;
- /*
- * Add this bunch into our table, go to the next offset.
- */
- mapi += nmap;
- b = mapp[mapi - 1].br_startoff +
- mapp[mapi - 1].br_blockcount;
- }
- }
- /*
- * Didn't work.
- */
- else {
- mapi = 0;
- mapp = NULL;
- }
- /*
- * See how many fsb's we got.
- */
- for (i = 0, got = 0; i < mapi; i++)
- got += mapp[i].br_blockcount;
- /*
- * Didn't get enough fsb's, or the first/last block's are wrong.
- */
- if (got != count || mapp[0].br_startoff != bno ||
- mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
- bno + count) {
- if (mapp != &map)
- kmem_free(mapp);
- return XFS_ERROR(ENOSPC);
- }
- /*
- * Done with the temporary mapping table.
- */
- if (mapp != &map)
- kmem_free(mapp);
+ error = xfs_da_grow_inode_int(args, &bno, count);
+ if (error)
+ return error;
- /* account for newly allocated blocks in reserved blocks total */
- args->total -= dp->i_d.di_nblocks - nblks;
*dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno);
/*
@@ -634,7 +536,7 @@ xfs_dir2_grow_inode(
size = XFS_FSB_TO_B(mp, bno + count);
if (size > dp->i_d.di_size) {
dp->i_d.di_size = size;
- xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
+ xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE);
}
}
return 0;
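
The bno computation near the top of this function depends on the directory address space being carved into disjoint regions, one per XFS_DIR2_xxx_SPACE constant, each XFS_DIR2_SPACE_SIZE apart (1ULL << 35, i.e. 32GB, per the constants removed from xfs_dir2_data.h later in this patch); XFS_B_TO_FSBT then converts a region's first byte to a filesystem-block number. A minimal userspace sketch of that arithmetic, assuming 4k filesystem blocks (blklog = 12 is an example value, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define DATA_ALIGN_LOG 3                               /* XFS_DIR2_DATA_ALIGN_LOG */
#define SPACE_SIZE     (1ULL << (32 + DATA_ALIGN_LOG)) /* XFS_DIR2_SPACE_SIZE */

int main(void)
{
	int blklog = 12;                     /* assumed: 4k filesystem blocks */
	for (int space = 0; space < 3; space++) {
		uint64_t byte = (uint64_t)space * SPACE_SIZE;
		uint64_t fsb  = byte >> blklog;  /* XFS_B_TO_FSBT(mp, byte) */
		printf("space %d starts at fsb %llu\n",
		       space, (unsigned long long)fsb);
	}
	return 0;
}
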
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 74a3b105768..e937d9991c1 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -16,49 +16,14 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __XFS_DIR2_H__
-#define __XFS_DIR2_H__
+#define __XFS_DIR2_H__
-struct uio;
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_dir2_put_args;
struct xfs_bmap_free;
+struct xfs_da_args;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
-/*
- * Directory version 2.
- * There are 4 possible formats:
- * shortform
- * single block - data with embedded leaf at the end
- * multiple data blocks, single leaf+freeindex block
- * data blocks, node&leaf blocks (btree), freeindex blocks
- *
- * The shortform format is in xfs_dir2_sf.h.
- * The single block format is in xfs_dir2_block.h.
- * The data block format is in xfs_dir2_data.h.
- * The leaf and freeindex block formats are in xfs_dir2_leaf.h.
- * Node blocks are the same as the other version, in xfs_da_btree.h.
- */
-
-/*
- * Byte offset in data block and shortform entry.
- */
-typedef __uint16_t xfs_dir2_data_off_t;
-#define NULLDATAOFF 0xffffU
-typedef uint xfs_dir2_data_aoff_t; /* argument form */
-
-/*
- * Directory block number (logical dirblk in file)
- */
-typedef __uint32_t xfs_dir2_db_t;
-
-/*
- * Byte offset in a directory.
- */
-typedef xfs_off_t xfs_dir2_off_t;
-
extern struct xfs_name xfs_name_dotdot;
/*
@@ -86,21 +51,10 @@ extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_bmap_free *flist, xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, uint resblks);
-extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
/*
- * Utility routines for v2 directories.
+ * Direct call from the bmap code, bypassing the generic directory layer.
*/
-extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
- xfs_dir2_db_t *dbp);
-extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp,
- int *vp);
-extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
- int *vp);
-extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
- struct xfs_dabuf *bp);
-
-extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
- const unsigned char *name, int len);
+extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index 580d99cef9e..9245e029b8e 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -23,17 +23,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -67,7 +64,7 @@ xfs_dir2_block_addname(
xfs_da_args_t *args) /* directory op arguments */
{
xfs_dir2_data_free_t *bf; /* bestfree table in block */
- xfs_dir2_block_t *block; /* directory block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
xfs_dabuf_t *bp; /* buffer for block */
xfs_dir2_block_tail_t *btp; /* block tail */
@@ -105,13 +102,13 @@ xfs_dir2_block_addname(
return error;
}
ASSERT(bp != NULL);
- block = bp->data;
+ hdr = bp->data;
/*
* Check the magic number, corrupted if wrong.
*/
- if (unlikely(be32_to_cpu(block->hdr.magic) != XFS_DIR2_BLOCK_MAGIC)) {
+ if (unlikely(hdr->magic != cpu_to_be32(XFS_DIR2_BLOCK_MAGIC))) {
XFS_CORRUPTION_ERROR("xfs_dir2_block_addname",
- XFS_ERRLEVEL_LOW, mp, block);
+ XFS_ERRLEVEL_LOW, mp, hdr);
xfs_da_brelse(tp, bp);
return XFS_ERROR(EFSCORRUPTED);
}
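
The magic-number check above shows a pattern repeated throughout this patch: instead of byte-swapping the on-disk value with be32_to_cpu() on every comparison, the constant is converted once with cpu_to_be32(), which the compiler folds at build time, so the runtime comparison is a plain load and compare. A userspace sketch of the same idea, using htonl()/ntohl() as stand-ins for the kernel's endian helpers:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define DIR2_BLOCK_MAGIC 0x58443242u    /* "XD2B", single-block dirs */

int main(void)
{
	uint32_t disk_magic = htonl(DIR2_BLOCK_MAGIC);  /* value as stored on disk */

	if (ntohl(disk_magic) == DIR2_BLOCK_MAGIC)      /* old: swap the variable per check */
		puts("old-style comparison matches");
	if (disk_magic == htonl(DIR2_BLOCK_MAGIC))      /* new: swap the constant once */
		puts("new-style comparison matches");
	return 0;
}
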
@@ -119,8 +116,8 @@ xfs_dir2_block_addname(
/*
* Set up pointers to parts of the block.
*/
- bf = block->hdr.bestfree;
- btp = xfs_dir2_block_tail_p(mp, block);
+ bf = hdr->bestfree;
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
* No stale entries? Need space for entry and new leaf.
@@ -133,7 +130,7 @@ xfs_dir2_block_addname(
/*
* Data object just before the first leaf entry.
*/
- enddup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
+ enddup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
/*
* If it's not free then can't do this add without cleaning up:
* the space before the first leaf entry needs to be free so it
@@ -146,7 +143,7 @@ xfs_dir2_block_addname(
*/
else {
dup = (xfs_dir2_data_unused_t *)
- ((char *)block + be16_to_cpu(bf[0].offset));
+ ((char *)hdr + be16_to_cpu(bf[0].offset));
if (dup == enddup) {
/*
* It is the biggest freespace, is it too small
@@ -159,7 +156,7 @@ xfs_dir2_block_addname(
*/
if (be16_to_cpu(bf[1].length) >= len)
dup = (xfs_dir2_data_unused_t *)
- ((char *)block +
+ ((char *)hdr +
be16_to_cpu(bf[1].offset));
else
dup = NULL;
@@ -182,7 +179,7 @@ xfs_dir2_block_addname(
*/
else if (be16_to_cpu(bf[0].length) >= len) {
dup = (xfs_dir2_data_unused_t *)
- ((char *)block + be16_to_cpu(bf[0].offset));
+ ((char *)hdr + be16_to_cpu(bf[0].offset));
compact = 0;
}
/*
@@ -196,7 +193,7 @@ xfs_dir2_block_addname(
/*
* Data object just before the first leaf entry.
*/
- dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
+ dup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
/*
* If it's not free then the data will go where the
* leaf data starts now, if it works at all.
@@ -255,7 +252,8 @@ xfs_dir2_block_addname(
highstale = lfloghigh = -1;
fromidx >= 0;
fromidx--) {
- if (be32_to_cpu(blp[fromidx].address) == XFS_DIR2_NULL_DATAPTR) {
+ if (blp[fromidx].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) {
if (highstale == -1)
highstale = toidx;
else {
@@ -272,7 +270,7 @@ xfs_dir2_block_addname(
lfloghigh -= be32_to_cpu(btp->stale) - 1;
be32_add_cpu(&btp->count, -(be32_to_cpu(btp->stale) - 1));
xfs_dir2_data_make_free(tp, bp,
- (xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
+ (xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
(xfs_dir2_data_aoff_t)((be32_to_cpu(btp->stale) - 1) * sizeof(*blp)),
&needlog, &needscan);
blp += be32_to_cpu(btp->stale) - 1;
@@ -282,7 +280,7 @@ xfs_dir2_block_addname(
* This needs to happen before the next call to use_free.
*/
if (needscan) {
- xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
needscan = 0;
}
}
@@ -318,7 +316,7 @@ xfs_dir2_block_addname(
*/
xfs_dir2_data_use_free(tp, bp, enddup,
(xfs_dir2_data_aoff_t)
- ((char *)enddup - (char *)block + be16_to_cpu(enddup->length) -
+ ((char *)enddup - (char *)hdr + be16_to_cpu(enddup->length) -
sizeof(*blp)),
(xfs_dir2_data_aoff_t)sizeof(*blp),
&needlog, &needscan);
@@ -331,8 +329,7 @@ xfs_dir2_block_addname(
* This needs to happen before the next call to use_free.
*/
if (needscan) {
- xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block,
- &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
needscan = 0;
}
/*
@@ -353,12 +350,14 @@ xfs_dir2_block_addname(
else {
for (lowstale = mid;
lowstale >= 0 &&
- be32_to_cpu(blp[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
+ blp[lowstale].address !=
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR);
lowstale--)
continue;
for (highstale = mid + 1;
highstale < be32_to_cpu(btp->count) &&
- be32_to_cpu(blp[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
+ blp[highstale].address !=
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR) &&
(lowstale < 0 || mid - lowstale > highstale - mid);
highstale++)
continue;
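
The two loops above hunt outward from the insertion point for the nearest stale leaf slot on each side, cutting the upward scan short once the downward hit is provably closer. A standalone sketch of the same search, with 0 standing in for XFS_DIR2_NULL_DATAPTR:

#include <stdio.h>

#define NULL_DATAPTR 0   /* stands in for XFS_DIR2_NULL_DATAPTR */

int main(void)
{
	unsigned addr[] = { 7, 0, 9, 4, 0, 5 };     /* 0 marks a stale slot */
	int count = 6, mid = 3, lowstale, highstale;

	for (lowstale = mid;
	     lowstale >= 0 && addr[lowstale] != NULL_DATAPTR;
	     lowstale--)
		continue;
	for (highstale = mid + 1;
	     highstale < count && addr[highstale] != NULL_DATAPTR &&
	     (lowstale < 0 || mid - lowstale > highstale - mid);
	     highstale++)
		continue;

	printf("lowstale=%d highstale=%d\n", lowstale, highstale);
	return 0;
}
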
@@ -397,13 +396,13 @@ xfs_dir2_block_addname(
*/
blp[mid].hashval = cpu_to_be32(args->hashval);
blp[mid].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
- (char *)dep - (char *)block));
+ (char *)dep - (char *)hdr));
xfs_dir2_block_log_leaf(tp, bp, lfloglow, lfloghigh);
/*
* Mark space for the data entry used.
*/
xfs_dir2_data_use_free(tp, bp, dup,
- (xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
+ (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
(xfs_dir2_data_aoff_t)len, &needlog, &needscan);
/*
* Create the new data entry.
@@ -412,12 +411,12 @@ xfs_dir2_block_addname(
dep->namelen = args->namelen;
memcpy(dep->name, args->name, args->namelen);
tagp = xfs_dir2_data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)block);
+ *tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Clean up the bestfree array and log the header, tail, and entry.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(tp, bp);
xfs_dir2_block_log_tail(tp, bp);
@@ -437,7 +436,7 @@ xfs_dir2_block_getdents(
xfs_off_t *offset,
filldir_t filldir)
{
- xfs_dir2_block_t *block; /* directory block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dabuf_t *bp; /* buffer for block */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_dir2_data_entry_t *dep; /* block data entry */
@@ -470,13 +469,13 @@ xfs_dir2_block_getdents(
* We'll skip entries before this.
*/
wantoff = xfs_dir2_dataptr_to_off(mp, *offset);
- block = bp->data;
+ hdr = bp->data;
xfs_dir2_data_check(dp, bp);
/*
* Set up values for the loop.
*/
- btp = xfs_dir2_block_tail_p(mp, block);
- ptr = (char *)block->u;
+ btp = xfs_dir2_block_tail_p(mp, hdr);
+ ptr = (char *)(hdr + 1);
endptr = (char *)xfs_dir2_block_leaf_p(btp);
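
Replacing (char *)block->u with (char *)(hdr + 1) works because the entries begin immediately after the fixed-size header: hdr + 1 is standard C pointer arithmetic for "one past this struct", removing any need for the old flexible u[] member. A small sketch, with field sizes chosen to mirror xfs_dir2_data_hdr (a be32 magic plus three 2x2-byte bestfree slots):

#include <stdint.h>
#include <stdio.h>

struct data_free { uint16_t offset, length; };
struct data_hdr  { uint32_t magic; struct data_free bestfree[3]; };

int main(void)
{
	unsigned char block[4096];          /* stands in for bp->data */
	struct data_hdr *hdr = (struct data_hdr *)block;
	char *ptr = (char *)(hdr + 1);      /* first entry follows the header */

	printf("entries start at byte %ld\n", (long)(ptr - (char *)block));
	return 0;
}
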
/*
@@ -502,11 +501,11 @@ xfs_dir2_block_getdents(
/*
* The entry is before the desired starting point, skip it.
*/
- if ((char *)dep - (char *)block < wantoff)
+ if ((char *)dep - (char *)hdr < wantoff)
continue;
cook = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
- (char *)dep - (char *)block);
+ (char *)dep - (char *)hdr);
/*
* If it didn't fit, set the final offset to here & return.
@@ -540,17 +539,14 @@ xfs_dir2_block_log_leaf(
int first, /* index of first logged leaf */
int last) /* index of last logged leaf */
{
- xfs_dir2_block_t *block; /* directory block structure */
- xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
- xfs_dir2_block_tail_t *btp; /* block tail */
- xfs_mount_t *mp; /* filesystem mount point */
+ xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_leaf_entry_t *blp;
+ xfs_dir2_block_tail_t *btp;
- mp = tp->t_mountp;
- block = bp->data;
- btp = xfs_dir2_block_tail_p(mp, block);
+ btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
- xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)block),
- (uint)((char *)&blp[last + 1] - (char *)block - 1));
+ xfs_da_log_buf(tp, bp, (uint)((char *)&blp[first] - (char *)hdr),
+ (uint)((char *)&blp[last + 1] - (char *)hdr - 1));
}
/*
@@ -561,15 +557,12 @@ xfs_dir2_block_log_tail(
xfs_trans_t *tp, /* transaction structure */
xfs_dabuf_t *bp) /* block buffer */
{
- xfs_dir2_block_t *block; /* directory block structure */
- xfs_dir2_block_tail_t *btp; /* block tail */
- xfs_mount_t *mp; /* filesystem mount point */
+ xfs_dir2_data_hdr_t *hdr = bp->data;
+ xfs_dir2_block_tail_t *btp;
- mp = tp->t_mountp;
- block = bp->data;
- btp = xfs_dir2_block_tail_p(mp, block);
- xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)block),
- (uint)((char *)(btp + 1) - (char *)block - 1));
+ btp = xfs_dir2_block_tail_p(tp->t_mountp, hdr);
+ xfs_da_log_buf(tp, bp, (uint)((char *)btp - (char *)hdr),
+ (uint)((char *)(btp + 1) - (char *)hdr - 1));
}
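
Both log helpers above compute inclusive byte ranges relative to the start of the buffer, which is what xfs_da_log_buf expects; the only change is that offsets are now taken from hdr rather than the old xfs_dir2_block_t overlay. A sketch of the arithmetic for logging leaf entries first..last:

#include <stdint.h>
#include <stdio.h>

struct leaf_entry { uint32_t hashval, address; };

int main(void)
{
	unsigned char buf[4096];            /* the directory block buffer */
	struct leaf_entry *blp = (struct leaf_entry *)(buf + 4000);
	int first = 1, last = 3;

	unsigned lo = (unsigned)((char *)&blp[first] - (char *)buf);
	unsigned hi = (unsigned)((char *)&blp[last + 1] - (char *)buf - 1);
	printf("dirty byte range: %u..%u (inclusive)\n", lo, hi);
	return 0;
}
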
/*
@@ -580,7 +573,7 @@ int /* error */
xfs_dir2_block_lookup(
xfs_da_args_t *args) /* dir lookup arguments */
{
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
xfs_dabuf_t *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
@@ -600,14 +593,14 @@ xfs_dir2_block_lookup(
return error;
dp = args->dp;
mp = dp->i_mount;
- block = bp->data;
+ hdr = bp->data;
xfs_dir2_data_check(dp, bp);
- btp = xfs_dir2_block_tail_p(mp, block);
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
* Get the offset from the leaf entry, to point to the data.
*/
- dep = (xfs_dir2_data_entry_t *)((char *)block +
+ dep = (xfs_dir2_data_entry_t *)((char *)hdr +
xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
/*
* Fill in inode number, CI name if appropriate, release the block.
@@ -628,7 +621,7 @@ xfs_dir2_block_lookup_int(
int *entno) /* returned entry number */
{
xfs_dir2_dataptr_t addr; /* data entry address */
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
xfs_dabuf_t *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
@@ -654,9 +647,9 @@ xfs_dir2_block_lookup_int(
return error;
}
ASSERT(bp != NULL);
- block = bp->data;
+ hdr = bp->data;
xfs_dir2_data_check(dp, bp);
- btp = xfs_dir2_block_tail_p(mp, block);
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
* Loop doing a binary search for our hash value.
@@ -694,7 +687,7 @@ xfs_dir2_block_lookup_int(
* Get pointer to the entry from the leaf.
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
+ ((char *)hdr + xfs_dir2_dataptr_to_off(mp, addr));
/*
* Compare name and if it's an exact match, return the index
* and buffer. If it's the first case-insensitive match, store
@@ -733,7 +726,7 @@ int /* error */
xfs_dir2_block_removename(
xfs_da_args_t *args) /* directory operation args */
{
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf pointer */
xfs_dabuf_t *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
@@ -760,20 +753,20 @@ xfs_dir2_block_removename(
dp = args->dp;
tp = args->trans;
mp = dp->i_mount;
- block = bp->data;
- btp = xfs_dir2_block_tail_p(mp, block);
+ hdr = bp->data;
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
* Point to the data entry using the leaf entry.
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
+ ((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
/*
* Mark the data entry's space free.
*/
needlog = needscan = 0;
xfs_dir2_data_make_free(tp, bp,
- (xfs_dir2_data_aoff_t)((char *)dep - (char *)block),
+ (xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
/*
* Fix up the block tail.
@@ -789,15 +782,15 @@ xfs_dir2_block_removename(
* Fix up bestfree, log the header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(tp, bp);
xfs_dir2_data_check(dp, bp);
/*
* See if the size as a shortform is good enough.
*/
- if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
- XFS_IFORK_DSIZE(dp)) {
+ size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
+ if (size > XFS_IFORK_DSIZE(dp)) {
xfs_da_buf_done(bp);
return 0;
}
@@ -815,7 +808,7 @@ int /* error */
xfs_dir2_block_replace(
xfs_da_args_t *args) /* directory operation args */
{
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
xfs_dabuf_t *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail */
@@ -836,14 +829,14 @@ xfs_dir2_block_replace(
}
dp = args->dp;
mp = dp->i_mount;
- block = bp->data;
- btp = xfs_dir2_block_tail_p(mp, block);
+ hdr = bp->data;
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
* Point to the data entry we need to change.
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
+ ((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
ASSERT(be64_to_cpu(dep->inumber) != args->inumber);
/*
* Change the inode number to the new value.
@@ -882,7 +875,7 @@ xfs_dir2_leaf_to_block(
xfs_dabuf_t *dbp) /* data buffer */
{
__be16 *bestsp; /* leaf bests table */
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_block_tail_t *btp; /* block tail */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused data entry */
@@ -906,7 +899,7 @@ xfs_dir2_leaf_to_block(
tp = args->trans;
mp = dp->i_mount;
leaf = lbp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
/*
* If there are data blocks other than the first one, take this
@@ -917,7 +910,7 @@ xfs_dir2_leaf_to_block(
while (dp->i_d.di_size > mp->m_dirblksize) {
bestsp = xfs_dir2_leaf_bests_p(ltp);
if (be16_to_cpu(bestsp[be32_to_cpu(ltp->bestcount) - 1]) ==
- mp->m_dirblksize - (uint)sizeof(block->hdr)) {
+ mp->m_dirblksize - (uint)sizeof(*hdr)) {
if ((error =
xfs_dir2_leaf_trim_data(args, lbp,
(xfs_dir2_db_t)(be32_to_cpu(ltp->bestcount) - 1))))
@@ -935,18 +928,18 @@ xfs_dir2_leaf_to_block(
XFS_DATA_FORK))) {
goto out;
}
- block = dbp->data;
- ASSERT(be32_to_cpu(block->hdr.magic) == XFS_DIR2_DATA_MAGIC);
+ hdr = dbp->data;
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
/*
* Size of the "leaf" area in the block.
*/
- size = (uint)sizeof(block->tail) +
+ size = (uint)sizeof(xfs_dir2_block_tail_t) +
(uint)sizeof(*lep) * (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
/*
* Look at the last data entry.
*/
- tagp = (__be16 *)((char *)block + mp->m_dirblksize) - 1;
- dup = (xfs_dir2_data_unused_t *)((char *)block + be16_to_cpu(*tagp));
+ tagp = (__be16 *)((char *)hdr + mp->m_dirblksize) - 1;
+ dup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
/*
* If it's not free or is too short we can't do it.
*/
@@ -958,7 +951,7 @@ xfs_dir2_leaf_to_block(
/*
* Start converting it to block form.
*/
- block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
+ hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
needlog = 1;
needscan = 0;
/*
@@ -969,7 +962,7 @@ xfs_dir2_leaf_to_block(
/*
* Initialize the block tail.
*/
- btp = xfs_dir2_block_tail_p(mp, block);
+ btp = xfs_dir2_block_tail_p(mp, hdr);
btp->count = cpu_to_be32(be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale));
btp->stale = 0;
xfs_dir2_block_log_tail(tp, dbp);
@@ -978,7 +971,8 @@ xfs_dir2_leaf_to_block(
*/
lep = xfs_dir2_block_leaf_p(btp);
for (from = to = 0; from < be16_to_cpu(leaf->hdr.count); from++) {
- if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
+ if (leaf->ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
lep[to++] = leaf->ents[from];
}
@@ -988,7 +982,7 @@ xfs_dir2_leaf_to_block(
* Scan the bestfree if we need it and log the data block header.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(tp, dbp);
/*
@@ -1002,8 +996,8 @@ xfs_dir2_leaf_to_block(
/*
* Now see if the resulting block can be shrunken to shortform.
*/
- if ((size = xfs_dir2_block_sfsize(dp, block, &sfh)) >
- XFS_IFORK_DSIZE(dp)) {
+ size = xfs_dir2_block_sfsize(dp, hdr, &sfh);
+ if (size > XFS_IFORK_DSIZE(dp)) {
error = 0;
goto out;
}
@@ -1024,12 +1018,10 @@ xfs_dir2_sf_to_block(
xfs_da_args_t *args) /* operation arguments */
{
xfs_dir2_db_t blkno; /* dir-relative block # (0) */
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block leaf entries */
xfs_dabuf_t *bp; /* block buffer */
xfs_dir2_block_tail_t *btp; /* block tail pointer */
- char *buf; /* sf buffer */
- int buf_len;
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
int dummy; /* trash */
@@ -1043,7 +1035,8 @@ xfs_dir2_sf_to_block(
int newoffset; /* offset from current entry */
int offset; /* target block offset */
xfs_dir2_sf_entry_t *sfep; /* sf entry pointer */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *oldsfp; /* old shortform header */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform header */
__be16 *tagp; /* end of data entry */
xfs_trans_t *tp; /* transaction pointer */
struct xfs_name name;
@@ -1061,32 +1054,30 @@ xfs_dir2_sf_to_block(
ASSERT(XFS_FORCED_SHUTDOWN(mp));
return XFS_ERROR(EIO);
}
+
+ oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
+ ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(oldsfp->i8count));
+
/*
- * Copy the directory into the stack buffer.
+ * Copy the directory into a temporary buffer.
* Then pitch the incore inode data so we can make extents.
*/
+ sfp = kmem_alloc(dp->i_df.if_bytes, KM_SLEEP);
+ memcpy(sfp, oldsfp, dp->i_df.if_bytes);
- buf_len = dp->i_df.if_bytes;
- buf = kmem_alloc(buf_len, KM_SLEEP);
-
- memcpy(buf, sfp, buf_len);
- xfs_idata_realloc(dp, -buf_len, XFS_DATA_FORK);
+ xfs_idata_realloc(dp, -dp->i_df.if_bytes, XFS_DATA_FORK);
dp->i_d.di_size = 0;
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
- /*
- * Reset pointer - old sfp is gone.
- */
- sfp = (xfs_dir2_sf_t *)buf;
+
/*
* Add block 0 to the inode.
*/
error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
if (error) {
- kmem_free(buf);
+ kmem_free(sfp);
return error;
}
/*
@@ -1094,21 +1085,21 @@ xfs_dir2_sf_to_block(
*/
error = xfs_dir2_data_init(args, blkno, &bp);
if (error) {
- kmem_free(buf);
+ kmem_free(sfp);
return error;
}
- block = bp->data;
- block->hdr.magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
+ hdr = bp->data;
+ hdr->magic = cpu_to_be32(XFS_DIR2_BLOCK_MAGIC);
/*
* Compute size of block "tail" area.
*/
i = (uint)sizeof(*btp) +
- (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
+ (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t);
/*
* The whole thing is initialized to free by the init routine.
* Say we're using the leaf and tail area.
*/
- dup = (xfs_dir2_data_unused_t *)block->u;
+ dup = (xfs_dir2_data_unused_t *)(hdr + 1);
needlog = needscan = 0;
xfs_dir2_data_use_free(tp, bp, dup, mp->m_dirblksize - i, i, &needlog,
&needscan);
@@ -1116,50 +1107,51 @@ xfs_dir2_sf_to_block(
/*
* Fill in the tail.
*/
- btp = xfs_dir2_block_tail_p(mp, block);
- btp->count = cpu_to_be32(sfp->hdr.count + 2); /* ., .. */
+ btp = xfs_dir2_block_tail_p(mp, hdr);
+ btp->count = cpu_to_be32(sfp->count + 2); /* ., .. */
btp->stale = 0;
blp = xfs_dir2_block_leaf_p(btp);
- endoffset = (uint)((char *)blp - (char *)block);
+ endoffset = (uint)((char *)blp - (char *)hdr);
/*
* Remove the freespace, we'll manage it.
*/
xfs_dir2_data_use_free(tp, bp, dup,
- (xfs_dir2_data_aoff_t)((char *)dup - (char *)block),
+ (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr),
be16_to_cpu(dup->length), &needlog, &needscan);
/*
* Create entry for .
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)block + XFS_DIR2_DATA_DOT_OFFSET);
+ ((char *)hdr + XFS_DIR2_DATA_DOT_OFFSET);
dep->inumber = cpu_to_be64(dp->i_ino);
dep->namelen = 1;
dep->name[0] = '.';
tagp = xfs_dir2_data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)block);
+ *tagp = cpu_to_be16((char *)dep - (char *)hdr);
xfs_dir2_data_log_entry(tp, bp, dep);
blp[0].hashval = cpu_to_be32(xfs_dir_hash_dot);
blp[0].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
- (char *)dep - (char *)block));
+ (char *)dep - (char *)hdr));
/*
* Create entry for ..
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)block + XFS_DIR2_DATA_DOTDOT_OFFSET);
- dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
+ ((char *)hdr + XFS_DIR2_DATA_DOTDOT_OFFSET);
+ dep->inumber = cpu_to_be64(xfs_dir2_sf_get_parent_ino(sfp));
dep->namelen = 2;
dep->name[0] = dep->name[1] = '.';
tagp = xfs_dir2_data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)block);
+ *tagp = cpu_to_be16((char *)dep - (char *)hdr);
xfs_dir2_data_log_entry(tp, bp, dep);
blp[1].hashval = cpu_to_be32(xfs_dir_hash_dotdot);
blp[1].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
- (char *)dep - (char *)block));
+ (char *)dep - (char *)hdr));
offset = XFS_DIR2_DATA_FIRST_OFFSET;
/*
* Loop over existing entries, stuff them in.
*/
- if ((i = 0) == sfp->hdr.count)
+ i = 0;
+ if (!sfp->count)
sfep = NULL;
else
sfep = xfs_dir2_sf_firstentry(sfp);
@@ -1179,43 +1171,40 @@ xfs_dir2_sf_to_block(
* There should be a hole here, make one.
*/
if (offset < newoffset) {
- dup = (xfs_dir2_data_unused_t *)
- ((char *)block + offset);
+ dup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
dup->length = cpu_to_be16(newoffset - offset);
*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16(
- ((char *)dup - (char *)block));
+ ((char *)dup - (char *)hdr));
xfs_dir2_data_log_unused(tp, bp, dup);
- (void)xfs_dir2_data_freeinsert((xfs_dir2_data_t *)block,
- dup, &dummy);
+ xfs_dir2_data_freeinsert(hdr, dup, &dummy);
offset += be16_to_cpu(dup->length);
continue;
}
/*
* Copy a real entry.
*/
- dep = (xfs_dir2_data_entry_t *)((char *)block + newoffset);
- dep->inumber = cpu_to_be64(xfs_dir2_sf_get_inumber(sfp,
- xfs_dir2_sf_inumberp(sfep)));
+ dep = (xfs_dir2_data_entry_t *)((char *)hdr + newoffset);
+ dep->inumber = cpu_to_be64(xfs_dir2_sfe_get_ino(sfp, sfep));
dep->namelen = sfep->namelen;
memcpy(dep->name, sfep->name, dep->namelen);
tagp = xfs_dir2_data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)block);
+ *tagp = cpu_to_be16((char *)dep - (char *)hdr);
xfs_dir2_data_log_entry(tp, bp, dep);
name.name = sfep->name;
name.len = sfep->namelen;
blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
hashname(&name));
blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
- (char *)dep - (char *)block));
- offset = (int)((char *)(tagp + 1) - (char *)block);
- if (++i == sfp->hdr.count)
+ (char *)dep - (char *)hdr));
+ offset = (int)((char *)(tagp + 1) - (char *)hdr);
+ if (++i == sfp->count)
sfep = NULL;
else
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
/* Done with the temporary buffer */
- kmem_free(buf);
+ kmem_free(sfp);
/*
* Sort the leaf entries by hash value.
*/
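
The conversion above snapshots the inline (shortform) directory into a heap buffer before tearing down the inode's data fork, then replays each shortform entry into the freshly initialized block; the patch merely drops the separate buf/buf_len variables in favour of typing the copy as a shortform header directly. The shape of that pattern, sketched in userspace with malloc standing in for kmem_alloc and a hypothetical replay_entry() in place of the real entry construction:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-in for building one data entry in the new block */
static void replay_entry(const char *name) { printf("replayed %s\n", name); }

int main(void)
{
	const char inline_fork[] = "a\0bb\0ccc";     /* toy shortform data */
	size_t bytes = sizeof(inline_fork);

	char *sfp = malloc(bytes);                   /* kmem_alloc(..., KM_SLEEP) */
	if (!sfp)
		return 1;
	memcpy(sfp, inline_fork, bytes);             /* snapshot the old format */

	/* ...inline fork released, block 0 allocated and initialized here... */

	for (char *p = sfp; p < sfp + bytes; p += strlen(p) + 1)
		replay_entry(p);                     /* walk the snapshot */

	free(sfp);                                   /* done with the temp buffer */
	return 0;
}
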
diff --git a/fs/xfs/xfs_dir2_block.h b/fs/xfs/xfs_dir2_block.h
deleted file mode 100644
index 10e68967638..00000000000
--- a/fs/xfs/xfs_dir2_block.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_BLOCK_H__
-#define __XFS_DIR2_BLOCK_H__
-
-/*
- * xfs_dir2_block.h
- * Directory version 2, single block format structures
- */
-
-struct uio;
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_dir2_data_hdr;
-struct xfs_dir2_leaf_entry;
-struct xfs_inode;
-struct xfs_mount;
-struct xfs_trans;
-
-/*
- * The single block format is as follows:
- * xfs_dir2_data_hdr_t structure
- * xfs_dir2_data_entry_t and xfs_dir2_data_unused_t structures
- * xfs_dir2_leaf_entry_t structures
- * xfs_dir2_block_tail_t structure
- */
-
-#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: for one block dirs */
-
-typedef struct xfs_dir2_block_tail {
- __be32 count; /* count of leaf entries */
- __be32 stale; /* count of stale lf entries */
-} xfs_dir2_block_tail_t;
-
-/*
- * Generic single-block structure, for xfs_db.
- */
-typedef struct xfs_dir2_block {
- xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_BLOCK_MAGIC */
- xfs_dir2_data_union_t u[1];
- xfs_dir2_leaf_entry_t leaf[1];
- xfs_dir2_block_tail_t tail;
-} xfs_dir2_block_t;
-
-/*
- * Pointer to the leaf header embedded in a data block (1-block format)
- */
-static inline xfs_dir2_block_tail_t *
-xfs_dir2_block_tail_p(struct xfs_mount *mp, xfs_dir2_block_t *block)
-{
- return (((xfs_dir2_block_tail_t *)
- ((char *)(block) + (mp)->m_dirblksize)) - 1);
-}
-
-/*
- * Pointer to the leaf entries embedded in a data block (1-block format)
- */
-static inline struct xfs_dir2_leaf_entry *
-xfs_dir2_block_leaf_p(xfs_dir2_block_tail_t *btp)
-{
- return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
-}
-
-/*
- * Function declarations.
- */
-extern int xfs_dir2_block_addname(struct xfs_da_args *args);
-extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent,
- xfs_off_t *offset, filldir_t filldir);
-extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
-extern int xfs_dir2_block_removename(struct xfs_da_args *args);
-extern int xfs_dir2_block_replace(struct xfs_da_args *args);
-extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
- struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
-extern int xfs_dir2_sf_to_block(struct xfs_da_args *args);
-
-#endif /* __XFS_DIR2_BLOCK_H__ */
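
The two inline helpers deleted with this header survive in the new shared header, retyped to take xfs_dir2_data_hdr_t; their geometry is unchanged: the tail is the last struct in the directory block, and the leaf table sits immediately below it, indexed backward by count. A compilable restatement of that layout, assuming a 4k block and host-endian fields for simplicity:

#include <stdint.h>
#include <stdio.h>

struct block_tail { uint32_t count, stale; };
struct leaf_entry { uint32_t hashval, address; };

int main(void)
{
	unsigned char blk[4096];            /* mp->m_dirblksize bytes */

	/* xfs_dir2_block_tail_p: last tail-sized struct in the block */
	struct block_tail *btp =
		(struct block_tail *)(blk + sizeof(blk)) - 1;
	btp->count = 3;                     /* host order here; be32 on disk */

	/* xfs_dir2_block_leaf_p: count entries ending right at the tail */
	struct leaf_entry *blp = (struct leaf_entry *)btp - btp->count;

	printf("tail at byte %ld, leaf table at byte %ld\n",
	       (long)((char *)btp - (char *)blk),
	       (long)((char *)blp - (char *)blk));
	return 0;
}
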
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index 921595b84f5..5bbe2a8a023 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -23,18 +23,18 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
+STATIC xfs_dir2_data_free_t *
+xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
+
#ifdef DEBUG
/*
* Check the consistency of the data block.
@@ -50,7 +50,7 @@ xfs_dir2_data_check(
xfs_dir2_data_free_t *bf; /* bestfree table */
xfs_dir2_block_tail_t *btp=NULL; /* block tail */
int count; /* count of entries found */
- xfs_dir2_data_t *d; /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_free_t *dfp; /* bestfree entry */
xfs_dir2_data_unused_t *dup; /* unused entry */
@@ -66,17 +66,19 @@ xfs_dir2_data_check(
struct xfs_name name;
mp = dp->i_mount;
- d = bp->data;
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
- bf = d->hdr.bestfree;
- p = (char *)d->u;
- if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
- btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
+ hdr = bp->data;
+ bf = hdr->bestfree;
+ p = (char *)(hdr + 1);
+
+ if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
+ btp = xfs_dir2_block_tail_p(mp, hdr);
lep = xfs_dir2_block_leaf_p(btp);
endp = (char *)lep;
- } else
- endp = (char *)d + mp->m_dirblksize;
+ } else {
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
+ endp = (char *)hdr + mp->m_dirblksize;
+ }
+
count = lastfree = freeseen = 0;
/*
* Account for zero bestfree entries.
@@ -108,8 +110,8 @@ xfs_dir2_data_check(
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
ASSERT(lastfree == 0);
ASSERT(be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
- (char *)dup - (char *)d);
- dfp = xfs_dir2_data_freefind(d, dup);
+ (char *)dup - (char *)hdr);
+ dfp = xfs_dir2_data_freefind(hdr, dup);
if (dfp) {
i = (int)(dfp - bf);
ASSERT((freeseen & (1 << i)) == 0);
@@ -132,13 +134,13 @@ xfs_dir2_data_check(
ASSERT(dep->namelen != 0);
ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0);
ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) ==
- (char *)dep - (char *)d);
+ (char *)dep - (char *)hdr);
count++;
lastfree = 0;
- if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
+ if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
(xfs_dir2_data_aoff_t)
- ((char *)dep - (char *)d));
+ ((char *)dep - (char *)hdr));
name.name = dep->name;
name.len = dep->namelen;
hash = mp->m_dirnameops->hashname(&name);
@@ -155,9 +157,10 @@ xfs_dir2_data_check(
* Need to have seen all the entries and all the bestfree slots.
*/
ASSERT(freeseen == 7);
- if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
+ if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
- if (be32_to_cpu(lep[i].address) == XFS_DIR2_NULL_DATAPTR)
+ if (lep[i].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
if (i > 0)
ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval));
@@ -172,9 +175,9 @@ xfs_dir2_data_check(
* Given a data block and an unused entry from that block,
* return the bestfree entry if any that corresponds to it.
*/
-xfs_dir2_data_free_t *
+STATIC xfs_dir2_data_free_t *
xfs_dir2_data_freefind(
- xfs_dir2_data_t *d, /* data block */
+ xfs_dir2_data_hdr_t *hdr, /* data block */
xfs_dir2_data_unused_t *dup) /* data unused entry */
{
xfs_dir2_data_free_t *dfp; /* bestfree entry */
@@ -184,17 +187,17 @@ xfs_dir2_data_freefind(
int seenzero; /* saw a 0 bestfree entry */
#endif
- off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)d);
+ off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
#if defined(DEBUG) && defined(__KERNEL__)
/*
* Validate some consistency in the bestfree table.
* Check order, non-overlapping entries, and if we find the
* one we're looking for it has to be exact.
*/
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
- for (dfp = &d->hdr.bestfree[0], seenzero = matched = 0;
- dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
+ for (dfp = &hdr->bestfree[0], seenzero = matched = 0;
+ dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT];
dfp++) {
if (!dfp->offset) {
ASSERT(!dfp->length);
@@ -210,7 +213,7 @@ xfs_dir2_data_freefind(
else
ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off);
ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length));
- if (dfp > &d->hdr.bestfree[0])
+ if (dfp > &hdr->bestfree[0])
ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length));
}
#endif
@@ -219,13 +222,13 @@ xfs_dir2_data_freefind(
* it can't be there since they're sorted.
*/
if (be16_to_cpu(dup->length) <
- be16_to_cpu(d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length))
+ be16_to_cpu(hdr->bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length))
return NULL;
/*
* Look at the three bestfree entries for our guy.
*/
- for (dfp = &d->hdr.bestfree[0];
- dfp < &d->hdr.bestfree[XFS_DIR2_DATA_FD_COUNT];
+ for (dfp = &hdr->bestfree[0];
+ dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT];
dfp++) {
if (!dfp->offset)
return NULL;
@@ -243,7 +246,7 @@ xfs_dir2_data_freefind(
*/
xfs_dir2_data_free_t * /* entry inserted */
xfs_dir2_data_freeinsert(
- xfs_dir2_data_t *d, /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr, /* data block pointer */
xfs_dir2_data_unused_t *dup, /* unused space */
int *loghead) /* log the data header (out) */
{
@@ -251,12 +254,13 @@ xfs_dir2_data_freeinsert(
xfs_dir2_data_free_t new; /* new bestfree entry */
#ifdef __KERNEL__
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
#endif
- dfp = d->hdr.bestfree;
+ dfp = hdr->bestfree;
new.length = dup->length;
- new.offset = cpu_to_be16((char *)dup - (char *)d);
+ new.offset = cpu_to_be16((char *)dup - (char *)hdr);
+
/*
* Insert at position 0, 1, or 2; or not at all.
*/
@@ -286,36 +290,36 @@ xfs_dir2_data_freeinsert(
*/
STATIC void
xfs_dir2_data_freeremove(
- xfs_dir2_data_t *d, /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr, /* data block header */
xfs_dir2_data_free_t *dfp, /* bestfree entry pointer */
int *loghead) /* out: log data header */
{
#ifdef __KERNEL__
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
#endif
/*
* It's the first entry, slide the next 2 up.
*/
- if (dfp == &d->hdr.bestfree[0]) {
- d->hdr.bestfree[0] = d->hdr.bestfree[1];
- d->hdr.bestfree[1] = d->hdr.bestfree[2];
+ if (dfp == &hdr->bestfree[0]) {
+ hdr->bestfree[0] = hdr->bestfree[1];
+ hdr->bestfree[1] = hdr->bestfree[2];
}
/*
* It's the second entry, slide the 3rd entry up.
*/
- else if (dfp == &d->hdr.bestfree[1])
- d->hdr.bestfree[1] = d->hdr.bestfree[2];
+ else if (dfp == &hdr->bestfree[1])
+ hdr->bestfree[1] = hdr->bestfree[2];
/*
* Must be the last entry.
*/
else
- ASSERT(dfp == &d->hdr.bestfree[2]);
+ ASSERT(dfp == &hdr->bestfree[2]);
/*
* Clear the 3rd entry, must be zero now.
*/
- d->hdr.bestfree[2].length = 0;
- d->hdr.bestfree[2].offset = 0;
+ hdr->bestfree[2].length = 0;
+ hdr->bestfree[2].offset = 0;
*loghead = 1;
}
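
xfs_dir2_data_freeremove spells the three cases out by hand because XFS_DIR2_DATA_FD_COUNT is known to be 3, but the operation is simply "slide the later slots up and zero the last one", which keeps the array sorted by length with the third entry vacated. An equivalent loop form, as a sketch:

#include <stdio.h>

struct data_free { unsigned short offset, length; };

static void freeremove(struct data_free bf[3], int idx)
{
	for (int i = idx; i < 2; i++)
		bf[i] = bf[i + 1];              /* slide the tail up */
	bf[2].offset = bf[2].length = 0;        /* third entry must end up zero */
}

int main(void)
{
	struct data_free bf[3] = { {16, 400}, {600, 120}, {900, 40} };

	freeremove(bf, 0);                      /* drop the largest entry */
	for (int i = 0; i < 3; i++)
		printf("bestfree[%d]: off=%u len=%u\n",
		       i, (unsigned)bf[i].offset, (unsigned)bf[i].length);
	return 0;
}
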
@@ -325,7 +329,7 @@ xfs_dir2_data_freeremove(
void
xfs_dir2_data_freescan(
xfs_mount_t *mp, /* filesystem mount point */
- xfs_dir2_data_t *d, /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr, /* data block header */
int *loghead) /* out: log data header */
{
xfs_dir2_block_tail_t *btp; /* block tail */
@@ -335,23 +339,23 @@ xfs_dir2_data_freescan(
char *p; /* current entry pointer */
#ifdef __KERNEL__
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
#endif
/*
* Start by clearing the table.
*/
- memset(d->hdr.bestfree, 0, sizeof(d->hdr.bestfree));
+ memset(hdr->bestfree, 0, sizeof(hdr->bestfree));
*loghead = 1;
/*
* Set up pointers.
*/
- p = (char *)d->u;
- if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC) {
- btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
+ p = (char *)(hdr + 1);
+ if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
+ btp = xfs_dir2_block_tail_p(mp, hdr);
endp = (char *)xfs_dir2_block_leaf_p(btp);
} else
- endp = (char *)d + mp->m_dirblksize;
+ endp = (char *)hdr + mp->m_dirblksize;
/*
* Loop over the block's entries.
*/
@@ -361,9 +365,9 @@ xfs_dir2_data_freescan(
* If it's a free entry, insert it.
*/
if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
- ASSERT((char *)dup - (char *)d ==
+ ASSERT((char *)dup - (char *)hdr ==
be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
- xfs_dir2_data_freeinsert(d, dup, loghead);
+ xfs_dir2_data_freeinsert(hdr, dup, loghead);
p += be16_to_cpu(dup->length);
}
/*
@@ -371,7 +375,7 @@ xfs_dir2_data_freescan(
*/
else {
dep = (xfs_dir2_data_entry_t *)p;
- ASSERT((char *)dep - (char *)d ==
+ ASSERT((char *)dep - (char *)hdr ==
be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)));
p += xfs_dir2_data_entsize(dep->namelen);
}
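
The rewritten scan walks the block linearly: a 0xffff freetag marks an unused span whose length field says how far to skip, and anything else is a live entry whose size comes from its name length. A simplified userspace walk showing the dispatch (host-endian fields and a fixed entry size are assumptions of the sketch; the kernel uses be16 fields and xfs_dir2_data_entsize):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FREE_TAG 0xffffu   /* XFS_DIR2_DATA_FREE_TAG */

struct unused { uint16_t freetag; uint16_t length; };

int main(void)
{
	unsigned char blk[24];
	memset(blk, 0, sizeof(blk));
	struct unused *dup = (struct unused *)blk;
	dup->freetag = FREE_TAG;            /* host order; be16 on disk */
	dup->length = 16;                   /* free span covers 16 bytes */

	char *p = (char *)blk, *end = (char *)blk + sizeof(blk);
	while (p < end) {
		struct unused *e = (struct unused *)p;
		if (e->freetag == FREE_TAG) {
			printf("free span of %u bytes\n", (unsigned)e->length);
			p += e->length;
		} else {
			printf("used entry\n"); /* kernel: entsize(namelen) */
			p += 8;                 /* assumed fixed size here */
		}
	}
	return 0;
}
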
@@ -389,7 +393,7 @@ xfs_dir2_data_init(
xfs_dabuf_t **bpp) /* output block buffer */
{
xfs_dabuf_t *bp; /* block buffer */
- xfs_dir2_data_t *d; /* pointer to block */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_inode_t *dp; /* incore directory inode */
xfs_dir2_data_unused_t *dup; /* unused entry pointer */
int error; /* error return value */
@@ -410,26 +414,28 @@ xfs_dir2_data_init(
return error;
}
ASSERT(bp != NULL);
+
/*
* Initialize the header.
*/
- d = bp->data;
- d->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
- d->hdr.bestfree[0].offset = cpu_to_be16(sizeof(d->hdr));
+ hdr = bp->data;
+ hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
+ hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr));
for (i = 1; i < XFS_DIR2_DATA_FD_COUNT; i++) {
- d->hdr.bestfree[i].length = 0;
- d->hdr.bestfree[i].offset = 0;
+ hdr->bestfree[i].length = 0;
+ hdr->bestfree[i].offset = 0;
}
+
/*
* Set up an unused entry for the block's body.
*/
- dup = &d->u[0].unused;
+ dup = (xfs_dir2_data_unused_t *)(hdr + 1);
dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
- t=mp->m_dirblksize - (uint)sizeof(d->hdr);
- d->hdr.bestfree[0].length = cpu_to_be16(t);
+ t = mp->m_dirblksize - (uint)sizeof(*hdr);
+ hdr->bestfree[0].length = cpu_to_be16(t);
dup->length = cpu_to_be16(t);
- *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)d);
+ *xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
/*
* Log it and return it.
*/
@@ -448,14 +454,14 @@ xfs_dir2_data_log_entry(
xfs_dabuf_t *bp, /* block buffer */
xfs_dir2_data_entry_t *dep) /* data entry pointer */
{
- xfs_dir2_data_t *d; /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr = bp->data;
+
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
- d = bp->data;
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
- xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)d),
+ xfs_da_log_buf(tp, bp, (uint)((char *)dep - (char *)hdr),
(uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
- (char *)d - 1));
+ (char *)hdr - 1));
}
/*
@@ -466,13 +472,12 @@ xfs_dir2_data_log_header(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp) /* block buffer */
{
- xfs_dir2_data_t *d; /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr = bp->data;
- d = bp->data;
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
- xfs_da_log_buf(tp, bp, (uint)((char *)&d->hdr - (char *)d),
- (uint)(sizeof(d->hdr) - 1));
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
+
+ xfs_da_log_buf(tp, bp, 0, sizeof(*hdr) - 1);
}
/*
@@ -484,23 +489,23 @@ xfs_dir2_data_log_unused(
xfs_dabuf_t *bp, /* block buffer */
xfs_dir2_data_unused_t *dup) /* data unused pointer */
{
- xfs_dir2_data_t *d; /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr = bp->data;
+
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
- d = bp->data;
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
/*
* Log the first part of the unused entry.
*/
- xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)d),
+ xfs_da_log_buf(tp, bp, (uint)((char *)dup - (char *)hdr),
(uint)((char *)&dup->length + sizeof(dup->length) -
- 1 - (char *)d));
+ 1 - (char *)hdr));
/*
* Log the end (tag) of the unused entry.
*/
xfs_da_log_buf(tp, bp,
- (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d),
- (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)d +
+ (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr),
+ (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr +
sizeof(xfs_dir2_data_off_t) - 1));
}
@@ -517,7 +522,7 @@ xfs_dir2_data_make_free(
int *needlogp, /* out: log header */
int *needscanp) /* out: regen bestfree */
{
- xfs_dir2_data_t *d; /* data block pointer */
+ xfs_dir2_data_hdr_t *hdr; /* data block pointer */
xfs_dir2_data_free_t *dfp; /* bestfree pointer */
char *endptr; /* end of data area */
xfs_mount_t *mp; /* filesystem mount point */
@@ -527,28 +532,29 @@ xfs_dir2_data_make_free(
xfs_dir2_data_unused_t *prevdup; /* unused entry before us */
mp = tp->t_mountp;
- d = bp->data;
+ hdr = bp->data;
+
/*
* Figure out where the end of the data area is.
*/
- if (be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC)
- endptr = (char *)d + mp->m_dirblksize;
+ if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC))
+ endptr = (char *)hdr + mp->m_dirblksize;
else {
xfs_dir2_block_tail_t *btp; /* block tail */
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
- btp = xfs_dir2_block_tail_p(mp, (xfs_dir2_block_t *)d);
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
+ btp = xfs_dir2_block_tail_p(mp, hdr);
endptr = (char *)xfs_dir2_block_leaf_p(btp);
}
/*
* If this isn't the start of the block, then back up to
* the previous entry and see if it's free.
*/
- if (offset > sizeof(d->hdr)) {
+ if (offset > sizeof(*hdr)) {
__be16 *tagp; /* tag just before us */
- tagp = (__be16 *)((char *)d + offset) - 1;
- prevdup = (xfs_dir2_data_unused_t *)((char *)d + be16_to_cpu(*tagp));
+ tagp = (__be16 *)((char *)hdr + offset) - 1;
+ prevdup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
prevdup = NULL;
} else
@@ -557,9 +563,9 @@ xfs_dir2_data_make_free(
* If this isn't the end of the block, see if the entry after
* us is free.
*/
- if ((char *)d + offset + len < endptr) {
+ if ((char *)hdr + offset + len < endptr) {
postdup =
- (xfs_dir2_data_unused_t *)((char *)d + offset + len);
+ (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
postdup = NULL;
} else
@@ -576,21 +582,21 @@ xfs_dir2_data_make_free(
/*
* See if prevdup and/or postdup are in bestfree table.
*/
- dfp = xfs_dir2_data_freefind(d, prevdup);
- dfp2 = xfs_dir2_data_freefind(d, postdup);
+ dfp = xfs_dir2_data_freefind(hdr, prevdup);
+ dfp2 = xfs_dir2_data_freefind(hdr, postdup);
/*
* We need a rescan unless there are exactly 2 free entries
* namely our two. Then we know what's happening, otherwise
* since the third bestfree is there, there might be more
* entries.
*/
- needscan = (d->hdr.bestfree[2].length != 0);
+ needscan = (hdr->bestfree[2].length != 0);
/*
* Fix up the new big freespace.
*/
be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length));
*xfs_dir2_data_unused_tag_p(prevdup) =
- cpu_to_be16((char *)prevdup - (char *)d);
+ cpu_to_be16((char *)prevdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, prevdup);
if (!needscan) {
/*
@@ -600,18 +606,18 @@ xfs_dir2_data_make_free(
* Remove entry 1 first then entry 0.
*/
ASSERT(dfp && dfp2);
- if (dfp == &d->hdr.bestfree[1]) {
- dfp = &d->hdr.bestfree[0];
+ if (dfp == &hdr->bestfree[1]) {
+ dfp = &hdr->bestfree[0];
ASSERT(dfp2 == dfp);
- dfp2 = &d->hdr.bestfree[1];
+ dfp2 = &hdr->bestfree[1];
}
- xfs_dir2_data_freeremove(d, dfp2, needlogp);
- xfs_dir2_data_freeremove(d, dfp, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp2, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
/*
* Now insert the new entry.
*/
- dfp = xfs_dir2_data_freeinsert(d, prevdup, needlogp);
- ASSERT(dfp == &d->hdr.bestfree[0]);
+ dfp = xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
+ ASSERT(dfp == &hdr->bestfree[0]);
ASSERT(dfp->length == prevdup->length);
ASSERT(!dfp[1].length);
ASSERT(!dfp[2].length);
@@ -621,10 +627,10 @@ xfs_dir2_data_make_free(
* The entry before us is free, merge with it.
*/
else if (prevdup) {
- dfp = xfs_dir2_data_freefind(d, prevdup);
+ dfp = xfs_dir2_data_freefind(hdr, prevdup);
be16_add_cpu(&prevdup->length, len);
*xfs_dir2_data_unused_tag_p(prevdup) =
- cpu_to_be16((char *)prevdup - (char *)d);
+ cpu_to_be16((char *)prevdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, prevdup);
/*
* If the previous entry was in the table, the new entry
@@ -632,27 +638,27 @@ xfs_dir2_data_make_free(
* the old one and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(d, dfp, needlogp);
- (void)xfs_dir2_data_freeinsert(d, prevdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
}
/*
* Otherwise we need a scan if the new entry is big enough.
*/
else {
needscan = be16_to_cpu(prevdup->length) >
- be16_to_cpu(d->hdr.bestfree[2].length);
+ be16_to_cpu(hdr->bestfree[2].length);
}
}
/*
* The following entry is free, merge with it.
*/
else if (postdup) {
- dfp = xfs_dir2_data_freefind(d, postdup);
- newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
+ dfp = xfs_dir2_data_freefind(hdr, postdup);
+ newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
*xfs_dir2_data_unused_tag_p(newdup) =
- cpu_to_be16((char *)newdup - (char *)d);
+ cpu_to_be16((char *)newdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup);
/*
* If the following entry was in the table, the new entry
@@ -660,28 +666,28 @@ xfs_dir2_data_make_free(
* the old one and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(d, dfp, needlogp);
- (void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
}
/*
* Otherwise we need a scan if the new entry is big enough.
*/
else {
needscan = be16_to_cpu(newdup->length) >
- be16_to_cpu(d->hdr.bestfree[2].length);
+ be16_to_cpu(hdr->bestfree[2].length);
}
}
/*
* Neither neighbor is free. Make a new entry.
*/
else {
- newdup = (xfs_dir2_data_unused_t *)((char *)d + offset);
+ newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
newdup->length = cpu_to_be16(len);
*xfs_dir2_data_unused_tag_p(newdup) =
- cpu_to_be16((char *)newdup - (char *)d);
+ cpu_to_be16((char *)newdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup);
- (void)xfs_dir2_data_freeinsert(d, newdup, needlogp);
+ xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
}
*needscanp = needscan;
}
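
Stripped of the bestfree bookkeeping, xfs_dir2_data_make_free is interval coalescing: the freed range is merged with a free neighbour on either side, which yields the four cases handled above (both neighbours free, only the previous, only the following, neither). The core arithmetic, sketched:

#include <stdio.h>

struct span { int off, len; };

/* merge freed [off, off+len) with whichever neighbours are free */
static struct span merge(struct span *prev, struct span *post,
			 int off, int len)
{
	struct span s = { off, len };
	if (prev) {                 /* grow the previous span forward */
		s.off = prev->off;
		s.len += prev->len;
	}
	if (post)                   /* swallow the following span */
		s.len += post->len;
	return s;
}

int main(void)
{
	struct span prev = { 100, 50 }, post = { 200, 30 };
	struct span s = merge(&prev, &post, 150, 50);
	printf("merged free span: off=%d len=%d\n", s.off, s.len);
	return 0;
}
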
@@ -699,7 +705,7 @@ xfs_dir2_data_use_free(
int *needlogp, /* out: need to log header */
int *needscanp) /* out: need regen bestfree */
{
- xfs_dir2_data_t *d; /* data block */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_free_t *dfp; /* bestfree pointer */
int matchback; /* matches end of freespace */
int matchfront; /* matches start of freespace */
@@ -708,24 +714,24 @@ xfs_dir2_data_use_free(
xfs_dir2_data_unused_t *newdup2; /* another new unused entry */
int oldlen; /* old unused entry's length */
- d = bp->data;
- ASSERT(be32_to_cpu(d->hdr.magic) == XFS_DIR2_DATA_MAGIC ||
- be32_to_cpu(d->hdr.magic) == XFS_DIR2_BLOCK_MAGIC);
+ hdr = bp->data;
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
+ hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
- ASSERT(offset >= (char *)dup - (char *)d);
- ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)d);
- ASSERT((char *)dup - (char *)d == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
+ ASSERT(offset >= (char *)dup - (char *)hdr);
+ ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr);
+ ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
/*
* Look up the entry in the bestfree table.
*/
- dfp = xfs_dir2_data_freefind(d, dup);
+ dfp = xfs_dir2_data_freefind(hdr, dup);
oldlen = be16_to_cpu(dup->length);
- ASSERT(dfp || oldlen <= be16_to_cpu(d->hdr.bestfree[2].length));
+ ASSERT(dfp || oldlen <= be16_to_cpu(hdr->bestfree[2].length));
/*
* Check for alignment with front and back of the entry.
*/
- matchfront = (char *)dup - (char *)d == offset;
- matchback = (char *)dup + oldlen - (char *)d == offset + len;
+ matchfront = (char *)dup - (char *)hdr == offset;
+ matchback = (char *)dup + oldlen - (char *)hdr == offset + len;
ASSERT(*needscanp == 0);
needscan = 0;
/*
@@ -734,9 +740,9 @@ xfs_dir2_data_use_free(
*/
if (matchfront && matchback) {
if (dfp) {
- needscan = (d->hdr.bestfree[2].offset != 0);
+ needscan = (hdr->bestfree[2].offset != 0);
if (!needscan)
- xfs_dir2_data_freeremove(d, dfp, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
}
}
/*
@@ -744,27 +750,27 @@ xfs_dir2_data_use_free(
* Make a new entry with the remaining freespace.
*/
else if (matchfront) {
- newdup = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
+ newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
newdup->length = cpu_to_be16(oldlen - len);
*xfs_dir2_data_unused_tag_p(newdup) =
- cpu_to_be16((char *)newdup - (char *)d);
+ cpu_to_be16((char *)newdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup);
/*
* If it was in the table, remove it and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(d, dfp, needlogp);
- dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
ASSERT(dfp != NULL);
ASSERT(dfp->length == newdup->length);
- ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d);
+ ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
/*
* If we got inserted at the last slot,
* that means we don't know if there was a better
* choice for the last slot, or not. Rescan.
*/
- needscan = dfp == &d->hdr.bestfree[2];
+ needscan = dfp == &hdr->bestfree[2];
}
}
/*
@@ -773,25 +779,25 @@ xfs_dir2_data_use_free(
*/
else if (matchback) {
newdup = dup;
- newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
+ newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup);
*xfs_dir2_data_unused_tag_p(newdup) =
- cpu_to_be16((char *)newdup - (char *)d);
+ cpu_to_be16((char *)newdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup);
/*
* If it was in the table, remove it and add the new one.
*/
if (dfp) {
- xfs_dir2_data_freeremove(d, dfp, needlogp);
- dfp = xfs_dir2_data_freeinsert(d, newdup, needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
ASSERT(dfp != NULL);
ASSERT(dfp->length == newdup->length);
- ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)d);
+ ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
/*
* If we got inserted at the last slot,
* that means we don't know if there was a better
* choice for the last slot, or not. Rescan.
*/
- needscan = dfp == &d->hdr.bestfree[2];
+ needscan = dfp == &hdr->bestfree[2];
}
}
/*
@@ -800,15 +806,15 @@ xfs_dir2_data_use_free(
*/
else {
newdup = dup;
- newdup->length = cpu_to_be16(((char *)d + offset) - (char *)newdup);
+ newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup);
*xfs_dir2_data_unused_tag_p(newdup) =
- cpu_to_be16((char *)newdup - (char *)d);
+ cpu_to_be16((char *)newdup - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup);
- newdup2 = (xfs_dir2_data_unused_t *)((char *)d + offset + len);
+ newdup2 = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length));
*xfs_dir2_data_unused_tag_p(newdup2) =
- cpu_to_be16((char *)newdup2 - (char *)d);
+ cpu_to_be16((char *)newdup2 - (char *)hdr);
xfs_dir2_data_log_unused(tp, bp, newdup2);
/*
* If the old entry was in the table, we need to scan
@@ -819,13 +825,12 @@ xfs_dir2_data_use_free(
* the 2 new will work.
*/
if (dfp) {
- needscan = (d->hdr.bestfree[2].length != 0);
+ needscan = (hdr->bestfree[2].length != 0);
if (!needscan) {
- xfs_dir2_data_freeremove(d, dfp, needlogp);
- (void)xfs_dir2_data_freeinsert(d, newdup,
- needlogp);
- (void)xfs_dir2_data_freeinsert(d, newdup2,
- needlogp);
+ xfs_dir2_data_freeremove(hdr, dfp, needlogp);
+ xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
+ xfs_dir2_data_freeinsert(hdr, newdup2,
+ needlogp);
}
}
}
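
xfs_dir2_data_use_free is the inverse operation: carving a used range out of an existing free span, where matchfront/matchback decide whether zero, one, or two unused remnants are left behind. A sketch of that case split with example offsets:

#include <stdio.h>

int main(void)
{
	int dup_off = 64, dup_len = 96;        /* the existing free span */
	int off = 96, len = 32;                /* the piece being used */
	int matchfront = (off == dup_off);
	int matchback  = (off + len == dup_off + dup_len);

	if (matchfront && matchback)
		puts("exact fit: free entry disappears");
	else if (matchfront)
		printf("remnant at back: off=%d len=%d\n",
		       off + len, dup_len - len);
	else if (matchback)
		printf("remnant at front: off=%d len=%d\n",
		       dup_off, off - dup_off);
	else
		printf("two remnants: [%d,%d) and [%d,%d)\n",
		       dup_off, off, off + len, dup_off + dup_len);
	return 0;
}
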
diff --git a/fs/xfs/xfs_dir2_data.h b/fs/xfs/xfs_dir2_data.h
deleted file mode 100644
index efbc290c7fe..00000000000
--- a/fs/xfs/xfs_dir2_data.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_DATA_H__
-#define __XFS_DIR2_DATA_H__
-
-/*
- * Directory format 2, data block structures.
- */
-
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_inode;
-struct xfs_trans;
-
-/*
- * Constants.
- */
-#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: for multiblock dirs */
-#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */
-#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG)
-#define XFS_DIR2_DATA_FREE_TAG 0xffff
-#define XFS_DIR2_DATA_FD_COUNT 3
-
-/*
- * Directory address space divided into sections,
- * spaces separated by 32GB.
- */
-#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
-#define XFS_DIR2_DATA_SPACE 0
-#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
-#define XFS_DIR2_DATA_FIRSTDB(mp) \
- xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET)
-
-/*
- * Offsets of . and .. in data space (always block 0)
- */
-#define XFS_DIR2_DATA_DOT_OFFSET \
- ((xfs_dir2_data_aoff_t)sizeof(xfs_dir2_data_hdr_t))
-#define XFS_DIR2_DATA_DOTDOT_OFFSET \
- (XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1))
-#define XFS_DIR2_DATA_FIRST_OFFSET \
- (XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2))
-
-/*
- * Structures.
- */
-
-/*
- * Describe a free area in the data block.
- * The freespace will be formatted as a xfs_dir2_data_unused_t.
- */
-typedef struct xfs_dir2_data_free {
- __be16 offset; /* start of freespace */
- __be16 length; /* length of freespace */
-} xfs_dir2_data_free_t;
-
-/*
- * Header for the data blocks.
- * Always at the beginning of a directory-sized block.
- * The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
- */
-typedef struct xfs_dir2_data_hdr {
- __be32 magic; /* XFS_DIR2_DATA_MAGIC */
- /* or XFS_DIR2_BLOCK_MAGIC */
- xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT];
-} xfs_dir2_data_hdr_t;
-
-/*
- * Active entry in a data block. Aligned to 8 bytes.
- * Tag appears as the last 2 bytes.
- */
-typedef struct xfs_dir2_data_entry {
- __be64 inumber; /* inode number */
- __u8 namelen; /* name length */
- __u8 name[1]; /* name bytes, no null */
- /* variable offset */
- __be16 tag; /* starting offset of us */
-} xfs_dir2_data_entry_t;
-
-/*
- * Unused entry in a data block. Aligned to 8 bytes.
- * Tag appears as the last 2 bytes.
- */
-typedef struct xfs_dir2_data_unused {
- __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */
- __be16 length; /* total free length */
- /* variable offset */
- __be16 tag; /* starting offset of us */
-} xfs_dir2_data_unused_t;
-
-typedef union {
- xfs_dir2_data_entry_t entry;
- xfs_dir2_data_unused_t unused;
-} xfs_dir2_data_union_t;
-
-/*
- * Generic data block structure, for xfs_db.
- */
-typedef struct xfs_dir2_data {
- xfs_dir2_data_hdr_t hdr; /* magic XFS_DIR2_DATA_MAGIC */
- xfs_dir2_data_union_t u[1];
-} xfs_dir2_data_t;
-
-/*
- * Macros.
- */
-
-/*
- * Size of a data entry.
- */
-static inline int xfs_dir2_data_entsize(int n)
-{
- return (int)roundup(offsetof(xfs_dir2_data_entry_t, name[0]) + (n) + \
- (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN);
-}
-
-/*
- * Pointer to an entry's tag word.
- */
-static inline __be16 *
-xfs_dir2_data_entry_tag_p(xfs_dir2_data_entry_t *dep)
-{
- return (__be16 *)((char *)dep +
- xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
-}
-
-/*
- * Pointer to a freespace's tag word.
- */
-static inline __be16 *
-xfs_dir2_data_unused_tag_p(xfs_dir2_data_unused_t *dup)
-{
- return (__be16 *)((char *)dup +
- be16_to_cpu(dup->length) - sizeof(__be16));
-}
-
-/*
- * Function declarations.
- */
-#ifdef DEBUG
-extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
-#else
-#define xfs_dir2_data_check(dp,bp)
-#endif
-extern xfs_dir2_data_free_t *xfs_dir2_data_freefind(xfs_dir2_data_t *d,
- xfs_dir2_data_unused_t *dup);
-extern xfs_dir2_data_free_t *xfs_dir2_data_freeinsert(xfs_dir2_data_t *d,
- xfs_dir2_data_unused_t *dup, int *loghead);
-extern void xfs_dir2_data_freescan(struct xfs_mount *mp, xfs_dir2_data_t *d,
- int *loghead);
-extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
- struct xfs_dabuf **bpp);
-extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
- xfs_dir2_data_entry_t *dep);
-extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
- struct xfs_dabuf *bp);
-extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
- xfs_dir2_data_unused_t *dup);
-extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
- xfs_dir2_data_aoff_t offset,
- xfs_dir2_data_aoff_t len, int *needlogp,
- int *needscanp);
-extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
- xfs_dir2_data_unused_t *dup,
- xfs_dir2_data_aoff_t offset,
- xfs_dir2_data_aoff_t len, int *needlogp,
- int *needscanp);
-
-#endif /* __XFS_DIR2_DATA_H__ */
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
new file mode 100644
index 00000000000..07270981f48
--- /dev/null
+++ b/fs/xfs/xfs_dir2_format.h
@@ -0,0 +1,597 @@
+/*
+ * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_DIR2_FORMAT_H__
+#define __XFS_DIR2_FORMAT_H__
+
+/*
+ * Directory version 2.
+ *
+ * There are 4 possible formats:
+ * - shortform - embedded into the inode
+ * - single block - data with embedded leaf at the end
+ * - multiple data blocks, single leaf+freeindex block
+ * - data blocks, node and leaf blocks (btree), freeindex blocks
+ *
+ * Note: many node block structures and constants are shared with the attr
+ * code and are defined in xfs_da_btree.h.
+ */
+
+#define XFS_DIR2_BLOCK_MAGIC 0x58443242 /* XD2B: single block dirs */
+#define XFS_DIR2_DATA_MAGIC 0x58443244 /* XD2D: multiblock dirs */
+#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F: free index blocks */
+
+/*
+ * Byte offset in data block and shortform entry.
+ */
+typedef __uint16_t xfs_dir2_data_off_t;
+#define NULLDATAOFF 0xffffU
+typedef uint xfs_dir2_data_aoff_t; /* argument form */
+
+/*
+ * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t.
+ * Only 16 bits are needed; this is the byte offset into the single block form.
+ */
+typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t;
+
+/*
+ * Offset in data space of a data entry.
+ */
+typedef __uint32_t xfs_dir2_dataptr_t;
+#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff)
+#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0)
+
+/*
+ * Byte offset in a directory.
+ */
+typedef xfs_off_t xfs_dir2_off_t;
+
+/*
+ * Directory block number (logical dirblk in file)
+ */
+typedef __uint32_t xfs_dir2_db_t;
+
+/*
+ * Inode number stored as 8 8-bit values.
+ */
+typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t;
+
+/*
+ * Inode number stored as 4 8-bit values.
+ * Works a lot of the time, when all the inode numbers in a directory
+ * fit in 32 bits.
+ */
+typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t;
+
+typedef union {
+ xfs_dir2_ino8_t i8;
+ xfs_dir2_ino4_t i4;
+} xfs_dir2_inou_t;
+#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL)
+
+/*
+ * Directory layout when stored internal to an inode.
+ *
+ * Small directories are packed as tightly as possible so as to fit into the
+ * literal area of the inode. These "shortform" directories consist of a
+ * single xfs_dir2_sf_hdr header followed by zero or more xfs_dir2_sf_entry
+ * structures. Due to the different inode number storage size and the
+ * variable-length name field in the xfs_dir2_sf_entry, all these structures
+ * are variable length, and the accessors in this file should be used to
+ * iterate over them.
+ */
+typedef struct xfs_dir2_sf_hdr {
+ __uint8_t count; /* count of entries */
+ __uint8_t i8count; /* count of 8-byte inode #s */
+ xfs_dir2_inou_t parent; /* parent dir inode number */
+} __arch_pack xfs_dir2_sf_hdr_t;
+
+typedef struct xfs_dir2_sf_entry {
+ __u8 namelen; /* actual name length */
+ xfs_dir2_sf_off_t offset; /* saved offset */
+ __u8 name[]; /* name, variable size */
+ /*
+ * An xfs_dir2_ino8_t or xfs_dir2_ino4_t follows here, at a
+ * variable offset after the name.
+ */
+} __arch_pack xfs_dir2_sf_entry_t;
+
+static inline int xfs_dir2_sf_hdr_size(int i8count)
+{
+ return sizeof(struct xfs_dir2_sf_hdr) -
+ (i8count == 0) *
+ (sizeof(xfs_dir2_ino8_t) - sizeof(xfs_dir2_ino4_t));
+}
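+
+/*
+ * Editorial sketch, not part of this patch: with the packed layout above
+ * the header is count (1) + i8count (1) + parent (8) = 10 bytes when any
+ * inode number in the directory needs 8 bytes, and 10 - (8 - 4) = 6 bytes
+ * when i8count is zero and the parent fits in a xfs_dir2_ino4_t.
+ */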
+
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
+{
+ return get_unaligned_be16(&sfep->offset.i);
+}
+
+static inline void
+xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
+{
+ put_unaligned_be16(off, &sfep->offset.i);
+}
+
+static inline int
+xfs_dir2_sf_entsize(struct xfs_dir2_sf_hdr *hdr, int len)
+{
+ return sizeof(struct xfs_dir2_sf_entry) + /* namelen + offset */
+ len + /* name */
+ (hdr->i8count ? /* ino */
+ sizeof(xfs_dir2_ino8_t) :
+ sizeof(xfs_dir2_ino4_t));
+}
+
+static inline struct xfs_dir2_sf_entry *
+xfs_dir2_sf_firstentry(struct xfs_dir2_sf_hdr *hdr)
+{
+ return (struct xfs_dir2_sf_entry *)
+ ((char *)hdr + xfs_dir2_sf_hdr_size(hdr->i8count));
+}
+
+static inline struct xfs_dir2_sf_entry *
+xfs_dir2_sf_nextentry(struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return (struct xfs_dir2_sf_entry *)
+ ((char *)sfep + xfs_dir2_sf_entsize(hdr, sfep->namelen));
+}
+
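+/*
+ * Editorial sketch, not part of this patch: a minimal walk over all
+ * shortform entries using the accessors above. Every entry is variable
+ * length, so stepping from one entry to the next must go through
+ * xfs_dir2_sf_nextentry(). The function name is hypothetical.
+ */
+static inline void
+xfs_dir2_sf_walk_example(struct xfs_dir2_sf_hdr *sfp)
+{
+	struct xfs_dir2_sf_entry	*sfep = xfs_dir2_sf_firstentry(sfp);
+	int				i;
+
+	for (i = 0; i < sfp->count; i++) {
+		/* sfep->name[0 .. sfep->namelen - 1] is the raw name */
+		sfep = xfs_dir2_sf_nextentry(sfp, sfep);
+	}
+}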
+
+/*
+ * Data block structures.
+ *
+ * A pure data block looks like the following drawing on disk:
+ *
+ * +-------------------------------------------------+
+ * | xfs_dir2_data_hdr_t |
+ * +-------------------------------------------------+
+ * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
+ * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
+ * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
+ * | ... |
+ * +-------------------------------------------------+
+ * | unused space |
+ * +-------------------------------------------------+
+ *
+ * As all the entries are variable size structures the accessors below should
+ * be used to iterate over them.
+ *
+ * In addition to the pure data blocks for the data and node formats,
+ * most structures are also used for the combined data/freespace "block"
+ * format below.
+ */
+
+#define XFS_DIR2_DATA_ALIGN_LOG 3 /* i.e., 8 bytes */
+#define XFS_DIR2_DATA_ALIGN (1 << XFS_DIR2_DATA_ALIGN_LOG)
+#define XFS_DIR2_DATA_FREE_TAG 0xffff
+#define XFS_DIR2_DATA_FD_COUNT 3
+
+/*
+ * Directory address space divided into sections,
+ * spaces separated by 32GB.
+ */
+#define XFS_DIR2_SPACE_SIZE (1ULL << (32 + XFS_DIR2_DATA_ALIGN_LOG))
+#define XFS_DIR2_DATA_SPACE 0
+#define XFS_DIR2_DATA_OFFSET (XFS_DIR2_DATA_SPACE * XFS_DIR2_SPACE_SIZE)
+#define XFS_DIR2_DATA_FIRSTDB(mp) \
+ xfs_dir2_byte_to_db(mp, XFS_DIR2_DATA_OFFSET)
+
+/*
+ * Offsets of . and .. in data space (always block 0)
+ */
+#define XFS_DIR2_DATA_DOT_OFFSET \
+ ((xfs_dir2_data_aoff_t)sizeof(struct xfs_dir2_data_hdr))
+#define XFS_DIR2_DATA_DOTDOT_OFFSET \
+ (XFS_DIR2_DATA_DOT_OFFSET + xfs_dir2_data_entsize(1))
+#define XFS_DIR2_DATA_FIRST_OFFSET \
+ (XFS_DIR2_DATA_DOTDOT_OFFSET + xfs_dir2_data_entsize(2))
+
+/*
+ * Describe a free area in the data block.
+ *
+ * The freespace will be formatted as a xfs_dir2_data_unused_t.
+ */
+typedef struct xfs_dir2_data_free {
+ __be16 offset; /* start of freespace */
+ __be16 length; /* length of freespace */
+} xfs_dir2_data_free_t;
+
+/*
+ * Header for the data blocks.
+ *
+ * The code knows that XFS_DIR2_DATA_FD_COUNT is 3.
+ */
+typedef struct xfs_dir2_data_hdr {
+ __be32 magic; /* XFS_DIR2_DATA_MAGIC or */
+ /* XFS_DIR2_BLOCK_MAGIC */
+ xfs_dir2_data_free_t bestfree[XFS_DIR2_DATA_FD_COUNT];
+} xfs_dir2_data_hdr_t;
+
+/*
+ * Active entry in a data block.
+ *
+ * Aligned to 8 bytes. After the variable length name field there is a
+ * 2 byte tag field, which can be accessed using xfs_dir2_data_entry_tag_p.
+ */
+typedef struct xfs_dir2_data_entry {
+ __be64 inumber; /* inode number */
+ __u8 namelen; /* name length */
+ __u8 name[]; /* name bytes, no null */
+ /* __be16 tag; */ /* starting offset of us */
+} xfs_dir2_data_entry_t;
+
+/*
+ * Unused entry in a data block.
+ *
+ * Aligned to 8 bytes. Tag appears as the last 2 bytes and must be accessed
+ * using xfs_dir2_data_unused_tag_p.
+ */
+typedef struct xfs_dir2_data_unused {
+ __be16 freetag; /* XFS_DIR2_DATA_FREE_TAG */
+ __be16 length; /* total free length */
+ /* variable offset */
+ __be16 tag; /* starting offset of us */
+} xfs_dir2_data_unused_t;
+
+/*
+ * Size of a data entry.
+ */
+static inline int xfs_dir2_data_entsize(int n)
+{
+ return (int)roundup(offsetof(struct xfs_dir2_data_entry, name[0]) + n +
+ (uint)sizeof(xfs_dir2_data_off_t), XFS_DIR2_DATA_ALIGN);
+}
+
+/*
+ * Pointer to an entry's tag word.
+ */
+static inline __be16 *
+xfs_dir2_data_entry_tag_p(struct xfs_dir2_data_entry *dep)
+{
+ return (__be16 *)((char *)dep +
+ xfs_dir2_data_entsize(dep->namelen) - sizeof(__be16));
+}
+
+/*
+ * Pointer to a freespace's tag word.
+ */
+static inline __be16 *
+xfs_dir2_data_unused_tag_p(struct xfs_dir2_data_unused *dup)
+{
+ return (__be16 *)((char *)dup +
+ be16_to_cpu(dup->length) - sizeof(__be16));
+}
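+
+/*
+ * Editorial sketch, not part of this patch: scanning a pure data block.
+ * Holes are recognized by the freetag; each kind of entry advances by
+ * its own length. A single-block directory would stop earlier, at the
+ * leaf entries before the block tail. The function name is hypothetical.
+ */
+static inline void
+xfs_dir2_data_walk_example(struct xfs_mount *mp, struct xfs_dir2_data_hdr *hdr)
+{
+	char	*ptr = (char *)(hdr + 1);
+	char	*endp = (char *)hdr + mp->m_dirblksize;
+
+	while (ptr < endp) {
+		struct xfs_dir2_data_unused	*dup =
+			(struct xfs_dir2_data_unused *)ptr;
+		struct xfs_dir2_data_entry	*dep =
+			(struct xfs_dir2_data_entry *)ptr;
+
+		if (dup->freetag == cpu_to_be16(XFS_DIR2_DATA_FREE_TAG))
+			ptr += be16_to_cpu(dup->length);
+		else
+			ptr += xfs_dir2_data_entsize(dep->namelen);
+	}
+}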
+
+/*
+ * Leaf block structures.
+ *
+ * A pure leaf block looks like the following drawing on disk:
+ *
+ * +---------------------------+
+ * | xfs_dir2_leaf_hdr_t |
+ * +---------------------------+
+ * | xfs_dir2_leaf_entry_t |
+ * | xfs_dir2_leaf_entry_t |
+ * | xfs_dir2_leaf_entry_t |
+ * | xfs_dir2_leaf_entry_t |
+ * | ... |
+ * +---------------------------+
+ * | xfs_dir2_data_off_t |
+ * | xfs_dir2_data_off_t |
+ * | xfs_dir2_data_off_t |
+ * | ... |
+ * +---------------------------+
+ * | xfs_dir2_leaf_tail_t |
+ * +---------------------------+
+ *
+ * The xfs_dir2_data_off_t members (bests) and tail are at the end of the block
+ * for single-leaf (magic = XFS_DIR2_LEAF1_MAGIC) blocks only; they are not
+ * present for directories with separate leaf nodes and free space blocks
+ * (magic = XFS_DIR2_LEAFN_MAGIC).
+ *
+ * As all the entries are variable size structures the accessors below should
+ * be used to iterate over them.
+ */
+
+/*
+ * Offset of the leaf/node space. First block in this space
+ * is the btree root.
+ */
+#define XFS_DIR2_LEAF_SPACE 1
+#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
+#define XFS_DIR2_LEAF_FIRSTDB(mp) \
+ xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET)
+
+/*
+ * Leaf block header.
+ */
+typedef struct xfs_dir2_leaf_hdr {
+ xfs_da_blkinfo_t info; /* header for da routines */
+ __be16 count; /* count of entries */
+ __be16 stale; /* count of stale entries */
+} xfs_dir2_leaf_hdr_t;
+
+/*
+ * Leaf block entry.
+ */
+typedef struct xfs_dir2_leaf_entry {
+ __be32 hashval; /* hash value of name */
+ __be32 address; /* address of data entry */
+} xfs_dir2_leaf_entry_t;
+
+/*
+ * Leaf block tail.
+ */
+typedef struct xfs_dir2_leaf_tail {
+ __be32 bestcount;
+} xfs_dir2_leaf_tail_t;
+
+/*
+ * Leaf block.
+ */
+typedef struct xfs_dir2_leaf {
+ xfs_dir2_leaf_hdr_t hdr; /* leaf header */
+ xfs_dir2_leaf_entry_t ents[]; /* entries */
+} xfs_dir2_leaf_t;
+
+/*
+ * DB blocks here are logical directory block numbers, not filesystem blocks.
+ */
+
+static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
+{
+ return (mp->m_dirblksize - (uint)sizeof(struct xfs_dir2_leaf_hdr)) /
+ (uint)sizeof(struct xfs_dir2_leaf_entry);
+}
+
+/*
+ * Get address of the bestcount field in the single-leaf block.
+ */
+static inline struct xfs_dir2_leaf_tail *
+xfs_dir2_leaf_tail_p(struct xfs_mount *mp, struct xfs_dir2_leaf *lp)
+{
+ return (struct xfs_dir2_leaf_tail *)
+ ((char *)lp + mp->m_dirblksize -
+ sizeof(struct xfs_dir2_leaf_tail));
+}
+
+/*
+ * Get address of the bests array in the single-leaf block.
+ */
+static inline __be16 *
+xfs_dir2_leaf_bests_p(struct xfs_dir2_leaf_tail *ltp)
+{
+ return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
+}
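+
+/*
+ * Editorial note, not part of this patch: the two accessors above combine
+ * to reach the bests table of a leaf1 block; the tail sits at the very end
+ * of the directory block and the bests array grows downwards from it:
+ *
+ *	ltp = xfs_dir2_leaf_tail_p(mp, leaf);
+ *	bestsp = xfs_dir2_leaf_bests_p(ltp);
+ *
+ * where bestsp[db] caches bestfree[0].length of data block db, or
+ * NULLDATAOFF if that data block does not exist.
+ */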
+
+/*
+ * Convert dataptr to byte in file space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
+{
+ return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
+}
+
+/*
+ * Convert byte in file space to dataptr. It had better be aligned.
+ */
+static inline xfs_dir2_dataptr_t
+xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
+{
+ return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
+}
+
+/*
+ * Convert byte in space to (DB) block
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
+{
+ return (xfs_dir2_db_t)
+ (by >> (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog));
+}
+
+/*
+ * Convert dataptr to a block number
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
+{
+ return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp));
+}
+
+/*
+ * Convert byte in space to offset in a block
+ */
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
+{
+ return (xfs_dir2_data_aoff_t)(by &
+ ((1 << (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) - 1));
+}
+
+/*
+ * Convert dataptr to a byte offset in a block
+ */
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
+{
+ return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp));
+}
+
+/*
+ * Convert block and offset to byte in space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
+ xfs_dir2_data_aoff_t o)
+{
+ return ((xfs_dir2_off_t)db <<
+ (mp->m_sb.sb_blocklog + mp->m_sb.sb_dirblklog)) + o;
+}
+
+/*
+ * Convert block (DB) to block (dablk)
+ */
+static inline xfs_dablk_t
+xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return (xfs_dablk_t)(db << mp->m_sb.sb_dirblklog);
+}
+
+/*
+ * Convert byte in space to (DA) block
+ */
+static inline xfs_dablk_t
+xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by)
+{
+ return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by));
+}
+
+/*
+ * Convert block and offset to dataptr
+ */
+static inline xfs_dir2_dataptr_t
+xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
+ xfs_dir2_data_aoff_t o)
+{
+ return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o));
+}
+
+/*
+ * Convert block (dablk) to block (DB)
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
+{
+ return (xfs_dir2_db_t)(da >> mp->m_sb.sb_dirblklog);
+}
+
+/*
+ * Convert block (dablk) to byte offset in space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
+{
+ return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0);
+}
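+
+/*
+ * Editorial note, not part of this patch: a dataptr decomposes into a
+ * directory block number and a byte offset, and the two compose back
+ * losslessly as long as the offset is 8-byte aligned:
+ *
+ *	db  = xfs_dir2_dataptr_to_db(mp, dp);
+ *	off = xfs_dir2_dataptr_to_off(mp, dp);
+ *	dp == xfs_dir2_db_off_to_dataptr(mp, db, off);
+ */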
+
+/*
+ * Free space block definitions for the node format.
+ */
+
+/*
+ * Offset of the freespace index.
+ */
+#define XFS_DIR2_FREE_SPACE 2
+#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
+#define XFS_DIR2_FREE_FIRSTDB(mp) \
+ xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET)
+
+typedef struct xfs_dir2_free_hdr {
+ __be32 magic; /* XFS_DIR2_FREE_MAGIC */
+ __be32 firstdb; /* db of first entry */
+ __be32 nvalid; /* count of valid entries */
+ __be32 nused; /* count of used entries */
+} xfs_dir2_free_hdr_t;
+
+typedef struct xfs_dir2_free {
+ xfs_dir2_free_hdr_t hdr; /* block header */
+ __be16 bests[]; /* best free counts */
+ /* unused entries are -1 */
+} xfs_dir2_free_t;
+
+static inline int xfs_dir2_free_max_bests(struct xfs_mount *mp)
+{
+ return (mp->m_dirblksize - sizeof(struct xfs_dir2_free_hdr)) /
+ sizeof(xfs_dir2_data_off_t);
+}
+
+/*
+ * Convert data space db to the corresponding free db.
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return XFS_DIR2_FREE_FIRSTDB(mp) + db / xfs_dir2_free_max_bests(mp);
+}
+
+/*
+ * Convert data space db to the corresponding index in a free db.
+ */
+static inline int
+xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
+{
+ return db % xfs_dir2_free_max_bests(mp);
+}
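+
+/*
+ * Editorial note, not part of this patch: the cached best-free length of
+ * data block db is found by mapping db to the free block covering it and
+ * to the slot inside that block:
+ *
+ *	fdb = xfs_dir2_db_to_fdb(mp, db);
+ *	fi  = xfs_dir2_db_to_fdindex(mp, db);
+ *	best = free->bests[fi];		NULLDATAOFF if unused
+ */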
+
+/*
+ * Single block format.
+ *
+ * The single block format looks like the following drawing on disk:
+ *
+ * +-------------------------------------------------+
+ * | xfs_dir2_data_hdr_t |
+ * +-------------------------------------------------+
+ * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
+ * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
+ * | xfs_dir2_data_entry_t OR xfs_dir2_data_unused_t |
+ * | ... |
+ * +-------------------------------------------------+
+ * | unused space |
+ * +-------------------------------------------------+
+ * | ... |
+ * | xfs_dir2_leaf_entry_t |
+ * | xfs_dir2_leaf_entry_t |
+ * +-------------------------------------------------+
+ * | xfs_dir2_block_tail_t |
+ * +-------------------------------------------------+
+ *
+ * As all the entries are variable size structures the accessors below should
+ * be used to iterate over them.
+ */
+
+typedef struct xfs_dir2_block_tail {
+ __be32 count; /* count of leaf entries */
+ __be32 stale; /* count of stale lf entries */
+} xfs_dir2_block_tail_t;
+
+/*
+ * Pointer to the block tail embedded in a data block (1-block format)
+ */
+static inline struct xfs_dir2_block_tail *
+xfs_dir2_block_tail_p(struct xfs_mount *mp, struct xfs_dir2_data_hdr *hdr)
+{
+ return ((struct xfs_dir2_block_tail *)
+ ((char *)hdr + mp->m_dirblksize)) - 1;
+}
+
+/*
+ * Pointer to the leaf entries embedded in a data block (1-block format)
+ */
+static inline struct xfs_dir2_leaf_entry *
+xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
+{
+ return ((struct xfs_dir2_leaf_entry *)btp) - be32_to_cpu(btp->count);
+}
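+
+/*
+ * Editorial note, not part of this patch: the embedded leaf entries of a
+ * single-block directory are reached from the data header via the tail:
+ *
+ *	btp = xfs_dir2_block_tail_p(mp, hdr);
+ *	blp = xfs_dir2_block_leaf_p(btp);
+ *
+ * with blp[0 .. be32_to_cpu(btp->count) - 1] holding the hash-ordered
+ * leaf entries.
+ */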
+
+#endif /* __XFS_DIR2_FORMAT_H__ */
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index ae891223be9..ca2386d82cd 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -24,18 +24,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
-#include "xfs_dir2_node.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -64,7 +60,7 @@ xfs_dir2_block_to_leaf(
{
__be16 *bestsp; /* leaf's bestsp entries */
xfs_dablk_t blkno; /* leaf block's bno */
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_leaf_entry_t *blp; /* block's leaf entries */
xfs_dir2_block_tail_t *btp; /* block's tail */
xfs_inode_t *dp; /* incore directory inode */
@@ -101,9 +97,9 @@ xfs_dir2_block_to_leaf(
}
ASSERT(lbp != NULL);
leaf = lbp->data;
- block = dbp->data;
+ hdr = dbp->data;
xfs_dir2_data_check(dp, dbp);
- btp = xfs_dir2_block_tail_p(mp, block);
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
* Set the counts in the leaf header.
@@ -123,23 +119,23 @@ xfs_dir2_block_to_leaf(
* tail be free.
*/
xfs_dir2_data_make_free(tp, dbp,
- (xfs_dir2_data_aoff_t)((char *)blp - (char *)block),
- (xfs_dir2_data_aoff_t)((char *)block + mp->m_dirblksize -
+ (xfs_dir2_data_aoff_t)((char *)blp - (char *)hdr),
+ (xfs_dir2_data_aoff_t)((char *)hdr + mp->m_dirblksize -
(char *)blp),
&needlog, &needscan);
/*
* Fix up the block header, make it a data block.
*/
- block->hdr.magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
+ hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
if (needscan)
- xfs_dir2_data_freescan(mp, (xfs_dir2_data_t *)block, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
/*
* Set up leaf tail and bests table.
*/
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
ltp->bestcount = cpu_to_be32(1);
bestsp = xfs_dir2_leaf_bests_p(ltp);
- bestsp[0] = block->hdr.bestfree[0].length;
+ bestsp[0] = hdr->bestfree[0].length;
/*
* Log the data header and leaf bests table.
*/
@@ -152,6 +148,131 @@ xfs_dir2_block_to_leaf(
return 0;
}
+STATIC void
+xfs_dir2_leaf_find_stale(
+ struct xfs_dir2_leaf *leaf,
+ int index,
+ int *lowstale,
+ int *highstale)
+{
+ /*
+ * Find the first stale entry before our index, if any.
+ */
+ for (*lowstale = index - 1; *lowstale >= 0; --*lowstale) {
+ if (leaf->ents[*lowstale].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ break;
+ }
+
+ /*
+ * Find the first stale entry at or after our index, if any.
+ * Stop if the result would require moving more entries than using
+ * lowstale.
+ */
+ for (*highstale = index;
+ *highstale < be16_to_cpu(leaf->hdr.count);
+ ++*highstale) {
+ if (leaf->ents[*highstale].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
+ break;
+ if (*lowstale >= 0 && index - *lowstale <= *highstale - index)
+ break;
+ }
+}
+
+struct xfs_dir2_leaf_entry *
+xfs_dir2_leaf_find_entry(
+ xfs_dir2_leaf_t *leaf, /* leaf structure */
+ int index, /* leaf table position */
+ int compact, /* need to compact leaves */
+ int lowstale, /* index of prev stale leaf */
+ int highstale, /* index of next stale leaf */
+ int *lfloglow, /* low leaf logging index */
+ int *lfloghigh) /* high leaf logging index */
+{
+ if (!leaf->hdr.stale) {
+ xfs_dir2_leaf_entry_t *lep; /* leaf entry table pointer */
+
+ /*
+ * Now we need to make room to insert the leaf entry.
+ *
+ * If there are no stale entries, just insert a hole at index.
+ */
+ lep = &leaf->ents[index];
+ if (index < be16_to_cpu(leaf->hdr.count))
+ memmove(lep + 1, lep,
+ (be16_to_cpu(leaf->hdr.count) - index) *
+ sizeof(*lep));
+
+ /*
+ * Record low and high logging indices for the leaf.
+ */
+ *lfloglow = index;
+ *lfloghigh = be16_to_cpu(leaf->hdr.count);
+ be16_add_cpu(&leaf->hdr.count, 1);
+ return lep;
+ }
+
+ /*
+ * There are stale entries.
+ *
+ * We will use one of them for the new entry. It's probably not at
+ * the right location, so we'll have to shift some up or down first.
+ *
+ * If we didn't compact before, we need to find the nearest stale
+ * entries before and after our insertion point.
+ */
+ if (compact == 0)
+ xfs_dir2_leaf_find_stale(leaf, index, &lowstale, &highstale);
+
+ /*
+ * If the low one is better, use it.
+ */
+ if (lowstale >= 0 &&
+ (highstale == be16_to_cpu(leaf->hdr.count) ||
+ index - lowstale - 1 < highstale - index)) {
+ ASSERT(index - lowstale - 1 >= 0);
+ ASSERT(leaf->ents[lowstale].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR));
+
+ /*
+ * Copy entries up to cover the stale entry and make room
+ * for the new entry.
+ */
+ if (index - lowstale - 1 > 0) {
+ memmove(&leaf->ents[lowstale],
+ &leaf->ents[lowstale + 1],
+ (index - lowstale - 1) *
+ sizeof(xfs_dir2_leaf_entry_t));
+ }
+ *lfloglow = MIN(lowstale, *lfloglow);
+ *lfloghigh = MAX(index - 1, *lfloghigh);
+ be16_add_cpu(&leaf->hdr.stale, -1);
+ return &leaf->ents[index - 1];
+ }
+
+ /*
+ * The high one is better, so use that one.
+ */
+ ASSERT(highstale - index >= 0);
+ ASSERT(leaf->ents[highstale].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR));
+
+ /*
+ * Copy entries down to cover the stale entry and make room for the
+ * new entry.
+ */
+ if (highstale - index > 0) {
+ memmove(&leaf->ents[index + 1],
+ &leaf->ents[index],
+ (highstale - index) * sizeof(xfs_dir2_leaf_entry_t));
+ }
+ *lfloglow = MIN(index, *lfloglow);
+ *lfloghigh = MAX(highstale, *lfloghigh);
+ be16_add_cpu(&leaf->hdr.stale, -1);
+ return &leaf->ents[index];
+}
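+
+/*
+ * Editorial note, not part of this patch: a caller obtains the insertion
+ * slot and then fills it in, for example:
+ *
+ *	lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale,
+ *				       highstale, &lfloglow, &lfloghigh);
+ *	lep->hashval = cpu_to_be32(args->hashval);
+ *	xfs_dir2_leaf_log_ents(tp, lbp, lfloglow, lfloghigh);
+ */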
+
/*
* Add an entry to a leaf form directory.
*/
@@ -161,7 +282,7 @@ xfs_dir2_leaf_addname(
{
__be16 *bestsp; /* freespace table in leaf */
int compact; /* need to compact leaves */
- xfs_dir2_data_t *data; /* data block structure */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dabuf_t *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data block entry */
xfs_inode_t *dp; /* incore directory inode */
@@ -225,7 +346,7 @@ xfs_dir2_leaf_addname(
continue;
i = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
ASSERT(i < be32_to_cpu(ltp->bestcount));
- ASSERT(be16_to_cpu(bestsp[i]) != NULLDATAOFF);
+ ASSERT(bestsp[i] != cpu_to_be16(NULLDATAOFF));
if (be16_to_cpu(bestsp[i]) >= length) {
use_block = i;
break;
@@ -239,7 +360,8 @@ xfs_dir2_leaf_addname(
/*
* Remember a block we see that's missing.
*/
- if (be16_to_cpu(bestsp[i]) == NULLDATAOFF && use_block == -1)
+ if (bestsp[i] == cpu_to_be16(NULLDATAOFF) &&
+ use_block == -1)
use_block = i;
else if (be16_to_cpu(bestsp[i]) >= length) {
use_block = i;
@@ -250,14 +372,17 @@ xfs_dir2_leaf_addname(
/*
* How many bytes do we need in the leaf block?
*/
- needbytes =
- (leaf->hdr.stale ? 0 : (uint)sizeof(leaf->ents[0])) +
- (use_block != -1 ? 0 : (uint)sizeof(leaf->bests[0]));
+ needbytes = 0;
+ if (!leaf->hdr.stale)
+ needbytes += sizeof(xfs_dir2_leaf_entry_t);
+ if (use_block == -1)
+ needbytes += sizeof(xfs_dir2_data_off_t);
+
/*
* Now kill use_block if it refers to a missing block, so we
* can use it as an indication of allocation needed.
*/
- if (use_block != -1 && be16_to_cpu(bestsp[use_block]) == NULLDATAOFF)
+ if (use_block != -1 && bestsp[use_block] == cpu_to_be16(NULLDATAOFF))
use_block = -1;
/*
* If we don't have enough free bytes but we can make enough
@@ -369,8 +494,8 @@ xfs_dir2_leaf_addname(
*/
else
xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
- data = dbp->data;
- bestsp[use_block] = data->hdr.bestfree[0].length;
+ hdr = dbp->data;
+ bestsp[use_block] = hdr->bestfree[0].length;
grown = 1;
}
/*
@@ -384,7 +509,7 @@ xfs_dir2_leaf_addname(
xfs_da_brelse(tp, lbp);
return error;
}
- data = dbp->data;
+ hdr = dbp->data;
grown = 0;
}
xfs_dir2_data_check(dp, dbp);
@@ -392,14 +517,14 @@ xfs_dir2_leaf_addname(
* Point to the biggest freespace in our data block.
*/
dup = (xfs_dir2_data_unused_t *)
- ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset));
+ ((char *)hdr + be16_to_cpu(hdr->bestfree[0].offset));
ASSERT(be16_to_cpu(dup->length) >= length);
needscan = needlog = 0;
/*
* Mark the initial part of our freespace in use for the new entry.
*/
xfs_dir2_data_use_free(tp, dbp, dup,
- (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length,
+ (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
&needlog, &needscan);
/*
* Initialize our new entry (at last).
@@ -409,12 +534,12 @@ xfs_dir2_leaf_addname(
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
tagp = xfs_dir2_data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)data);
+ *tagp = cpu_to_be16((char *)dep - (char *)hdr);
/*
* Need to scan fix up the bestfree table.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, data, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
/*
* Need to log the data block's header.
*/
@@ -425,107 +550,15 @@ xfs_dir2_leaf_addname(
* If the bests table needs to be changed, do it.
* Log the change unless we've already done that.
*/
- if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(data->hdr.bestfree[0].length)) {
- bestsp[use_block] = data->hdr.bestfree[0].length;
+ if (be16_to_cpu(bestsp[use_block]) != be16_to_cpu(hdr->bestfree[0].length)) {
+ bestsp[use_block] = hdr->bestfree[0].length;
if (!grown)
xfs_dir2_leaf_log_bests(tp, lbp, use_block, use_block);
}
- /*
- * Now we need to make room to insert the leaf entry.
- * If there are no stale entries, we just insert a hole at index.
- */
- if (!leaf->hdr.stale) {
- /*
- * lep is still good as the index leaf entry.
- */
- if (index < be16_to_cpu(leaf->hdr.count))
- memmove(lep + 1, lep,
- (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
- /*
- * Record low and high logging indices for the leaf.
- */
- lfloglow = index;
- lfloghigh = be16_to_cpu(leaf->hdr.count);
- be16_add_cpu(&leaf->hdr.count, 1);
- }
- /*
- * There are stale entries.
- * We will use one of them for the new entry.
- * It's probably not at the right location, so we'll have to
- * shift some up or down first.
- */
- else {
- /*
- * If we didn't compact before, we need to find the nearest
- * stale entries before and after our insertion point.
- */
- if (compact == 0) {
- /*
- * Find the first stale entry before the insertion
- * point, if any.
- */
- for (lowstale = index - 1;
- lowstale >= 0 &&
- be32_to_cpu(leaf->ents[lowstale].address) !=
- XFS_DIR2_NULL_DATAPTR;
- lowstale--)
- continue;
- /*
- * Find the next stale entry at or after the insertion
- * point, if any. Stop if we go so far that the
- * lowstale entry would be better.
- */
- for (highstale = index;
- highstale < be16_to_cpu(leaf->hdr.count) &&
- be32_to_cpu(leaf->ents[highstale].address) !=
- XFS_DIR2_NULL_DATAPTR &&
- (lowstale < 0 ||
- index - lowstale - 1 >= highstale - index);
- highstale++)
- continue;
- }
- /*
- * If the low one is better, use it.
- */
- if (lowstale >= 0 &&
- (highstale == be16_to_cpu(leaf->hdr.count) ||
- index - lowstale - 1 < highstale - index)) {
- ASSERT(index - lowstale - 1 >= 0);
- ASSERT(be32_to_cpu(leaf->ents[lowstale].address) ==
- XFS_DIR2_NULL_DATAPTR);
- /*
- * Copy entries up to cover the stale entry
- * and make room for the new entry.
- */
- if (index - lowstale - 1 > 0)
- memmove(&leaf->ents[lowstale],
- &leaf->ents[lowstale + 1],
- (index - lowstale - 1) * sizeof(*lep));
- lep = &leaf->ents[index - 1];
- lfloglow = MIN(lowstale, lfloglow);
- lfloghigh = MAX(index - 1, lfloghigh);
- }
- /*
- * The high one is better, so use that one.
- */
- else {
- ASSERT(highstale - index >= 0);
- ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
- XFS_DIR2_NULL_DATAPTR);
- /*
- * Copy entries down to cover the stale entry
- * and make room for the new entry.
- */
- if (highstale - index > 0)
- memmove(&leaf->ents[index + 1],
- &leaf->ents[index],
- (highstale - index) * sizeof(*lep));
- lep = &leaf->ents[index];
- lfloglow = MIN(index, lfloglow);
- lfloghigh = MAX(highstale, lfloghigh);
- }
- be16_add_cpu(&leaf->hdr.stale, -1);
- }
+
+ lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale,
+ highstale, &lfloglow, &lfloghigh);
+
/*
* Fill in the new leaf entry.
*/
@@ -562,7 +595,7 @@ xfs_dir2_leaf_check(
leaf = bp->data;
mp = dp->i_mount;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
/*
* This value is not restrictive enough.
* Should factor in the size of the bests table as well.
@@ -582,7 +615,7 @@ xfs_dir2_leaf_check(
if (i + 1 < be16_to_cpu(leaf->hdr.count))
ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
be32_to_cpu(leaf->ents[i + 1].hashval));
- if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
+ if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
ASSERT(be16_to_cpu(leaf->hdr.stale) == stale);
@@ -611,7 +644,8 @@ xfs_dir2_leaf_compact(
* Compress out the stale entries in place.
*/
for (from = to = 0, loglow = -1; from < be16_to_cpu(leaf->hdr.count); from++) {
- if (be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR)
+ if (leaf->ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
continue;
/*
* Only actually copy the entries that are different.
@@ -663,24 +697,9 @@ xfs_dir2_leaf_compact_x1(
leaf = bp->data;
ASSERT(be16_to_cpu(leaf->hdr.stale) > 1);
index = *indexp;
- /*
- * Find the first stale entry before our index, if any.
- */
- for (lowstale = index - 1;
- lowstale >= 0 &&
- be32_to_cpu(leaf->ents[lowstale].address) != XFS_DIR2_NULL_DATAPTR;
- lowstale--)
- continue;
- /*
- * Find the first stale entry at or after our index, if any.
- * Stop if the answer would be worse than lowstale.
- */
- for (highstale = index;
- highstale < be16_to_cpu(leaf->hdr.count) &&
- be32_to_cpu(leaf->ents[highstale].address) != XFS_DIR2_NULL_DATAPTR &&
- (lowstale < 0 || index - lowstale > highstale - index);
- highstale++)
- continue;
+
+ xfs_dir2_leaf_find_stale(leaf, index, &lowstale, &highstale);
+
/*
* Pick the better of lowstale and highstale.
*/
@@ -701,7 +720,8 @@ xfs_dir2_leaf_compact_x1(
if (index == from)
newindex = to;
if (from != keepstale &&
- be32_to_cpu(leaf->ents[from].address) == XFS_DIR2_NULL_DATAPTR) {
+ leaf->ents[from].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR)) {
if (from == to)
*lowlogp = to;
continue;
@@ -760,7 +780,7 @@ xfs_dir2_leaf_getdents(
int byteoff; /* offset in current block */
xfs_dir2_db_t curdb; /* db for current block */
xfs_dir2_off_t curoff; /* current overall offset */
- xfs_dir2_data_t *data; /* data block structure */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_entry_t *dep; /* data entry */
xfs_dir2_data_unused_t *dup; /* unused entry */
int error = 0; /* error return value */
@@ -1018,23 +1038,23 @@ xfs_dir2_leaf_getdents(
else if (curoff > newoff)
ASSERT(xfs_dir2_byte_to_db(mp, curoff) ==
curdb);
- data = bp->data;
+ hdr = bp->data;
xfs_dir2_data_check(dp, bp);
/*
* Find our position in the block.
*/
- ptr = (char *)&data->u;
+ ptr = (char *)(hdr + 1);
byteoff = xfs_dir2_byte_to_off(mp, curoff);
/*
* Skip past the header.
*/
if (byteoff == 0)
- curoff += (uint)sizeof(data->hdr);
+ curoff += (uint)sizeof(*hdr);
/*
* Skip past entries until we reach our offset.
*/
else {
- while ((char *)ptr - (char *)data < byteoff) {
+ while ((char *)ptr - (char *)hdr < byteoff) {
dup = (xfs_dir2_data_unused_t *)ptr;
if (be16_to_cpu(dup->freetag)
@@ -1055,8 +1075,8 @@ xfs_dir2_leaf_getdents(
curoff =
xfs_dir2_db_off_to_byte(mp,
xfs_dir2_byte_to_db(mp, curoff),
- (char *)ptr - (char *)data);
- if (ptr >= (char *)data + mp->m_dirblksize) {
+ (char *)ptr - (char *)hdr);
+ if (ptr >= (char *)hdr + mp->m_dirblksize) {
continue;
}
}
@@ -1179,7 +1199,7 @@ xfs_dir2_leaf_log_bests(
xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
ltp = xfs_dir2_leaf_tail_p(tp->t_mountp, leaf);
firstb = xfs_dir2_leaf_bests_p(ltp) + first;
lastb = xfs_dir2_leaf_bests_p(ltp) + last;
@@ -1202,8 +1222,8 @@ xfs_dir2_leaf_log_ents(
xfs_dir2_leaf_t *leaf; /* leaf structure */
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC ||
- be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
+ leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
firstlep = &leaf->ents[first];
lastlep = &leaf->ents[last];
xfs_da_log_buf(tp, bp, (uint)((char *)firstlep - (char *)leaf),
@@ -1221,8 +1241,8 @@ xfs_dir2_leaf_log_header(
xfs_dir2_leaf_t *leaf; /* leaf structure */
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC ||
- be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC) ||
+ leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
xfs_da_log_buf(tp, bp, (uint)((char *)&leaf->hdr - (char *)leaf),
(uint)(sizeof(leaf->hdr) - 1));
}
@@ -1241,7 +1261,7 @@ xfs_dir2_leaf_log_tail(
mp = tp->t_mountp;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAF1_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAF1_MAGIC));
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
xfs_da_log_buf(tp, bp, (uint)((char *)ltp - (char *)leaf),
(uint)(mp->m_dirblksize - 1));
@@ -1437,7 +1457,7 @@ xfs_dir2_leaf_removename(
xfs_da_args_t *args) /* operation arguments */
{
__be16 *bestsp; /* leaf block best freespace */
- xfs_dir2_data_t *data; /* data block structure */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
xfs_dabuf_t *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data entry structure */
@@ -1467,7 +1487,7 @@ xfs_dir2_leaf_removename(
tp = args->trans;
mp = dp->i_mount;
leaf = lbp->data;
- data = dbp->data;
+ hdr = dbp->data;
xfs_dir2_data_check(dp, dbp);
/*
* Point to the leaf entry, use that to point to the data entry.
@@ -1475,9 +1495,9 @@ xfs_dir2_leaf_removename(
lep = &leaf->ents[index];
db = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
dep = (xfs_dir2_data_entry_t *)
- ((char *)data + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
+ ((char *)hdr + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
needscan = needlog = 0;
- oldbest = be16_to_cpu(data->hdr.bestfree[0].length);
+ oldbest = be16_to_cpu(hdr->bestfree[0].length);
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
bestsp = xfs_dir2_leaf_bests_p(ltp);
ASSERT(be16_to_cpu(bestsp[db]) == oldbest);
@@ -1485,7 +1505,7 @@ xfs_dir2_leaf_removename(
* Mark the former data entry unused.
*/
xfs_dir2_data_make_free(tp, dbp,
- (xfs_dir2_data_aoff_t)((char *)dep - (char *)data),
+ (xfs_dir2_data_aoff_t)((char *)dep - (char *)hdr),
xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
/*
* We just mark the leaf entry stale by putting a null in it.
@@ -1499,23 +1519,23 @@ xfs_dir2_leaf_removename(
* log the data block header if necessary.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, data, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(tp, dbp);
/*
* If the longest freespace in the data block has changed,
* put the new value in the bests table and log that.
*/
- if (be16_to_cpu(data->hdr.bestfree[0].length) != oldbest) {
- bestsp[db] = data->hdr.bestfree[0].length;
+ if (be16_to_cpu(hdr->bestfree[0].length) != oldbest) {
+ bestsp[db] = hdr->bestfree[0].length;
xfs_dir2_leaf_log_bests(tp, lbp, db, db);
}
xfs_dir2_data_check(dp, dbp);
/*
* If the data block is now empty then get rid of the data block.
*/
- if (be16_to_cpu(data->hdr.bestfree[0].length) ==
- mp->m_dirblksize - (uint)sizeof(data->hdr)) {
+ if (be16_to_cpu(hdr->bestfree[0].length) ==
+ mp->m_dirblksize - (uint)sizeof(*hdr)) {
ASSERT(db != mp->m_dirdatablk);
if ((error = xfs_dir2_shrink_inode(args, db, dbp))) {
/*
@@ -1542,7 +1562,7 @@ xfs_dir2_leaf_removename(
* Look for the last active entry (i).
*/
for (i = db - 1; i > 0; i--) {
- if (be16_to_cpu(bestsp[i]) != NULLDATAOFF)
+ if (bestsp[i] != cpu_to_be16(NULLDATAOFF))
break;
}
/*
@@ -1686,9 +1706,6 @@ xfs_dir2_leaf_trim_data(
xfs_dir2_db_t db) /* data block number */
{
__be16 *bestsp; /* leaf bests table */
-#ifdef DEBUG
- xfs_dir2_data_t *data; /* data block structure */
-#endif
xfs_dabuf_t *dbp; /* data block buffer */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return value */
@@ -1707,20 +1724,21 @@ xfs_dir2_leaf_trim_data(
XFS_DATA_FORK))) {
return error;
}
-#ifdef DEBUG
- data = dbp->data;
- ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
-#endif
- /* this seems to be an error
- * data is only valid if DEBUG is defined?
- * RMC 09/08/1999
- */
leaf = lbp->data;
ltp = xfs_dir2_leaf_tail_p(mp, leaf);
- ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) ==
- mp->m_dirblksize - (uint)sizeof(data->hdr));
+
+#ifdef DEBUG
+{
+ struct xfs_dir2_data_hdr *hdr = dbp->data;
+
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
+ ASSERT(be16_to_cpu(hdr->bestfree[0].length) ==
+ mp->m_dirblksize - (uint)sizeof(*hdr));
ASSERT(db == be32_to_cpu(ltp->bestcount) - 1);
+}
+#endif
+
/*
* Get rid of the data block.
*/
@@ -1740,6 +1758,20 @@ xfs_dir2_leaf_trim_data(
return 0;
}
+static inline size_t
+xfs_dir2_leaf_size(
+ struct xfs_dir2_leaf_hdr *hdr,
+ int counts)
+{
+ int entries;
+
+ entries = be16_to_cpu(hdr->count) - be16_to_cpu(hdr->stale);
+ return sizeof(xfs_dir2_leaf_hdr_t) +
+ entries * sizeof(xfs_dir2_leaf_entry_t) +
+ counts * sizeof(xfs_dir2_data_off_t) +
+ sizeof(xfs_dir2_leaf_tail_t);
+}
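+
+/*
+ * Editorial note, not part of this patch: xfs_dir2_node_to_leaf() below
+ * uses this helper to decide whether a leafn block plus the free-index
+ * bests would still fit in a single leaf1 block:
+ *
+ *	if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) >
+ *	    mp->m_dirblksize)
+ *		keep the node form;
+ */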
+
/*
* Convert node form directory to leaf form directory.
* The root of the node form dir needs to already be a LEAFN block.
@@ -1810,7 +1842,7 @@ xfs_dir2_node_to_leaf(
return 0;
lbp = state->path.blk[0].bp;
leaf = lbp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
/*
* Read the freespace block.
*/
@@ -1819,20 +1851,19 @@ xfs_dir2_node_to_leaf(
return error;
}
free = fbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
ASSERT(!free->hdr.firstdb);
+
/*
* Now see if the leafn and free data will fit in a leaf1.
* If not, release the buffer and give up.
*/
- if ((uint)sizeof(leaf->hdr) +
- (be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale)) * (uint)sizeof(leaf->ents[0]) +
- be32_to_cpu(free->hdr.nvalid) * (uint)sizeof(leaf->bests[0]) +
- (uint)sizeof(leaf->tail) >
- mp->m_dirblksize) {
+ if (xfs_dir2_leaf_size(&leaf->hdr, be32_to_cpu(free->hdr.nvalid)) >
+ mp->m_dirblksize) {
xfs_da_brelse(tp, fbp);
return 0;
}
+
/*
* If the leaf has any stale entries in it, compress them out.
* The compact routine will log the header.
@@ -1851,7 +1882,7 @@ xfs_dir2_node_to_leaf(
* Set up the leaf bests table.
*/
memcpy(xfs_dir2_leaf_bests_p(ltp), free->bests,
- be32_to_cpu(ltp->bestcount) * sizeof(leaf->bests[0]));
+ be32_to_cpu(ltp->bestcount) * sizeof(xfs_dir2_data_off_t));
xfs_dir2_leaf_log_bests(tp, lbp, 0, be32_to_cpu(ltp->bestcount) - 1);
xfs_dir2_leaf_log_tail(tp, lbp);
xfs_dir2_leaf_check(dp, lbp);
diff --git a/fs/xfs/xfs_dir2_leaf.h b/fs/xfs/xfs_dir2_leaf.h
deleted file mode 100644
index 6c9539f0698..00000000000
--- a/fs/xfs/xfs_dir2_leaf.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_LEAF_H__
-#define __XFS_DIR2_LEAF_H__
-
-struct uio;
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_inode;
-struct xfs_mount;
-struct xfs_trans;
-
-/*
- * Offset of the leaf/node space. First block in this space
- * is the btree root.
- */
-#define XFS_DIR2_LEAF_SPACE 1
-#define XFS_DIR2_LEAF_OFFSET (XFS_DIR2_LEAF_SPACE * XFS_DIR2_SPACE_SIZE)
-#define XFS_DIR2_LEAF_FIRSTDB(mp) \
- xfs_dir2_byte_to_db(mp, XFS_DIR2_LEAF_OFFSET)
-
-/*
- * Offset in data space of a data entry.
- */
-typedef __uint32_t xfs_dir2_dataptr_t;
-#define XFS_DIR2_MAX_DATAPTR ((xfs_dir2_dataptr_t)0xffffffff)
-#define XFS_DIR2_NULL_DATAPTR ((xfs_dir2_dataptr_t)0)
-
-/*
- * Leaf block header.
- */
-typedef struct xfs_dir2_leaf_hdr {
- xfs_da_blkinfo_t info; /* header for da routines */
- __be16 count; /* count of entries */
- __be16 stale; /* count of stale entries */
-} xfs_dir2_leaf_hdr_t;
-
-/*
- * Leaf block entry.
- */
-typedef struct xfs_dir2_leaf_entry {
- __be32 hashval; /* hash value of name */
- __be32 address; /* address of data entry */
-} xfs_dir2_leaf_entry_t;
-
-/*
- * Leaf block tail.
- */
-typedef struct xfs_dir2_leaf_tail {
- __be32 bestcount;
-} xfs_dir2_leaf_tail_t;
-
-/*
- * Leaf block.
- * bests and tail are at the end of the block for single-leaf only
- * (magic = XFS_DIR2_LEAF1_MAGIC not XFS_DIR2_LEAFN_MAGIC).
- */
-typedef struct xfs_dir2_leaf {
- xfs_dir2_leaf_hdr_t hdr; /* leaf header */
- xfs_dir2_leaf_entry_t ents[1]; /* entries */
- /* ... */
- xfs_dir2_data_off_t bests[1]; /* best free counts */
- xfs_dir2_leaf_tail_t tail; /* leaf tail */
-} xfs_dir2_leaf_t;
-
-/*
- * DB blocks here are logical directory block numbers, not filesystem blocks.
- */
-
-static inline int xfs_dir2_max_leaf_ents(struct xfs_mount *mp)
-{
- return (int)(((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_leaf_hdr_t)) /
- (uint)sizeof(xfs_dir2_leaf_entry_t));
-}
-
-/*
- * Get address of the bestcount field in the single-leaf block.
- */
-static inline xfs_dir2_leaf_tail_t *
-xfs_dir2_leaf_tail_p(struct xfs_mount *mp, xfs_dir2_leaf_t *lp)
-{
- return (xfs_dir2_leaf_tail_t *)
- ((char *)(lp) + (mp)->m_dirblksize -
- (uint)sizeof(xfs_dir2_leaf_tail_t));
-}
-
-/*
- * Get address of the bests array in the single-leaf block.
- */
-static inline __be16 *
-xfs_dir2_leaf_bests_p(xfs_dir2_leaf_tail_t *ltp)
-{
- return (__be16 *)ltp - be32_to_cpu(ltp->bestcount);
-}
-
-/*
- * Convert dataptr to byte in file space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_dataptr_to_byte(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
-{
- return (xfs_dir2_off_t)(dp) << XFS_DIR2_DATA_ALIGN_LOG;
-}
-
-/*
- * Convert byte in file space to dataptr. It had better be aligned.
- */
-static inline xfs_dir2_dataptr_t
-xfs_dir2_byte_to_dataptr(struct xfs_mount *mp, xfs_dir2_off_t by)
-{
- return (xfs_dir2_dataptr_t)((by) >> XFS_DIR2_DATA_ALIGN_LOG);
-}
-
-/*
- * Convert byte in space to (DB) block
- */
-static inline xfs_dir2_db_t
-xfs_dir2_byte_to_db(struct xfs_mount *mp, xfs_dir2_off_t by)
-{
- return (xfs_dir2_db_t)((by) >> \
- ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog));
-}
-
-/*
- * Convert dataptr to a block number
- */
-static inline xfs_dir2_db_t
-xfs_dir2_dataptr_to_db(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
-{
- return xfs_dir2_byte_to_db(mp, xfs_dir2_dataptr_to_byte(mp, dp));
-}
-
-/*
- * Convert byte in space to offset in a block
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_byte_to_off(struct xfs_mount *mp, xfs_dir2_off_t by)
-{
- return (xfs_dir2_data_aoff_t)((by) & \
- ((1 << ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) - 1));
-}
-
-/*
- * Convert dataptr to a byte offset in a block
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_dataptr_to_off(struct xfs_mount *mp, xfs_dir2_dataptr_t dp)
-{
- return xfs_dir2_byte_to_off(mp, xfs_dir2_dataptr_to_byte(mp, dp));
-}
-
-/*
- * Convert block and offset to byte in space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_db_off_to_byte(struct xfs_mount *mp, xfs_dir2_db_t db,
- xfs_dir2_data_aoff_t o)
-{
- return ((xfs_dir2_off_t)(db) << \
- ((mp)->m_sb.sb_blocklog + (mp)->m_sb.sb_dirblklog)) + (o);
-}
-
-/*
- * Convert block (DB) to block (dablk)
- */
-static inline xfs_dablk_t
-xfs_dir2_db_to_da(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
- return (xfs_dablk_t)((db) << (mp)->m_sb.sb_dirblklog);
-}
-
-/*
- * Convert byte in space to (DA) block
- */
-static inline xfs_dablk_t
-xfs_dir2_byte_to_da(struct xfs_mount *mp, xfs_dir2_off_t by)
-{
- return xfs_dir2_db_to_da(mp, xfs_dir2_byte_to_db(mp, by));
-}
-
-/*
- * Convert block and offset to dataptr
- */
-static inline xfs_dir2_dataptr_t
-xfs_dir2_db_off_to_dataptr(struct xfs_mount *mp, xfs_dir2_db_t db,
- xfs_dir2_data_aoff_t o)
-{
- return xfs_dir2_byte_to_dataptr(mp, xfs_dir2_db_off_to_byte(mp, db, o));
-}
-
-/*
- * Convert block (dablk) to block (DB)
- */
-static inline xfs_dir2_db_t
-xfs_dir2_da_to_db(struct xfs_mount *mp, xfs_dablk_t da)
-{
- return (xfs_dir2_db_t)((da) >> (mp)->m_sb.sb_dirblklog);
-}
-
-/*
- * Convert block (dablk) to byte offset in space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_da_to_byte(struct xfs_mount *mp, xfs_dablk_t da)
-{
- return xfs_dir2_db_off_to_byte(mp, xfs_dir2_da_to_db(mp, da), 0);
-}
-
-/*
- * Function declarations.
- */
-extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
- struct xfs_dabuf *dbp);
-extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
-extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
- struct xfs_dabuf *bp);
-extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
- int *lowstalep, int *highstalep,
- int *lowlogp, int *highlogp);
-extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
- size_t bufsize, xfs_off_t *offset,
- filldir_t filldir);
-extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
- struct xfs_dabuf **bpp, int magic);
-extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
- int first, int last);
-extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
- struct xfs_dabuf *bp);
-extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
-extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
-extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
-extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
- struct xfs_dabuf *lbp);
-extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
- struct xfs_dabuf *lbp, xfs_dir2_db_t db);
-extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
-
-#endif /* __XFS_DIR2_LEAF_H__ */
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index a0aab7d3294..084b3247d63 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -23,18 +23,14 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
-#include "xfs_dir2_node.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
@@ -73,7 +69,7 @@ xfs_dir2_free_log_bests(
xfs_dir2_free_t *free; /* freespace structure */
free = bp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
xfs_da_log_buf(tp, bp,
(uint)((char *)&free->bests[first] - (char *)free),
(uint)((char *)&free->bests[last] - (char *)free +
@@ -91,7 +87,7 @@ xfs_dir2_free_log_header(
xfs_dir2_free_t *free; /* freespace structure */
free = bp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
xfs_da_log_buf(tp, bp, (uint)((char *)&free->hdr - (char *)free),
(uint)(sizeof(xfs_dir2_free_hdr_t) - 1));
}
@@ -244,89 +240,13 @@ xfs_dir2_leafn_add(
lfloglow = be16_to_cpu(leaf->hdr.count);
lfloghigh = -1;
}
- /*
- * No stale entries, just insert a space for the new entry.
- */
- if (!leaf->hdr.stale) {
- lep = &leaf->ents[index];
- if (index < be16_to_cpu(leaf->hdr.count))
- memmove(lep + 1, lep,
- (be16_to_cpu(leaf->hdr.count) - index) * sizeof(*lep));
- lfloglow = index;
- lfloghigh = be16_to_cpu(leaf->hdr.count);
- be16_add_cpu(&leaf->hdr.count, 1);
- }
- /*
- * There are stale entries. We'll use one for the new entry.
- */
- else {
- /*
- * If we didn't do a compact then we need to figure out
- * which stale entry will be used.
- */
- if (compact == 0) {
- /*
- * Find first stale entry before our insertion point.
- */
- for (lowstale = index - 1;
- lowstale >= 0 &&
- be32_to_cpu(leaf->ents[lowstale].address) !=
- XFS_DIR2_NULL_DATAPTR;
- lowstale--)
- continue;
- /*
- * Find next stale entry after insertion point.
- * Stop looking if the answer would be worse than
- * lowstale already found.
- */
- for (highstale = index;
- highstale < be16_to_cpu(leaf->hdr.count) &&
- be32_to_cpu(leaf->ents[highstale].address) !=
- XFS_DIR2_NULL_DATAPTR &&
- (lowstale < 0 ||
- index - lowstale - 1 >= highstale - index);
- highstale++)
- continue;
- }
- /*
- * Using the low stale entry.
- * Shift entries up toward the stale slot.
- */
- if (lowstale >= 0 &&
- (highstale == be16_to_cpu(leaf->hdr.count) ||
- index - lowstale - 1 < highstale - index)) {
- ASSERT(be32_to_cpu(leaf->ents[lowstale].address) ==
- XFS_DIR2_NULL_DATAPTR);
- ASSERT(index - lowstale - 1 >= 0);
- if (index - lowstale - 1 > 0)
- memmove(&leaf->ents[lowstale],
- &leaf->ents[lowstale + 1],
- (index - lowstale - 1) * sizeof(*lep));
- lep = &leaf->ents[index - 1];
- lfloglow = MIN(lowstale, lfloglow);
- lfloghigh = MAX(index - 1, lfloghigh);
- }
- /*
- * Using the high stale entry.
- * Shift entries down toward the stale slot.
- */
- else {
- ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
- XFS_DIR2_NULL_DATAPTR);
- ASSERT(highstale - index >= 0);
- if (highstale - index > 0)
- memmove(&leaf->ents[index + 1],
- &leaf->ents[index],
- (highstale - index) * sizeof(*lep));
- lep = &leaf->ents[index];
- lfloglow = MIN(index, lfloglow);
- lfloghigh = MAX(highstale, lfloghigh);
- }
- be16_add_cpu(&leaf->hdr.stale, -1);
- }
+
/*
* Insert the new entry, log everything.
*/
+ lep = xfs_dir2_leaf_find_entry(leaf, index, compact, lowstale,
+ highstale, &lfloglow, &lfloghigh);
+
lep->hashval = cpu_to_be32(args->hashval);
lep->address = cpu_to_be32(xfs_dir2_db_off_to_dataptr(mp,
args->blkno, args->index));
@@ -352,14 +272,14 @@ xfs_dir2_leafn_check(
leaf = bp->data;
mp = dp->i_mount;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
ASSERT(be16_to_cpu(leaf->hdr.count) <= xfs_dir2_max_leaf_ents(mp));
for (i = stale = 0; i < be16_to_cpu(leaf->hdr.count); i++) {
if (i + 1 < be16_to_cpu(leaf->hdr.count)) {
ASSERT(be32_to_cpu(leaf->ents[i].hashval) <=
be32_to_cpu(leaf->ents[i + 1].hashval));
}
- if (be32_to_cpu(leaf->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
+ if (leaf->ents[i].address == cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
ASSERT(be16_to_cpu(leaf->hdr.stale) == stale);
@@ -378,7 +298,7 @@ xfs_dir2_leafn_lasthash(
xfs_dir2_leaf_t *leaf; /* leaf structure */
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
if (count)
*count = be16_to_cpu(leaf->hdr.count);
if (!leaf->hdr.count)
@@ -417,7 +337,7 @@ xfs_dir2_leafn_lookup_for_addname(
tp = args->trans;
mp = dp->i_mount;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
#ifdef __KERNEL__
ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
#endif
@@ -434,7 +354,7 @@ xfs_dir2_leafn_lookup_for_addname(
curbp = state->extrablk.bp;
curfdb = state->extrablk.blkno;
free = curbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
}
length = xfs_dir2_data_entsize(args->namelen);
/*
@@ -488,7 +408,7 @@ xfs_dir2_leafn_lookup_for_addname(
ASSERT(be32_to_cpu(free->hdr.magic) ==
XFS_DIR2_FREE_MAGIC);
ASSERT((be32_to_cpu(free->hdr.firstdb) %
- XFS_DIR2_MAX_FREE_BESTS(mp)) == 0);
+ xfs_dir2_free_max_bests(mp)) == 0);
ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb);
ASSERT(curdb < be32_to_cpu(free->hdr.firstdb) +
be32_to_cpu(free->hdr.nvalid));
@@ -500,7 +420,8 @@ xfs_dir2_leafn_lookup_for_addname(
/*
* If it has room, return it.
*/
- if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) {
+ if (unlikely(free->bests[fi] ==
+ cpu_to_be16(NULLDATAOFF))) {
XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
XFS_ERRLEVEL_LOW, mp);
if (curfdb != newfdb)
@@ -561,7 +482,7 @@ xfs_dir2_leafn_lookup_for_entry(
tp = args->trans;
mp = dp->i_mount;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
#ifdef __KERNEL__
ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
#endif
@@ -742,7 +663,8 @@ xfs_dir2_leafn_moveents(
int i; /* temp leaf index */
for (i = start_s, stale = 0; i < start_s + count; i++) {
- if (be32_to_cpu(leaf_s->ents[i].address) == XFS_DIR2_NULL_DATAPTR)
+ if (leaf_s->ents[i].address ==
+ cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
stale++;
}
} else
@@ -789,8 +711,8 @@ xfs_dir2_leafn_order(
leaf1 = leaf1_bp->data;
leaf2 = leaf2_bp->data;
- ASSERT(be16_to_cpu(leaf1->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
- ASSERT(be16_to_cpu(leaf2->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
+ ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
if (be16_to_cpu(leaf1->hdr.count) > 0 &&
be16_to_cpu(leaf2->hdr.count) > 0 &&
(be32_to_cpu(leaf2->ents[0].hashval) < be32_to_cpu(leaf1->ents[0].hashval) ||
@@ -918,7 +840,7 @@ xfs_dir2_leafn_remove(
xfs_da_state_blk_t *dblk, /* data block */
int *rval) /* resulting block needs join */
{
- xfs_dir2_data_t *data; /* data block structure */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t db; /* data block number */
xfs_dabuf_t *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data block entry */
@@ -938,7 +860,7 @@ xfs_dir2_leafn_remove(
tp = args->trans;
mp = dp->i_mount;
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
/*
* Point to the entry we're removing.
*/
@@ -963,9 +885,9 @@ xfs_dir2_leafn_remove(
* in the data block in case it changes.
*/
dbp = dblk->bp;
- data = dbp->data;
- dep = (xfs_dir2_data_entry_t *)((char *)data + off);
- longest = be16_to_cpu(data->hdr.bestfree[0].length);
+ hdr = dbp->data;
+ dep = (xfs_dir2_data_entry_t *)((char *)hdr + off);
+ longest = be16_to_cpu(hdr->bestfree[0].length);
needlog = needscan = 0;
xfs_dir2_data_make_free(tp, dbp, off,
xfs_dir2_data_entsize(dep->namelen), &needlog, &needscan);
@@ -974,7 +896,7 @@ xfs_dir2_leafn_remove(
* Log the data block header if needed.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, data, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
if (needlog)
xfs_dir2_data_log_header(tp, dbp);
xfs_dir2_data_check(dp, dbp);
@@ -982,7 +904,7 @@ xfs_dir2_leafn_remove(
* If the longest data block freespace changes, need to update
* the corresponding freeblock entry.
*/
- if (longest < be16_to_cpu(data->hdr.bestfree[0].length)) {
+ if (longest < be16_to_cpu(hdr->bestfree[0].length)) {
int error; /* error return value */
xfs_dabuf_t *fbp; /* freeblock buffer */
xfs_dir2_db_t fdb; /* freeblock block number */
@@ -1000,27 +922,27 @@ xfs_dir2_leafn_remove(
return error;
}
free = fbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
ASSERT(be32_to_cpu(free->hdr.firstdb) ==
- XFS_DIR2_MAX_FREE_BESTS(mp) *
+ xfs_dir2_free_max_bests(mp) *
(fdb - XFS_DIR2_FREE_FIRSTDB(mp)));
/*
* Calculate which entry we need to fix.
*/
findex = xfs_dir2_db_to_fdindex(mp, db);
- longest = be16_to_cpu(data->hdr.bestfree[0].length);
+ longest = be16_to_cpu(hdr->bestfree[0].length);
/*
* If the data block is now empty we can get rid of it
* (usually).
*/
- if (longest == mp->m_dirblksize - (uint)sizeof(data->hdr)) {
+ if (longest == mp->m_dirblksize - (uint)sizeof(*hdr)) {
/*
* Try to punch out the data block.
*/
error = xfs_dir2_shrink_inode(args, db, dbp);
if (error == 0) {
dblk->bp = NULL;
- data = NULL;
+ hdr = NULL;
}
/*
* We can get ENOSPC if there's no space reservation.
@@ -1036,7 +958,7 @@ xfs_dir2_leafn_remove(
* If we got rid of the data block, we can eliminate that entry
* in the free block.
*/
- if (data == NULL) {
+ if (hdr == NULL) {
/*
* One less used entry in the free table.
*/
@@ -1052,7 +974,8 @@ xfs_dir2_leafn_remove(
int i; /* free entry index */
for (i = findex - 1;
- i >= 0 && be16_to_cpu(free->bests[i]) == NULLDATAOFF;
+ i >= 0 &&
+ free->bests[i] == cpu_to_be16(NULLDATAOFF);
i--)
continue;
free->hdr.nvalid = cpu_to_be32(i + 1);
@@ -1209,7 +1132,7 @@ xfs_dir2_leafn_toosmall(
*/
blk = &state->path.blk[state->path.active - 1];
info = blk->bp->data;
- ASSERT(be16_to_cpu(info->magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
leaf = (xfs_dir2_leaf_t *)info;
count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
bytes = (uint)sizeof(leaf->hdr) + count * (uint)sizeof(leaf->ents[0]);
@@ -1268,7 +1191,7 @@ xfs_dir2_leafn_toosmall(
count = be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
bytes = state->blocksize - (state->blocksize >> 2);
leaf = bp->data;
- ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
count += be16_to_cpu(leaf->hdr.count) - be16_to_cpu(leaf->hdr.stale);
bytes -= count * (uint)sizeof(leaf->ents[0]);
/*
@@ -1327,8 +1250,8 @@ xfs_dir2_leafn_unbalance(
ASSERT(save_blk->magic == XFS_DIR2_LEAFN_MAGIC);
drop_leaf = drop_blk->bp->data;
save_leaf = save_blk->bp->data;
- ASSERT(be16_to_cpu(drop_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
- ASSERT(be16_to_cpu(save_leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+ ASSERT(drop_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
+ ASSERT(save_leaf->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC));
/*
* If there are any stale leaf entries, take this opportunity
* to purge them.
@@ -1432,7 +1355,7 @@ xfs_dir2_node_addname_int(
xfs_da_args_t *args, /* operation arguments */
xfs_da_state_blk_t *fblk) /* optional freespace block */
{
- xfs_dir2_data_t *data; /* data block structure */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_db_t dbno; /* data block number */
xfs_dabuf_t *dbp; /* data block buffer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
@@ -1469,7 +1392,7 @@ xfs_dir2_node_addname_int(
*/
ifbno = fblk->blkno;
free = fbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
findex = fblk->index;
/*
* This means the free entry showed that the data block had
@@ -1553,7 +1476,7 @@ xfs_dir2_node_addname_int(
continue;
}
free = fbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
findex = 0;
}
/*
@@ -1680,12 +1603,12 @@ xfs_dir2_node_addname_int(
free->hdr.magic = cpu_to_be32(XFS_DIR2_FREE_MAGIC);
free->hdr.firstdb = cpu_to_be32(
(fbno - XFS_DIR2_FREE_FIRSTDB(mp)) *
- XFS_DIR2_MAX_FREE_BESTS(mp));
+ xfs_dir2_free_max_bests(mp));
free->hdr.nvalid = 0;
free->hdr.nused = 0;
} else {
free = fbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
}
/*
@@ -1697,7 +1620,7 @@ xfs_dir2_node_addname_int(
* freespace block, extend that table.
*/
if (findex >= be32_to_cpu(free->hdr.nvalid)) {
- ASSERT(findex < XFS_DIR2_MAX_FREE_BESTS(mp));
+ ASSERT(findex < xfs_dir2_free_max_bests(mp));
free->hdr.nvalid = cpu_to_be32(findex + 1);
/*
* Tag new entry so nused will go up.
@@ -1708,7 +1631,7 @@ xfs_dir2_node_addname_int(
* If this entry was for an empty data block
* (this should always be true) then update the header.
*/
- if (be16_to_cpu(free->bests[findex]) == NULLDATAOFF) {
+ if (free->bests[findex] == cpu_to_be16(NULLDATAOFF)) {
be32_add_cpu(&free->hdr.nused, 1);
xfs_dir2_free_log_header(tp, fbp);
}
@@ -1717,8 +1640,8 @@ xfs_dir2_node_addname_int(
* We haven't allocated the data entry yet so this will
* change again.
*/
- data = dbp->data;
- free->bests[findex] = data->hdr.bestfree[0].length;
+ hdr = dbp->data;
+ free->bests[findex] = hdr->bestfree[0].length;
logfree = 1;
}
/*
@@ -1743,21 +1666,21 @@ xfs_dir2_node_addname_int(
xfs_da_buf_done(fbp);
return error;
}
- data = dbp->data;
+ hdr = dbp->data;
logfree = 0;
}
- ASSERT(be16_to_cpu(data->hdr.bestfree[0].length) >= length);
+ ASSERT(be16_to_cpu(hdr->bestfree[0].length) >= length);
/*
* Point to the existing unused space.
*/
dup = (xfs_dir2_data_unused_t *)
- ((char *)data + be16_to_cpu(data->hdr.bestfree[0].offset));
+ ((char *)hdr + be16_to_cpu(hdr->bestfree[0].offset));
needscan = needlog = 0;
/*
 * Mark the first part of the unused space as in use for us.
*/
xfs_dir2_data_use_free(tp, dbp, dup,
- (xfs_dir2_data_aoff_t)((char *)dup - (char *)data), length,
+ (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr), length,
&needlog, &needscan);
/*
* Fill in the new entry and log it.
@@ -1767,13 +1690,13 @@ xfs_dir2_node_addname_int(
dep->namelen = args->namelen;
memcpy(dep->name, args->name, dep->namelen);
tagp = xfs_dir2_data_entry_tag_p(dep);
- *tagp = cpu_to_be16((char *)dep - (char *)data);
+ *tagp = cpu_to_be16((char *)dep - (char *)hdr);
xfs_dir2_data_log_entry(tp, dbp, dep);
/*
* Rescan the block for bestfree if needed.
*/
if (needscan)
- xfs_dir2_data_freescan(mp, data, &needlog);
+ xfs_dir2_data_freescan(mp, hdr, &needlog);
/*
* Log the data block header if needed.
*/
@@ -1782,8 +1705,8 @@ xfs_dir2_node_addname_int(
/*
* If the freespace entry is now wrong, update it.
*/
- if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(data->hdr.bestfree[0].length)) {
- free->bests[findex] = data->hdr.bestfree[0].length;
+ if (be16_to_cpu(free->bests[findex]) != be16_to_cpu(hdr->bestfree[0].length)) {
+ free->bests[findex] = hdr->bestfree[0].length;
logfree = 1;
}
/*
@@ -1933,7 +1856,7 @@ xfs_dir2_node_replace(
xfs_da_args_t *args) /* operation arguments */
{
xfs_da_state_blk_t *blk; /* leaf block */
- xfs_dir2_data_t *data; /* data block structure */
+ xfs_dir2_data_hdr_t *hdr; /* data block header */
xfs_dir2_data_entry_t *dep; /* data entry changed */
int error; /* error return value */
int i; /* btree level */
@@ -1977,10 +1900,10 @@ xfs_dir2_node_replace(
/*
* Point to the data entry.
*/
- data = state->extrablk.bp->data;
- ASSERT(be32_to_cpu(data->hdr.magic) == XFS_DIR2_DATA_MAGIC);
+ hdr = state->extrablk.bp->data;
+ ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
dep = (xfs_dir2_data_entry_t *)
- ((char *)data +
+ ((char *)hdr +
xfs_dir2_dataptr_to_off(state->mp, be32_to_cpu(lep->address)));
ASSERT(inum != be64_to_cpu(dep->inumber));
/*
@@ -2044,7 +1967,7 @@ xfs_dir2_node_trim_free(
return 0;
}
free = bp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
+ ASSERT(free->hdr.magic == cpu_to_be32(XFS_DIR2_FREE_MAGIC));
/*
* If there are used entries, there's nothing to do.
*/
diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h
deleted file mode 100644
index 82dfe714719..00000000000
--- a/fs/xfs/xfs_dir2_node.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (c) 2000,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_NODE_H__
-#define __XFS_DIR2_NODE_H__
-
-/*
- * Directory version 2, btree node format structures
- */
-
-struct uio;
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_da_state;
-struct xfs_da_state_blk;
-struct xfs_inode;
-struct xfs_trans;
-
-/*
- * Offset of the freespace index.
- */
-#define XFS_DIR2_FREE_SPACE 2
-#define XFS_DIR2_FREE_OFFSET (XFS_DIR2_FREE_SPACE * XFS_DIR2_SPACE_SIZE)
-#define XFS_DIR2_FREE_FIRSTDB(mp) \
- xfs_dir2_byte_to_db(mp, XFS_DIR2_FREE_OFFSET)
-
-#define XFS_DIR2_FREE_MAGIC 0x58443246 /* XD2F */
-
-typedef struct xfs_dir2_free_hdr {
- __be32 magic; /* XFS_DIR2_FREE_MAGIC */
- __be32 firstdb; /* db of first entry */
- __be32 nvalid; /* count of valid entries */
- __be32 nused; /* count of used entries */
-} xfs_dir2_free_hdr_t;
-
-typedef struct xfs_dir2_free {
- xfs_dir2_free_hdr_t hdr; /* block header */
- __be16 bests[1]; /* best free counts */
- /* unused entries are -1 */
-} xfs_dir2_free_t;
-
-#define XFS_DIR2_MAX_FREE_BESTS(mp) \
- (((mp)->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) / \
- (uint)sizeof(xfs_dir2_data_off_t))
-
-/*
- * Convert data space db to the corresponding free db.
- */
-static inline xfs_dir2_db_t
-xfs_dir2_db_to_fdb(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
- return (XFS_DIR2_FREE_FIRSTDB(mp) + (db) / XFS_DIR2_MAX_FREE_BESTS(mp));
-}
-
-/*
- * Convert data space db to the corresponding index in a free db.
- */
-static inline int
-xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
-{
- return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp));
-}
-
-extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
- struct xfs_dabuf *lbp);
-extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
-extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
- struct xfs_da_args *args, int *indexp,
- struct xfs_da_state *state);
-extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
- struct xfs_dabuf *leaf2_bp);
-extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
- struct xfs_da_state_blk *oldblk,
- struct xfs_da_state_blk *newblk);
-extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
-extern void xfs_dir2_leafn_unbalance(struct xfs_da_state *state,
- struct xfs_da_state_blk *drop_blk,
- struct xfs_da_state_blk *save_blk);
-extern int xfs_dir2_node_addname(struct xfs_da_args *args);
-extern int xfs_dir2_node_lookup(struct xfs_da_args *args);
-extern int xfs_dir2_node_removename(struct xfs_da_args *args);
-extern int xfs_dir2_node_replace(struct xfs_da_args *args);
-extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo,
- int *rvalp);
-
-#endif /* __XFS_DIR2_NODE_H__ */
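
[Note: the deleted XFS_DIR2_MAX_FREE_BESTS() macro survives as the xfs_dir2_free_max_bests() calls seen in the hunks above. Its new definition is not part of the shown hunks; a presumed equivalent, moved into the new xfs_dir2_format.h as an inline:

static inline int
xfs_dir2_free_max_bests(struct xfs_mount *mp)
{
	/* same arithmetic as the macro deleted above */
	return (mp->m_dirblksize - (uint)sizeof(xfs_dir2_free_hdr_t)) /
		(uint)sizeof(xfs_dir2_data_off_t);
}]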
diff --git a/fs/xfs/xfs_dir2_priv.h b/fs/xfs/xfs_dir2_priv.h
new file mode 100644
index 00000000000..067f403ecf8
--- /dev/null
+++ b/fs/xfs/xfs_dir2_priv.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_DIR2_PRIV_H__
+#define __XFS_DIR2_PRIV_H__
+
+/* xfs_dir2.c */
+extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
+extern int xfs_dir2_isblock(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
+extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp, int *r);
+extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
+ xfs_dir2_db_t *dbp);
+extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
+ struct xfs_dabuf *bp);
+extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
+ const unsigned char *name, int len);
+
+/* xfs_dir2_block.c */
+extern int xfs_dir2_block_addname(struct xfs_da_args *args);
+extern int xfs_dir2_block_getdents(struct xfs_inode *dp, void *dirent,
+ xfs_off_t *offset, filldir_t filldir);
+extern int xfs_dir2_block_lookup(struct xfs_da_args *args);
+extern int xfs_dir2_block_removename(struct xfs_da_args *args);
+extern int xfs_dir2_block_replace(struct xfs_da_args *args);
+extern int xfs_dir2_leaf_to_block(struct xfs_da_args *args,
+ struct xfs_dabuf *lbp, struct xfs_dabuf *dbp);
+
+/* xfs_dir2_data.c */
+#ifdef DEBUG
+extern void xfs_dir2_data_check(struct xfs_inode *dp, struct xfs_dabuf *bp);
+#else
+#define xfs_dir2_data_check(dp,bp)
+#endif
+extern struct xfs_dir2_data_free *
+xfs_dir2_data_freeinsert(struct xfs_dir2_data_hdr *hdr,
+ struct xfs_dir2_data_unused *dup, int *loghead);
+extern void xfs_dir2_data_freescan(struct xfs_mount *mp,
+ struct xfs_dir2_data_hdr *hdr, int *loghead);
+extern int xfs_dir2_data_init(struct xfs_da_args *args, xfs_dir2_db_t blkno,
+ struct xfs_dabuf **bpp);
+extern void xfs_dir2_data_log_entry(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ struct xfs_dir2_data_entry *dep);
+extern void xfs_dir2_data_log_header(struct xfs_trans *tp,
+ struct xfs_dabuf *bp);
+extern void xfs_dir2_data_log_unused(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ struct xfs_dir2_data_unused *dup);
+extern void xfs_dir2_data_make_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ xfs_dir2_data_aoff_t offset, xfs_dir2_data_aoff_t len,
+ int *needlogp, int *needscanp);
+extern void xfs_dir2_data_use_free(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ struct xfs_dir2_data_unused *dup, xfs_dir2_data_aoff_t offset,
+ xfs_dir2_data_aoff_t len, int *needlogp, int *needscanp);
+
+/* xfs_dir2_leaf.c */
+extern int xfs_dir2_block_to_leaf(struct xfs_da_args *args,
+ struct xfs_dabuf *dbp);
+extern int xfs_dir2_leaf_addname(struct xfs_da_args *args);
+extern void xfs_dir2_leaf_compact(struct xfs_da_args *args,
+ struct xfs_dabuf *bp);
+extern void xfs_dir2_leaf_compact_x1(struct xfs_dabuf *bp, int *indexp,
+ int *lowstalep, int *highstalep, int *lowlogp, int *highlogp);
+extern int xfs_dir2_leaf_getdents(struct xfs_inode *dp, void *dirent,
+ size_t bufsize, xfs_off_t *offset, filldir_t filldir);
+extern int xfs_dir2_leaf_init(struct xfs_da_args *args, xfs_dir2_db_t bno,
+ struct xfs_dabuf **bpp, int magic);
+extern void xfs_dir2_leaf_log_ents(struct xfs_trans *tp, struct xfs_dabuf *bp,
+ int first, int last);
+extern void xfs_dir2_leaf_log_header(struct xfs_trans *tp,
+ struct xfs_dabuf *bp);
+extern int xfs_dir2_leaf_lookup(struct xfs_da_args *args);
+extern int xfs_dir2_leaf_removename(struct xfs_da_args *args);
+extern int xfs_dir2_leaf_replace(struct xfs_da_args *args);
+extern int xfs_dir2_leaf_search_hash(struct xfs_da_args *args,
+ struct xfs_dabuf *lbp);
+extern int xfs_dir2_leaf_trim_data(struct xfs_da_args *args,
+ struct xfs_dabuf *lbp, xfs_dir2_db_t db);
+extern struct xfs_dir2_leaf_entry *
+xfs_dir2_leaf_find_entry(struct xfs_dir2_leaf *leaf, int index, int compact,
+ int lowstale, int highstale,
+ int *lfloglow, int *lfloghigh);
+extern int xfs_dir2_node_to_leaf(struct xfs_da_state *state);
+
+/* xfs_dir2_node.c */
+extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
+ struct xfs_dabuf *lbp);
+extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
+extern int xfs_dir2_leafn_lookup_int(struct xfs_dabuf *bp,
+ struct xfs_da_args *args, int *indexp,
+ struct xfs_da_state *state);
+extern int xfs_dir2_leafn_order(struct xfs_dabuf *leaf1_bp,
+ struct xfs_dabuf *leaf2_bp);
+extern int xfs_dir2_leafn_split(struct xfs_da_state *state,
+ struct xfs_da_state_blk *oldblk, struct xfs_da_state_blk *newblk);
+extern int xfs_dir2_leafn_toosmall(struct xfs_da_state *state, int *action);
+extern void xfs_dir2_leafn_unbalance(struct xfs_da_state *state,
+ struct xfs_da_state_blk *drop_blk,
+ struct xfs_da_state_blk *save_blk);
+extern int xfs_dir2_node_addname(struct xfs_da_args *args);
+extern int xfs_dir2_node_lookup(struct xfs_da_args *args);
+extern int xfs_dir2_node_removename(struct xfs_da_args *args);
+extern int xfs_dir2_node_replace(struct xfs_da_args *args);
+extern int xfs_dir2_node_trim_free(struct xfs_da_args *args, xfs_fileoff_t fo,
+ int *rvalp);
+
+/* xfs_dir2_sf.c */
+extern xfs_ino_t xfs_dir2_sf_get_parent_ino(struct xfs_dir2_sf_hdr *sfp);
+extern xfs_ino_t xfs_dir2_sfe_get_ino(struct xfs_dir2_sf_hdr *sfp,
+ struct xfs_dir2_sf_entry *sfep);
+extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
+ struct xfs_dir2_data_hdr *block, struct xfs_dir2_sf_hdr *sfhp);
+extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
+ int size, xfs_dir2_sf_hdr_t *sfhp);
+extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
+extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
+extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent,
+ xfs_off_t *offset, filldir_t filldir);
+extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
+extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
+extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
+
+#endif /* __XFS_DIR2_PRIV_H__ */
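
[Note: taken together, the shortform accessors exported here replace direct xfs_dir2_sf_t field access. A hypothetical walk patterned on xfs_dir2_sf_check() below, counting entries that need 8-byte inode numbers; it assumes xfs_dir2_sf_firstentry()/xfs_dir2_sf_nextentry() now take the header type, as the converted call sites suggest:

static int
sf_count_i8(struct xfs_dir2_sf_hdr *sfp)	/* dp->i_df.if_u1.if_data */
{
	struct xfs_dir2_sf_entry *sfep;
	int	i;
	int	i8count;

	/* the parent ("..") inode lives in the header itself */
	i8count = xfs_dir2_sf_get_parent_ino(sfp) > XFS_DIR2_MAX_SHORT_INUM;
	for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
	     i < sfp->count;
	     i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep))
		i8count += xfs_dir2_sfe_get_ino(sfp, sfep) >
			   XFS_DIR2_MAX_SHORT_INUM;
	return i8count;
}]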
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index b1bae6b1eed..79d05e84e29 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -23,18 +23,16 @@
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir2.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
-#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_error.h"
-#include "xfs_dir2_data.h"
-#include "xfs_dir2_leaf.h"
-#include "xfs_dir2_block.h"
+#include "xfs_dir2.h"
+#include "xfs_dir2_format.h"
+#include "xfs_dir2_priv.h"
#include "xfs_trace.h"
/*
@@ -60,6 +58,82 @@ static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
#endif /* XFS_BIG_INUMS */
/*
+ * Inode numbers in short-form directories can come in two versions,
+ * either 4 bytes or 8 bytes wide. These helpers deal with the
+ * two forms transparently by looking at the header's i8count field.
+ *
+ * For 64-bit inode numbers the most significant byte must be zero.
+ */
+static xfs_ino_t
+xfs_dir2_sf_get_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_dir2_inou_t *from)
+{
+ if (hdr->i8count)
+ return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
+ else
+ return get_unaligned_be32(&from->i4.i);
+}
+
+static void
+xfs_dir2_sf_put_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_dir2_inou_t *to,
+ xfs_ino_t ino)
+{
+ ASSERT((ino & 0xff00000000000000ULL) == 0);
+
+ if (hdr->i8count)
+ put_unaligned_be64(ino, &to->i8.i);
+ else
+ put_unaligned_be32(ino, &to->i4.i);
+}
+
+xfs_ino_t
+xfs_dir2_sf_get_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr)
+{
+ return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
+}
+
+static void
+xfs_dir2_sf_put_parent_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ xfs_ino_t ino)
+{
+ xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
+}
+
+/*
+ * In short-form directory entries the inode numbers are stored at a
+ * variable offset after the entry name, and may only be accessed
+ * through the helpers below.
+ */
+static xfs_dir2_inou_t *
+xfs_dir2_sfe_inop(
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return (xfs_dir2_inou_t *)&sfep->name[sfep->namelen];
+}
+
+xfs_ino_t
+xfs_dir2_sfe_get_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep)
+{
+ return xfs_dir2_sf_get_ino(hdr, xfs_dir2_sfe_inop(sfep));
+}
+
+static void
+xfs_dir2_sfe_put_ino(
+ struct xfs_dir2_sf_hdr *hdr,
+ struct xfs_dir2_sf_entry *sfep,
+ xfs_ino_t ino)
+{
+ xfs_dir2_sf_put_ino(hdr, xfs_dir2_sfe_inop(sfep), ino);
+}
+
+/*
* Given a block directory (dp/block), calculate its size as a shortform (sf)
 * directory and a header for the sf directory, if it will fit in the
* space currently present in the inode. If it won't fit, the output
@@ -68,7 +142,7 @@ static void xfs_dir2_sf_toino8(xfs_da_args_t *args);
int /* size for sf form */
xfs_dir2_block_sfsize(
xfs_inode_t *dp, /* incore inode pointer */
- xfs_dir2_block_t *block, /* block directory data */
+ xfs_dir2_data_hdr_t *hdr, /* block directory data */
xfs_dir2_sf_hdr_t *sfhp) /* output: header for sf form */
{
xfs_dir2_dataptr_t addr; /* data entry address */
@@ -88,7 +162,7 @@ xfs_dir2_block_sfsize(
mp = dp->i_mount;
count = i8count = namelen = 0;
- btp = xfs_dir2_block_tail_p(mp, block);
+ btp = xfs_dir2_block_tail_p(mp, hdr);
blp = xfs_dir2_block_leaf_p(btp);
/*
@@ -101,7 +175,7 @@ xfs_dir2_block_sfsize(
* Calculate the pointer to the entry at hand.
*/
dep = (xfs_dir2_data_entry_t *)
- ((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
+ ((char *)hdr + xfs_dir2_dataptr_to_off(mp, addr));
/*
* Detect . and .., so we can special-case them.
* . is not included in sf directories.
@@ -138,7 +212,7 @@ xfs_dir2_block_sfsize(
*/
sfhp->count = count;
sfhp->i8count = i8count;
- xfs_dir2_sf_put_inumber((xfs_dir2_sf_t *)sfhp, &parent, &sfhp->parent);
+ xfs_dir2_sf_put_parent_ino(sfhp, parent);
return size;
}
@@ -153,7 +227,7 @@ xfs_dir2_block_to_sf(
int size, /* shortform directory size */
xfs_dir2_sf_hdr_t *sfhp) /* shortform directory hdr */
{
- xfs_dir2_block_t *block; /* block structure */
+ xfs_dir2_data_hdr_t *hdr; /* block header */
xfs_dir2_block_tail_t *btp; /* block tail pointer */
xfs_dir2_data_entry_t *dep; /* data entry pointer */
xfs_inode_t *dp; /* incore directory inode */
@@ -164,8 +238,7 @@ xfs_dir2_block_to_sf(
xfs_mount_t *mp; /* filesystem mount point */
char *ptr; /* current data pointer */
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
- xfs_ino_t temp;
+ xfs_dir2_sf_hdr_t *sfp; /* shortform directory header */
trace_xfs_dir2_block_to_sf(args);
@@ -176,13 +249,14 @@ xfs_dir2_block_to_sf(
* Make a copy of the block data, so we can shrink the inode
* and add local data.
*/
- block = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
- memcpy(block, bp->data, mp->m_dirblksize);
+ hdr = kmem_alloc(mp->m_dirblksize, KM_SLEEP);
+ memcpy(hdr, bp->data, mp->m_dirblksize);
logflags = XFS_ILOG_CORE;
if ((error = xfs_dir2_shrink_inode(args, mp->m_dirdatablk, bp))) {
ASSERT(error != ENOSPC);
goto out;
}
+
/*
* The buffer is now unconditionally gone, whether
* xfs_dir2_shrink_inode worked or not.
@@ -198,14 +272,14 @@ xfs_dir2_block_to_sf(
/*
 * Copy the header into the newly allocated local space.
*/
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
memcpy(sfp, sfhp, xfs_dir2_sf_hdr_size(sfhp->i8count));
dp->i_d.di_size = size;
/*
* Set up to loop over the block's entries.
*/
- btp = xfs_dir2_block_tail_p(mp, block);
- ptr = (char *)block->u;
+ btp = xfs_dir2_block_tail_p(mp, hdr);
+ ptr = (char *)(hdr + 1);
endptr = (char *)xfs_dir2_block_leaf_p(btp);
sfep = xfs_dir2_sf_firstentry(sfp);
/*
@@ -233,7 +307,7 @@ xfs_dir2_block_to_sf(
else if (dep->namelen == 2 &&
dep->name[0] == '.' && dep->name[1] == '.')
ASSERT(be64_to_cpu(dep->inumber) ==
- xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent));
+ xfs_dir2_sf_get_parent_ino(sfp));
/*
* Normal entry, copy it into shortform.
*/
@@ -241,11 +315,11 @@ xfs_dir2_block_to_sf(
sfep->namelen = dep->namelen;
xfs_dir2_sf_put_offset(sfep,
(xfs_dir2_data_aoff_t)
- ((char *)dep - (char *)block));
+ ((char *)dep - (char *)hdr));
memcpy(sfep->name, dep->name, dep->namelen);
- temp = be64_to_cpu(dep->inumber);
- xfs_dir2_sf_put_inumber(sfp, &temp,
- xfs_dir2_sf_inumberp(sfep));
+ xfs_dir2_sfe_put_ino(sfp, sfep,
+ be64_to_cpu(dep->inumber));
+
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
ptr += xfs_dir2_data_entsize(dep->namelen);
@@ -254,7 +328,7 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_check(args);
out:
xfs_trans_log_inode(args->trans, dp, logflags);
- kmem_free(block);
+ kmem_free(hdr);
return error;
}
@@ -277,7 +351,7 @@ xfs_dir2_sf_addname(
xfs_dir2_data_aoff_t offset = 0; /* offset for new entry */
int old_isize; /* di_size before adding name */
int pick; /* which algorithm to use */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
xfs_dir2_sf_entry_t *sfep = NULL; /* shortform entry */
trace_xfs_dir2_sf_addname(args);
@@ -294,19 +368,19 @@ xfs_dir2_sf_addname(
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
/*
* Compute entry (and change in) size.
*/
- add_entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
+ add_entsize = xfs_dir2_sf_entsize(sfp, args->namelen);
incr_isize = add_entsize;
objchange = 0;
#if XFS_BIG_INUMS
/*
* Do we have to change to 8 byte inodes?
*/
- if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
+ if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
/*
* Yes, adjust the entry size and the total size.
*/
@@ -314,7 +388,7 @@ xfs_dir2_sf_addname(
(uint)sizeof(xfs_dir2_ino8_t) -
(uint)sizeof(xfs_dir2_ino4_t);
incr_isize +=
- (sfp->hdr.count + 2) *
+ (sfp->count + 2) *
((uint)sizeof(xfs_dir2_ino8_t) -
(uint)sizeof(xfs_dir2_ino4_t));
objchange = 1;
@@ -384,21 +458,21 @@ xfs_dir2_sf_addname_easy(
{
int byteoff; /* byte offset in sf dir */
xfs_inode_t *dp; /* incore directory inode */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
dp = args->dp;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
byteoff = (int)((char *)sfep - (char *)sfp);
/*
* Grow the in-inode space.
*/
- xfs_idata_realloc(dp, xfs_dir2_sf_entsize_byname(sfp, args->namelen),
+ xfs_idata_realloc(dp, xfs_dir2_sf_entsize(sfp, args->namelen),
XFS_DATA_FORK);
/*
* Need to set up again due to realloc of the inode data.
*/
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
sfep = (xfs_dir2_sf_entry_t *)((char *)sfp + byteoff);
/*
* Fill in the new entry.
@@ -406,15 +480,14 @@ xfs_dir2_sf_addname_easy(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- xfs_dir2_sf_put_inumber(sfp, &args->inumber,
- xfs_dir2_sf_inumberp(sfep));
+ xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber);
/*
* Update the header and inode.
*/
- sfp->hdr.count++;
+ sfp->count++;
#if XFS_BIG_INUMS
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM)
- sfp->hdr.i8count++;
+ sfp->i8count++;
#endif
dp->i_d.di_size = new_isize;
xfs_dir2_sf_check(args);
@@ -444,19 +517,19 @@ xfs_dir2_sf_addname_hard(
xfs_dir2_data_aoff_t offset; /* current offset value */
int old_isize; /* previous di_size */
xfs_dir2_sf_entry_t *oldsfep; /* entry in original dir */
- xfs_dir2_sf_t *oldsfp; /* original shortform dir */
+ xfs_dir2_sf_hdr_t *oldsfp; /* original shortform dir */
xfs_dir2_sf_entry_t *sfep; /* entry in new dir */
- xfs_dir2_sf_t *sfp; /* new shortform dir */
+ xfs_dir2_sf_hdr_t *sfp; /* new shortform dir */
/*
* Copy the old directory to the stack buffer.
*/
dp = args->dp;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
old_isize = (int)dp->i_d.di_size;
buf = kmem_alloc(old_isize, KM_SLEEP);
- oldsfp = (xfs_dir2_sf_t *)buf;
+ oldsfp = (xfs_dir2_sf_hdr_t *)buf;
memcpy(oldsfp, sfp, old_isize);
/*
* Loop over the old directory finding the place we're going
@@ -485,7 +558,7 @@ xfs_dir2_sf_addname_hard(
/*
* Reset the pointer since the buffer was reallocated.
*/
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
/*
* Copy the first part of the directory, including the header.
*/
@@ -498,12 +571,11 @@ xfs_dir2_sf_addname_hard(
sfep->namelen = args->namelen;
xfs_dir2_sf_put_offset(sfep, offset);
memcpy(sfep->name, args->name, sfep->namelen);
- xfs_dir2_sf_put_inumber(sfp, &args->inumber,
- xfs_dir2_sf_inumberp(sfep));
- sfp->hdr.count++;
+ xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber);
+ sfp->count++;
#if XFS_BIG_INUMS
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && !objchange)
- sfp->hdr.i8count++;
+ sfp->i8count++;
#endif
/*
* If there's more left to copy, do that.
@@ -537,14 +609,14 @@ xfs_dir2_sf_addname_pick(
xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_data_aoff_t offset; /* data block offset */
xfs_dir2_sf_entry_t *sfep; /* shortform entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
int size; /* entry's data size */
int used; /* data bytes used */
dp = args->dp;
mp = dp->i_mount;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
size = xfs_dir2_data_entsize(args->namelen);
offset = XFS_DIR2_DATA_FIRST_OFFSET;
sfep = xfs_dir2_sf_firstentry(sfp);
@@ -554,7 +626,7 @@ xfs_dir2_sf_addname_pick(
* Keep track of data offset and whether we've seen a place
* to insert the new entry.
*/
- for (i = 0; i < sfp->hdr.count; i++) {
+ for (i = 0; i < sfp->count; i++) {
if (!holefit)
holefit = offset + size <= xfs_dir2_sf_get_offset(sfep);
offset = xfs_dir2_sf_get_offset(sfep) +
@@ -566,7 +638,7 @@ xfs_dir2_sf_addname_pick(
* was a data block (block form directory).
*/
used = offset +
- (sfp->hdr.count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (sfp->count + 3) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
(uint)sizeof(xfs_dir2_block_tail_t);
/*
* If it won't fit in a block form then we can't insert it,
@@ -612,30 +684,30 @@ xfs_dir2_sf_check(
xfs_ino_t ino; /* entry inode number */
int offset; /* data offset */
xfs_dir2_sf_entry_t *sfep; /* shortform dir entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
dp = args->dp;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
offset = XFS_DIR2_DATA_FIRST_OFFSET;
- ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
i8count = ino > XFS_DIR2_MAX_SHORT_INUM;
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
- i < sfp->hdr.count;
+ i < sfp->count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
ASSERT(xfs_dir2_sf_get_offset(sfep) >= offset);
- ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
+ ino = xfs_dir2_sfe_get_ino(sfp, sfep);
i8count += ino > XFS_DIR2_MAX_SHORT_INUM;
offset =
xfs_dir2_sf_get_offset(sfep) +
xfs_dir2_data_entsize(sfep->namelen);
}
- ASSERT(i8count == sfp->hdr.i8count);
+ ASSERT(i8count == sfp->i8count);
ASSERT(XFS_BIG_INUMS || i8count == 0);
ASSERT((char *)sfep - (char *)sfp == dp->i_d.di_size);
ASSERT(offset +
- (sfp->hdr.count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
+ (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
(uint)sizeof(xfs_dir2_block_tail_t) <=
dp->i_mount->m_dirblksize);
}
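
[Note: the closing ASSERT checks that the directory could still be written out in block form: shortform stores neither "." nor ".." as entries (the parent lives in the header), so block form needs count + 2 leaf slots plus the block tail. The same accounting appears in xfs_dir2_sf_addname_pick() above as count + 3, the extra slot being the entry about to be added. As a sketch, using the document's types:

static int sf_fits_block(int offset, int count, uint dirblksize)
{
	/* offset = data space used; +2 leaf slots cover "." and ".." */
	return offset +
	       (count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) +
	       (uint)sizeof(xfs_dir2_block_tail_t) <= dirblksize;
}]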
@@ -651,7 +723,7 @@ xfs_dir2_sf_create(
{
xfs_inode_t *dp; /* incore directory inode */
int i8count; /* parent inode is an 8-byte number */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
int size; /* directory size */
trace_xfs_dir2_sf_create(args);
@@ -681,13 +753,13 @@ xfs_dir2_sf_create(
/*
 * Fill in the header.
*/
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- sfp->hdr.i8count = i8count;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ sfp->i8count = i8count;
/*
* Now can put in the inode number, since i8count is set.
*/
- xfs_dir2_sf_put_inumber(sfp, &pino, &sfp->hdr.parent);
- sfp->hdr.count = 0;
+ xfs_dir2_sf_put_parent_ino(sfp, pino);
+ sfp->count = 0;
dp->i_d.di_size = size;
xfs_dir2_sf_check(args);
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
@@ -705,7 +777,7 @@ xfs_dir2_sf_getdents(
xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_dataptr_t off; /* current entry's offset */
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
xfs_dir2_dataptr_t dot_offset;
xfs_dir2_dataptr_t dotdot_offset;
xfs_ino_t ino;
@@ -724,9 +796,9 @@ xfs_dir2_sf_getdents(
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
- ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
+ ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
/*
* If the block number in the offset is out of range, we're done.
@@ -759,7 +831,7 @@ xfs_dir2_sf_getdents(
* Put .. entry unless we're starting past it.
*/
if (*offset <= dotdot_offset) {
- ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
if (filldir(dirent, "..", 2, dotdot_offset & 0x7fffffff, ino, DT_DIR)) {
*offset = dotdot_offset & 0x7fffffff;
return 0;
@@ -770,7 +842,7 @@ xfs_dir2_sf_getdents(
 * Loop while there are more entries and the filldir callback succeeds.
*/
sfep = xfs_dir2_sf_firstentry(sfp);
- for (i = 0; i < sfp->hdr.count; i++) {
+ for (i = 0; i < sfp->count; i++) {
off = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
xfs_dir2_sf_get_offset(sfep));
@@ -779,7 +851,7 @@ xfs_dir2_sf_getdents(
continue;
}
- ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
+ ino = xfs_dir2_sfe_get_ino(sfp, sfep);
if (filldir(dirent, (char *)sfep->name, sfep->namelen,
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
@@ -805,7 +877,7 @@ xfs_dir2_sf_lookup(
int i; /* entry index */
int error;
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
enum xfs_dacmp cmp; /* comparison result */
xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
@@ -824,8 +896,8 @@ xfs_dir2_sf_lookup(
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
/*
* Special case for .
*/
@@ -839,7 +911,7 @@ xfs_dir2_sf_lookup(
*/
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
- args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
+ args->inumber = xfs_dir2_sf_get_parent_ino(sfp);
args->cmpresult = XFS_CMP_EXACT;
return XFS_ERROR(EEXIST);
}
@@ -847,7 +919,7 @@ xfs_dir2_sf_lookup(
* Loop over all the entries trying to match ours.
*/
ci_sfep = NULL;
- for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
+ for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
/*
* Compare name and if it's an exact match, return the inode
@@ -858,8 +930,7 @@ xfs_dir2_sf_lookup(
sfep->namelen);
if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
args->cmpresult = cmp;
- args->inumber = xfs_dir2_sf_get_inumber(sfp,
- xfs_dir2_sf_inumberp(sfep));
+ args->inumber = xfs_dir2_sfe_get_ino(sfp, sfep);
if (cmp == XFS_CMP_EXACT)
return XFS_ERROR(EEXIST);
ci_sfep = sfep;
@@ -891,7 +962,7 @@ xfs_dir2_sf_removename(
int newsize; /* new inode size */
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
trace_xfs_dir2_sf_removename(args);
@@ -908,32 +979,31 @@ xfs_dir2_sf_removename(
}
ASSERT(dp->i_df.if_bytes == oldsize);
ASSERT(dp->i_df.if_u1.if_data != NULL);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ ASSERT(oldsize >= xfs_dir2_sf_hdr_size(sfp->i8count));
/*
* Loop over the old directory entries.
* Find the one we're deleting.
*/
- for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
+ for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
- ASSERT(xfs_dir2_sf_get_inumber(sfp,
- xfs_dir2_sf_inumberp(sfep)) ==
- args->inumber);
+ ASSERT(xfs_dir2_sfe_get_ino(sfp, sfep) ==
+ args->inumber);
break;
}
}
/*
* Didn't find it.
*/
- if (i == sfp->hdr.count)
+ if (i == sfp->count)
return XFS_ERROR(ENOENT);
/*
* Calculate sizes.
*/
byteoff = (int)((char *)sfep - (char *)sfp);
- entsize = xfs_dir2_sf_entsize_byname(sfp, args->namelen);
+ entsize = xfs_dir2_sf_entsize(sfp, args->namelen);
newsize = oldsize - entsize;
/*
* Copy the part if any after the removed entry, sliding it down.
@@ -944,22 +1014,22 @@ xfs_dir2_sf_removename(
/*
* Fix up the header and file size.
*/
- sfp->hdr.count--;
+ sfp->count--;
dp->i_d.di_size = newsize;
/*
* Reallocate, making it smaller.
*/
xfs_idata_realloc(dp, newsize - oldsize, XFS_DATA_FORK);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
#if XFS_BIG_INUMS
/*
* Are we changing inode number size?
*/
if (args->inumber > XFS_DIR2_MAX_SHORT_INUM) {
- if (sfp->hdr.i8count == 1)
+ if (sfp->i8count == 1)
xfs_dir2_sf_toino4(args);
else
- sfp->hdr.i8count--;
+ sfp->i8count--;
}
#endif
xfs_dir2_sf_check(args);
@@ -983,7 +1053,7 @@ xfs_dir2_sf_replace(
int i8elevated; /* sf_toino8 set i8count=1 */
#endif
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
- xfs_dir2_sf_t *sfp; /* shortform structure */
+ xfs_dir2_sf_hdr_t *sfp; /* shortform structure */
trace_xfs_dir2_sf_replace(args);
@@ -999,19 +1069,19 @@ xfs_dir2_sf_replace(
}
ASSERT(dp->i_df.if_bytes == dp->i_d.di_size);
ASSERT(dp->i_df.if_u1.if_data != NULL);
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->hdr.i8count));
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ ASSERT(dp->i_d.di_size >= xfs_dir2_sf_hdr_size(sfp->i8count));
#if XFS_BIG_INUMS
/*
 * New inode number is large, and we need to convert to 8-byte inodes.
*/
- if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->hdr.i8count == 0) {
+ if (args->inumber > XFS_DIR2_MAX_SHORT_INUM && sfp->i8count == 0) {
int error; /* error return value */
int newsize; /* new inode size */
newsize =
dp->i_df.if_bytes +
- (sfp->hdr.count + 1) *
+ (sfp->count + 1) *
((uint)sizeof(xfs_dir2_ino8_t) -
(uint)sizeof(xfs_dir2_ino4_t));
/*
@@ -1029,7 +1099,7 @@ xfs_dir2_sf_replace(
*/
xfs_dir2_sf_toino8(args);
i8elevated = 1;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
} else
i8elevated = 0;
#endif
@@ -1040,34 +1110,32 @@ xfs_dir2_sf_replace(
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
#if XFS_BIG_INUMS || defined(DEBUG)
- ino = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
+ ino = xfs_dir2_sf_get_parent_ino(sfp);
ASSERT(args->inumber != ino);
#endif
- xfs_dir2_sf_put_inumber(sfp, &args->inumber, &sfp->hdr.parent);
+ xfs_dir2_sf_put_parent_ino(sfp, args->inumber);
}
/*
* Normal entry, look for the name.
*/
else {
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
- i < sfp->hdr.count;
+ i < sfp->count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
XFS_CMP_EXACT) {
#if XFS_BIG_INUMS || defined(DEBUG)
- ino = xfs_dir2_sf_get_inumber(sfp,
- xfs_dir2_sf_inumberp(sfep));
+ ino = xfs_dir2_sfe_get_ino(sfp, sfep);
ASSERT(args->inumber != ino);
#endif
- xfs_dir2_sf_put_inumber(sfp, &args->inumber,
- xfs_dir2_sf_inumberp(sfep));
+ xfs_dir2_sfe_put_ino(sfp, sfep, args->inumber);
break;
}
}
/*
* Didn't find it.
*/
- if (i == sfp->hdr.count) {
+ if (i == sfp->count) {
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
#if XFS_BIG_INUMS
if (i8elevated)
@@ -1085,10 +1153,10 @@ xfs_dir2_sf_replace(
/*
 * And the old count was one, so we need to convert to 4-byte inodes.
*/
- if (sfp->hdr.i8count == 1)
+ if (sfp->i8count == 1)
xfs_dir2_sf_toino4(args);
else
- sfp->hdr.i8count--;
+ sfp->i8count--;
}
/*
* See if the old number was small, the new number is large.
@@ -1099,9 +1167,9 @@ xfs_dir2_sf_replace(
* add to the i8count unless we just converted to 8-byte
* inodes (which does an implied i8count = 1)
*/
- ASSERT(sfp->hdr.i8count != 0);
+ ASSERT(sfp->i8count != 0);
if (!i8elevated)
- sfp->hdr.i8count++;
+ sfp->i8count++;
}
#endif
xfs_dir2_sf_check(args);
@@ -1121,13 +1189,12 @@ xfs_dir2_sf_toino4(
char *buf; /* old dir's buffer */
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
- xfs_ino_t ino; /* entry inode number */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
- xfs_dir2_sf_t *oldsfp; /* old sf directory */
+ xfs_dir2_sf_hdr_t *oldsfp; /* old sf directory */
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
- xfs_dir2_sf_t *sfp; /* new sf directory */
+ xfs_dir2_sf_hdr_t *sfp; /* new sf directory */
trace_xfs_dir2_sf_toino4(args);
@@ -1140,44 +1207,42 @@ xfs_dir2_sf_toino4(
*/
oldsize = dp->i_df.if_bytes;
buf = kmem_alloc(oldsize, KM_SLEEP);
- oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(oldsfp->hdr.i8count == 1);
+ oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ ASSERT(oldsfp->i8count == 1);
memcpy(buf, oldsfp, oldsize);
/*
* Compute the new inode size.
*/
newsize =
oldsize -
- (oldsfp->hdr.count + 1) *
+ (oldsfp->count + 1) *
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
/*
* Reset our pointers, the data has moved.
*/
- oldsfp = (xfs_dir2_sf_t *)buf;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ oldsfp = (xfs_dir2_sf_hdr_t *)buf;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
/*
* Fill in the new header.
*/
- sfp->hdr.count = oldsfp->hdr.count;
- sfp->hdr.i8count = 0;
- ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
- xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
+ sfp->count = oldsfp->count;
+ sfp->i8count = 0;
+ xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
- i < sfp->hdr.count;
+ i < sfp->count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- ino = xfs_dir2_sf_get_inumber(oldsfp,
- xfs_dir2_sf_inumberp(oldsfep));
- xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
+ xfs_dir2_sfe_put_ino(sfp, sfep,
+ xfs_dir2_sfe_get_ino(oldsfp, oldsfep));
}
/*
* Clean up the inode.
@@ -1199,13 +1264,12 @@ xfs_dir2_sf_toino8(
char *buf; /* old dir's buffer */
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
- xfs_ino_t ino; /* entry inode number */
int newsize; /* new inode size */
xfs_dir2_sf_entry_t *oldsfep; /* old sf entry */
- xfs_dir2_sf_t *oldsfp; /* old sf directory */
+ xfs_dir2_sf_hdr_t *oldsfp; /* old sf directory */
int oldsize; /* old inode size */
xfs_dir2_sf_entry_t *sfep; /* new sf entry */
- xfs_dir2_sf_t *sfp; /* new sf directory */
+ xfs_dir2_sf_hdr_t *sfp; /* new sf directory */
trace_xfs_dir2_sf_toino8(args);
@@ -1218,44 +1282,42 @@ xfs_dir2_sf_toino8(
*/
oldsize = dp->i_df.if_bytes;
buf = kmem_alloc(oldsize, KM_SLEEP);
- oldsfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
- ASSERT(oldsfp->hdr.i8count == 0);
+ oldsfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
+ ASSERT(oldsfp->i8count == 0);
memcpy(buf, oldsfp, oldsize);
/*
* Compute the new inode size.
*/
newsize =
oldsize +
- (oldsfp->hdr.count + 1) *
+ (oldsfp->count + 1) *
((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
/*
* Reset our pointers, the data has moved.
*/
- oldsfp = (xfs_dir2_sf_t *)buf;
- sfp = (xfs_dir2_sf_t *)dp->i_df.if_u1.if_data;
+ oldsfp = (xfs_dir2_sf_hdr_t *)buf;
+ sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
/*
* Fill in the new header.
*/
- sfp->hdr.count = oldsfp->hdr.count;
- sfp->hdr.i8count = 1;
- ino = xfs_dir2_sf_get_inumber(oldsfp, &oldsfp->hdr.parent);
- xfs_dir2_sf_put_inumber(sfp, &ino, &sfp->hdr.parent);
+ sfp->count = oldsfp->count;
+ sfp->i8count = 1;
+ xfs_dir2_sf_put_parent_ino(sfp, xfs_dir2_sf_get_parent_ino(oldsfp));
/*
* Copy the entries field by field.
*/
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp),
oldsfep = xfs_dir2_sf_firstentry(oldsfp);
- i < sfp->hdr.count;
+ i < sfp->count;
i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep),
oldsfep = xfs_dir2_sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
sfep->offset = oldsfep->offset;
memcpy(sfep->name, oldsfep->name, sfep->namelen);
- ino = xfs_dir2_sf_get_inumber(oldsfp,
- xfs_dir2_sf_inumberp(oldsfep));
- xfs_dir2_sf_put_inumber(sfp, &ino, xfs_dir2_sf_inumberp(sfep));
+ xfs_dir2_sfe_put_ino(sfp, sfep,
+ xfs_dir2_sfe_get_ino(oldsfp, oldsfep));
}
/*
* Clean up the inode.
diff --git a/fs/xfs/xfs_dir2_sf.h b/fs/xfs/xfs_dir2_sf.h
deleted file mode 100644
index 6ac44b550d3..00000000000
--- a/fs/xfs/xfs_dir2_sf.h
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2000-2001,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-#ifndef __XFS_DIR2_SF_H__
-#define __XFS_DIR2_SF_H__
-
-/*
- * Directory layout when stored internal to an inode.
- *
- * Small directories are packed as tightly as possible so as to
- * fit into the literal area of the inode.
- */
-
-struct uio;
-struct xfs_dabuf;
-struct xfs_da_args;
-struct xfs_dir2_block;
-struct xfs_inode;
-struct xfs_mount;
-struct xfs_trans;
-
-/*
- * Inode number stored as 8 8-bit values.
- */
-typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t;
-
-/*
- * Inode number stored as 4 8-bit values.
- * Works a lot of the time, when all the inode numbers in a directory
- * fit in 32 bits.
- */
-typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t;
-
-typedef union {
- xfs_dir2_ino8_t i8;
- xfs_dir2_ino4_t i4;
-} xfs_dir2_inou_t;
-#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL)
-
-/*
- * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t.
- * Only need 16 bits, this is the byte offset into the single block form.
- */
-typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t;
-
-/*
- * The parent directory has a dedicated field, and the self-pointer must
- * be calculated on the fly.
- *
- * Entries are packed toward the top as tightly as possible. The header
- * and the elements must be memcpy'd out into a work area to get correct
- * alignment for the inode number fields.
- */
-typedef struct xfs_dir2_sf_hdr {
- __uint8_t count; /* count of entries */
- __uint8_t i8count; /* count of 8-byte inode #s */
- xfs_dir2_inou_t parent; /* parent dir inode number */
-} __arch_pack xfs_dir2_sf_hdr_t;
-
-typedef struct xfs_dir2_sf_entry {
- __uint8_t namelen; /* actual name length */
- xfs_dir2_sf_off_t offset; /* saved offset */
- __uint8_t name[1]; /* name, variable size */
- xfs_dir2_inou_t inumber; /* inode number, var. offset */
-} __arch_pack xfs_dir2_sf_entry_t;
-
-typedef struct xfs_dir2_sf {
- xfs_dir2_sf_hdr_t hdr; /* shortform header */
- xfs_dir2_sf_entry_t list[1]; /* shortform entries */
-} xfs_dir2_sf_t;
-
-static inline int xfs_dir2_sf_hdr_size(int i8count)
-{
- return ((uint)sizeof(xfs_dir2_sf_hdr_t) - \
- ((i8count) == 0) * \
- ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
-}
-
-static inline xfs_dir2_inou_t *xfs_dir2_sf_inumberp(xfs_dir2_sf_entry_t *sfep)
-{
- return (xfs_dir2_inou_t *)&(sfep)->name[(sfep)->namelen];
-}
-
-static inline xfs_intino_t
-xfs_dir2_sf_get_inumber(xfs_dir2_sf_t *sfp, xfs_dir2_inou_t *from)
-{
- return ((sfp)->hdr.i8count == 0 ? \
- (xfs_intino_t)XFS_GET_DIR_INO4((from)->i4) : \
- (xfs_intino_t)XFS_GET_DIR_INO8((from)->i8));
-}
-
-static inline void xfs_dir2_sf_put_inumber(xfs_dir2_sf_t *sfp, xfs_ino_t *from,
- xfs_dir2_inou_t *to)
-{
- if ((sfp)->hdr.i8count == 0)
- XFS_PUT_DIR_INO4(*(from), (to)->i4);
- else
- XFS_PUT_DIR_INO8(*(from), (to)->i8);
-}
-
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
-{
- return INT_GET_UNALIGNED_16_BE(&(sfep)->offset.i);
-}
-
-static inline void
-xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
-{
- INT_SET_UNALIGNED_16_BE(&(sfep)->offset.i, off);
-}
-
-static inline int xfs_dir2_sf_entsize_byname(xfs_dir2_sf_t *sfp, int len)
-{
- return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (len) - \
- ((sfp)->hdr.i8count == 0) * \
- ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
-}
-
-static inline int
-xfs_dir2_sf_entsize_byentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
-{
- return ((uint)sizeof(xfs_dir2_sf_entry_t) - 1 + (sfep)->namelen - \
- ((sfp)->hdr.i8count == 0) * \
- ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t)));
-}
-
-static inline xfs_dir2_sf_entry_t *xfs_dir2_sf_firstentry(xfs_dir2_sf_t *sfp)
-{
- return ((xfs_dir2_sf_entry_t *) \
- ((char *)(sfp) + xfs_dir2_sf_hdr_size(sfp->hdr.i8count)));
-}
-
-static inline xfs_dir2_sf_entry_t *
-xfs_dir2_sf_nextentry(xfs_dir2_sf_t *sfp, xfs_dir2_sf_entry_t *sfep)
-{
- return ((xfs_dir2_sf_entry_t *) \
- ((char *)(sfep) + xfs_dir2_sf_entsize_byentry(sfp,sfep)));
-}
-
-/*
- * Functions.
- */
-extern int xfs_dir2_block_sfsize(struct xfs_inode *dp,
- struct xfs_dir2_block *block,
- xfs_dir2_sf_hdr_t *sfhp);
-extern int xfs_dir2_block_to_sf(struct xfs_da_args *args, struct xfs_dabuf *bp,
- int size, xfs_dir2_sf_hdr_t *sfhp);
-extern int xfs_dir2_sf_addname(struct xfs_da_args *args);
-extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino);
-extern int xfs_dir2_sf_getdents(struct xfs_inode *dp, void *dirent,
- xfs_off_t *offset, filldir_t filldir);
-extern int xfs_dir2_sf_lookup(struct xfs_da_args *args);
-extern int xfs_dir2_sf_removename(struct xfs_da_args *args);
-extern int xfs_dir2_sf_replace(struct xfs_da_args *args);
-
-#endif /* __XFS_DIR2_SF_H__ */
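
[Note: with this header gone, the on-disk shortform structures (xfs_dir2_sf_hdr, xfs_dir2_sf_entry, the ino4/ino8 union) presumably move into the new xfs_dir2_format.h, and xfs_dir2_sf_entsize_byname() resurfaces as the xfs_dir2_sf_entsize() calls converted above. One presumed shape, reusing the deleted macro's arithmetic but keyed off the header type the callers now hold:

static inline int
xfs_dir2_sf_entsize(struct xfs_dir2_sf_hdr *hdr, int len)
{
	return (uint)sizeof(xfs_dir2_sf_entry_t) - 1 + len -
		(hdr->i8count == 0) *
		((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
}]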
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 8f6fc1a9638..c13fed8c394 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -249,6 +249,11 @@ typedef struct xfs_fsop_resblks {
#define XFS_MAX_LOG_BYTES \
((2 * 1024 * 1024 * 1024ULL) - XFS_MIN_LOG_BYTES)
+/* Used for sanity checks on the superblock */
+#define XFS_MAX_DBLOCKS(s) ((xfs_drfsbno_t)(s)->sb_agcount * (s)->sb_agblocks)
+#define XFS_MIN_DBLOCKS(s) ((xfs_drfsbno_t)((s)->sb_agcount - 1) * \
+ (s)->sb_agblocks + XFS_MIN_AG_BLOCKS)
+
/*
* Structures for XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG & XFS_IOC_FSGROWFSRT
*/
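
The two macros added here bound sb_dblocks purely by AG geometry: at most agcount full-size allocation groups, and at least agcount - 1 full groups plus a runt last group of XFS_MIN_AG_BLOCKS. A standalone sketch of the same check; XFS_MIN_AG_BLOCKS is 64 in the headers of this era, taken here as an assumption:

#include <stdbool.h>
#include <stdint.h>

#define XFS_MIN_AG_BLOCKS 64	/* assumed value from xfs_ag.h */

/* assumes agcount >= 1, as any mountable superblock must have */
static bool sb_dblocks_sane(uint64_t dblocks, uint32_t agcount,
			    uint32_t agblocks)
{
	uint64_t max = (uint64_t)agcount * agblocks;
	uint64_t min = (uint64_t)(agcount - 1) * agblocks + XFS_MIN_AG_BLOCKS;

	/* zero is never valid; otherwise stay within [min, max] */
	return dblocks != 0 && dblocks >= min && dblocks <= max;
}
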
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 84ebeec1664..dd5628bd8d0 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -683,7 +683,7 @@ xfs_dialloc(
return 0;
}
agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+ ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
} else {
/*
* Continue where we left off before. In this case, we
@@ -691,7 +691,7 @@ xfs_dialloc(
*/
agbp = *IO_agbp;
agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+ ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
}
mp = tp->t_mountp;
@@ -775,7 +775,7 @@ nextag:
if (error)
goto nextag;
agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+ ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
}
/*
* Here with an allocation group that has a free inode.
@@ -944,7 +944,7 @@ nextag:
* See if the most recently allocated block has any free.
*/
newino:
- if (be32_to_cpu(agi->agi_newino) != NULLAGINO) {
+ if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
XFS_LOOKUP_EQ, &i);
if (error)
@@ -1085,7 +1085,7 @@ xfs_difree(
return error;
}
agi = XFS_BUF_TO_AGI(agbp);
- ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+ ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
ASSERT(agbno < be32_to_cpu(agi->agi_length));
/*
* Initialize the cursor.
@@ -1438,7 +1438,7 @@ xfs_ialloc_log_agi(
xfs_agi_t *agi; /* allocation group header */
agi = XFS_BUF_TO_AGI(bp);
- ASSERT(be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC);
+ ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
#endif
/*
* Compute byte offsets for the first and last fields.
@@ -1492,7 +1492,7 @@ xfs_read_agi(
/*
* Validate the magic number of the agi block.
*/
- agi_ok = be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
+ agi_ok = agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC) &&
XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum)) &&
be32_to_cpu(agi->agi_seqno) == agno;
if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IALLOC_READ_AGI,
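
All of the ASSERT churn in this file follows a single idiom: rather than byte-swapping the on-disk value on every check (be32_to_cpu(x) == CONST), swap the constant instead (x == cpu_to_be32(CONST)). The conversion of a compile-time constant folds away entirely, so the hot path compares the raw on-disk bytes with no runtime swap on little-endian hosts. A userspace illustration, using htonl() as a stand-in for cpu_to_be32():

#include <stdint.h>
#include <arpa/inet.h>

#define XFS_AGI_MAGIC	0x58414749	/* "XAGI" */

static int agi_magic_ok(uint32_t disk_magic)	/* big-endian, as read */
{
	/* old form: swap the disk value at runtime on every call */
	/*	return ntohl(disk_magic) == XFS_AGI_MAGIC;	*/

	/* new form: the swap applies to a constant and is folded */
	return disk_magic == htonl(XFS_AGI_MAGIC);
}
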
diff --git a/fs/xfs/xfs_ialloc_btree.c b/fs/xfs/xfs_ialloc_btree.c
index 16921f55c54..c6a75815aea 100644
--- a/fs/xfs/xfs_ialloc_btree.c
+++ b/fs/xfs/xfs_ialloc_btree.c
@@ -31,7 +31,6 @@
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
@@ -205,72 +204,6 @@ xfs_inobt_recs_inorder(
}
#endif /* DEBUG */
-#ifdef XFS_BTREE_TRACE
-ktrace_t *xfs_inobt_trace_buf;
-
-STATIC void
-xfs_inobt_trace_enter(
- struct xfs_btree_cur *cur,
- const char *func,
- char *s,
- int type,
- int line,
- __psunsigned_t a0,
- __psunsigned_t a1,
- __psunsigned_t a2,
- __psunsigned_t a3,
- __psunsigned_t a4,
- __psunsigned_t a5,
- __psunsigned_t a6,
- __psunsigned_t a7,
- __psunsigned_t a8,
- __psunsigned_t a9,
- __psunsigned_t a10)
-{
- ktrace_enter(xfs_inobt_trace_buf, (void *)(__psint_t)type,
- (void *)func, (void *)s, NULL, (void *)cur,
- (void *)a0, (void *)a1, (void *)a2, (void *)a3,
- (void *)a4, (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10);
-}
-
-STATIC void
-xfs_inobt_trace_cursor(
- struct xfs_btree_cur *cur,
- __uint32_t *s0,
- __uint64_t *l0,
- __uint64_t *l1)
-{
- *s0 = cur->bc_private.a.agno;
- *l0 = cur->bc_rec.i.ir_startino;
- *l1 = cur->bc_rec.i.ir_free;
-}
-
-STATIC void
-xfs_inobt_trace_key(
- struct xfs_btree_cur *cur,
- union xfs_btree_key *key,
- __uint64_t *l0,
- __uint64_t *l1)
-{
- *l0 = be32_to_cpu(key->inobt.ir_startino);
- *l1 = 0;
-}
-
-STATIC void
-xfs_inobt_trace_record(
- struct xfs_btree_cur *cur,
- union xfs_btree_rec *rec,
- __uint64_t *l0,
- __uint64_t *l1,
- __uint64_t *l2)
-{
- *l0 = be32_to_cpu(rec->inobt.ir_startino);
- *l1 = be32_to_cpu(rec->inobt.ir_freecount);
- *l2 = be64_to_cpu(rec->inobt.ir_free);
-}
-#endif /* XFS_BTREE_TRACE */
-
static const struct xfs_btree_ops xfs_inobt_ops = {
.rec_len = sizeof(xfs_inobt_rec_t),
.key_len = sizeof(xfs_inobt_key_t),
@@ -286,18 +219,10 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
-
#ifdef DEBUG
.keys_inorder = xfs_inobt_keys_inorder,
.recs_inorder = xfs_inobt_recs_inorder,
#endif
-
-#ifdef XFS_BTREE_TRACE
- .trace_enter = xfs_inobt_trace_enter,
- .trace_cursor = xfs_inobt_trace_cursor,
- .trace_key = xfs_inobt_trace_key,
- .trace_record = xfs_inobt_trace_record,
-#endif
};
/*
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 3631783b2b5..7759812c1bb 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -38,7 +38,6 @@
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
-#include "xfs_btree_trace.h"
#include "xfs_trace.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index a098a20ca63..3cc21ddf9f7 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -37,7 +37,6 @@
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
-#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
@@ -52,7 +51,7 @@ kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;
/*
- * Used in xfs_itruncate(). This is the maximum number of extents
+ * Used in xfs_itruncate_extents(). This is the maximum number of extents
* freed from a file in a single transaction.
*/
#define XFS_ITRUNC_MAX_EXTENTS 2
@@ -167,7 +166,7 @@ xfs_imap_to_bp(
dip = (xfs_dinode_t *)xfs_buf_offset(bp,
(i << mp->m_sb.sb_inodelog));
- di_ok = be16_to_cpu(dip->di_magic) == XFS_DINODE_MAGIC &&
+ di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
XFS_DINODE_GOOD_VERSION(dip->di_version);
if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
XFS_ERRTAG_ITOBP_INOTOBP,
@@ -802,7 +801,7 @@ xfs_iread(
* If we got something that isn't an inode it means someone
* (nfs or dmi) has a stale handle.
*/
- if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC) {
+ if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
#ifdef DEBUG
xfs_alert(mp,
"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
@@ -1179,15 +1178,15 @@ xfs_ialloc(
* at least do it for regular files.
*/
#ifdef DEBUG
-void
+STATIC void
xfs_isize_check(
- xfs_mount_t *mp,
- xfs_inode_t *ip,
- xfs_fsize_t isize)
+ struct xfs_inode *ip,
+ xfs_fsize_t isize)
{
- xfs_fileoff_t map_first;
- int nimaps;
- xfs_bmbt_irec_t imaps[2];
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t map_first;
+ int nimaps;
+ xfs_bmbt_irec_t imaps[2];
if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
return;
@@ -1214,168 +1213,14 @@ xfs_isize_check(
ASSERT(nimaps == 1);
ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
+#else /* DEBUG */
+#define xfs_isize_check(ip, isize)
#endif /* DEBUG */
/*
- * Calculate the last possible buffered byte in a file. This must
- * include data that was buffered beyond the EOF by the write code.
- * This also needs to deal with overflowing the xfs_fsize_t type
- * which can happen for sizes near the limit.
- *
- * We also need to take into account any blocks beyond the EOF. It
- * may be the case that they were buffered by a write which failed.
- * In that case the pages will still be in memory, but the inode size
- * will never have been updated.
- */
-STATIC xfs_fsize_t
-xfs_file_last_byte(
- xfs_inode_t *ip)
-{
- xfs_mount_t *mp;
- xfs_fsize_t last_byte;
- xfs_fileoff_t last_block;
- xfs_fileoff_t size_last_block;
- int error;
-
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
-
- mp = ip->i_mount;
- /*
- * Only check for blocks beyond the EOF if the extents have
- * been read in. This eliminates the need for the inode lock,
- * and it also saves us from looking when it really isn't
- * necessary.
- */
- if (ip->i_df.if_flags & XFS_IFEXTENTS) {
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_bmap_last_offset(NULL, ip, &last_block,
- XFS_DATA_FORK);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- if (error) {
- last_block = 0;
- }
- } else {
- last_block = 0;
- }
- size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
- last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
-
- last_byte = XFS_FSB_TO_B(mp, last_block);
- if (last_byte < 0) {
- return XFS_MAXIOFFSET(mp);
- }
- last_byte += (1 << mp->m_writeio_log);
- if (last_byte < 0) {
- return XFS_MAXIOFFSET(mp);
- }
- return last_byte;
-}
-
-/*
- * Start the truncation of the file to new_size. The new size
- * must be smaller than the current size. This routine will
- * clear the buffer and page caches of file data in the removed
- * range, and xfs_itruncate_finish() will remove the underlying
- * disk blocks.
- *
- * The inode must have its I/O lock locked EXCLUSIVELY, and it
- * must NOT have the inode lock held at all. This is because we're
- * calling into the buffer/page cache code and we can't hold the
- * inode lock when we do so.
- *
- * We need to wait for any direct I/Os in flight to complete before we
- * proceed with the truncate. This is needed to prevent the extents
- * being read or written by the direct I/Os from being removed while the
- * I/O is in flight as there is no other method of synchronising
- * direct I/O with the truncate operation. Also, because we hold
- * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
- * started until the truncate completes and drops the lock. Essentially,
- * the xfs_ioend_wait() call forms an I/O barrier that provides strict
- * ordering between direct I/Os and the truncate operation.
- *
- * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
- * or XFS_ITRUNC_MAYBE. The XFS_ITRUNC_MAYBE value should be used
- * in the case that the caller is locking things out of order and
- * may not be able to call xfs_itruncate_finish() with the inode lock
- * held without dropping the I/O lock. If the caller must drop the
- * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
- * must be called again with all the same restrictions as the initial
- * call.
- */
-int
-xfs_itruncate_start(
- xfs_inode_t *ip,
- uint flags,
- xfs_fsize_t new_size)
-{
- xfs_fsize_t last_byte;
- xfs_off_t toss_start;
- xfs_mount_t *mp;
- int error = 0;
-
- ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
- ASSERT((new_size == 0) || (new_size <= ip->i_size));
- ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
- (flags == XFS_ITRUNC_MAYBE));
-
- mp = ip->i_mount;
-
- /* wait for the completion of any pending DIOs */
- if (new_size == 0 || new_size < ip->i_size)
- xfs_ioend_wait(ip);
-
- /*
- * Call toss_pages or flushinval_pages to get rid of pages
- * overlapping the region being removed. We have to use
- * the less efficient flushinval_pages in the case that the
- * caller may not be able to finish the truncate without
- * dropping the inode's I/O lock. Make sure
- * to catch any pages brought in by buffers overlapping
- * the EOF by searching out beyond the isize by our
- * block size. We round new_size up to a block boundary
- * so that we don't toss things on the same block as
- * new_size but before it.
- *
- * Before calling toss_page or flushinval_pages, make sure to
- * call remapf() over the same region if the file is mapped.
- * This frees up mapped file references to the pages in the
- * given range and for the flushinval_pages case it ensures
- * that we get the latest mapped changes flushed out.
- */
- toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- toss_start = XFS_FSB_TO_B(mp, toss_start);
- if (toss_start < 0) {
- /*
- * The place to start tossing is beyond our maximum
- * file size, so there is no way that the data extended
- * out there.
- */
- return 0;
- }
- last_byte = xfs_file_last_byte(ip);
- trace_xfs_itruncate_start(ip, new_size, flags, toss_start, last_byte);
- if (last_byte > toss_start) {
- if (flags & XFS_ITRUNC_DEFINITE) {
- xfs_tosspages(ip, toss_start,
- -1, FI_REMAPF_LOCKED);
- } else {
- error = xfs_flushinval_pages(ip, toss_start,
- -1, FI_REMAPF_LOCKED);
- }
- }
-
-#ifdef DEBUG
- if (new_size == 0) {
- ASSERT(VN_CACHED(VFS_I(ip)) == 0);
- }
-#endif
- return error;
-}
-
-/*
- * Shrink the file to the given new_size. The new size must be smaller than
- * the current size. This will free up the underlying blocks in the removed
- * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
+ * Free up the underlying blocks past new_size. The new size must be smaller
+ * than the current size. This routine can be used both for the attribute and
+ * data fork, and does not modify the inode size, which is left to the caller.
*
* The transaction passed to this routine must have made a permanent log
* reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
@@ -1387,31 +1232,6 @@ xfs_itruncate_start(
* will be "held" within the returned transaction. This routine does NOT
* require any disk space to be reserved for it within the transaction.
*
- * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
- * indicates the fork which is to be truncated. For the attribute fork we only
- * support truncation to size 0.
- *
- * We use the sync parameter to indicate whether or not the first transaction
- * we perform might have to be synchronous. For the attr fork, it needs to be
- * so if the unlink of the inode is not yet known to be permanent in the log.
- * This keeps us from freeing and reusing the blocks of the attribute fork
- * before the unlink of the inode becomes permanent.
- *
- * For the data fork, we normally have to run synchronously if we're being
- * called out of the inactive path or we're being called out of the create path
- * where we're truncating an existing file. Either way, the truncate needs to
- * be sync so blocks don't reappear in the file with altered data in case of a
- * crash. wsync filesystems can run the first case async because anything that
- * shrinks the inode has to run sync so by the time we're called here from
- * inactive, the inode size is permanently set to 0.
- *
- * Calls from the truncate path always need to be sync unless we're in a wsync
- * filesystem and the file has already been unlinked.
- *
- * The caller is responsible for correctly setting the sync parameter. It gets
- * too hard for us to guess here which path we're being called out of just
- * based on inode state.
- *
* If we get an error, we must return with the inode locked and linked into the
* current transaction. This keeps things simple for the higher level code,
* because it always knows that the inode is locked and held in the transaction
@@ -1419,124 +1239,30 @@ xfs_itruncate_start(
* dirty on error so that transactions can be easily aborted if possible.
*/
int
-xfs_itruncate_finish(
- xfs_trans_t **tp,
- xfs_inode_t *ip,
- xfs_fsize_t new_size,
- int fork,
- int sync)
+xfs_itruncate_extents(
+ struct xfs_trans **tpp,
+ struct xfs_inode *ip,
+ int whichfork,
+ xfs_fsize_t new_size)
{
- xfs_fsblock_t first_block;
- xfs_fileoff_t first_unmap_block;
- xfs_fileoff_t last_block;
- xfs_filblks_t unmap_len=0;
- xfs_mount_t *mp;
- xfs_trans_t *ntp;
- int done;
- int committed;
- xfs_bmap_free_t free_list;
- int error;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp = *tpp;
+ struct xfs_trans *ntp;
+ xfs_bmap_free_t free_list;
+ xfs_fsblock_t first_block;
+ xfs_fileoff_t first_unmap_block;
+ xfs_fileoff_t last_block;
+ xfs_filblks_t unmap_len;
+ int committed;
+ int error = 0;
+ int done = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- ASSERT((new_size == 0) || (new_size <= ip->i_size));
- ASSERT(*tp != NULL);
- ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
- ASSERT(ip->i_transp == *tp);
+ ASSERT(new_size <= ip->i_size);
+ ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
ASSERT(ip->i_itemp != NULL);
ASSERT(ip->i_itemp->ili_lock_flags == 0);
-
-
- ntp = *tp;
- mp = (ntp)->t_mountp;
- ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
-
- /*
- * We only support truncating the entire attribute fork.
- */
- if (fork == XFS_ATTR_FORK) {
- new_size = 0LL;
- }
- first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
- trace_xfs_itruncate_finish_start(ip, new_size);
-
- /*
- * The first thing we do is set the size to new_size permanently
- * on disk. This way we don't have to worry about anyone ever
- * being able to look at the data being freed even in the face
- * of a crash. What we're getting around here is the case where
- * we free a block, it is allocated to another file, it is written
- * to, and then we crash. If the new data gets written to the
- * file but the log buffers containing the free and reallocation
- * don't, then we'd end up with garbage in the blocks being freed.
- * As long as we make the new_size permanent before actually
- * freeing any blocks it doesn't matter if they get written to.
- *
- * The callers must signal into us whether or not the size
- * setting here must be synchronous. There are a few cases
- * where it doesn't have to be synchronous. Those cases
- * occur if the file is unlinked and we know the unlink is
- * permanent or if the blocks being truncated are guaranteed
- * to be beyond the inode eof (regardless of the link count)
- * and the eof value is permanent. Both of these cases occur
- * only on wsync-mounted filesystems. In those cases, we're
- * guaranteed that no user will ever see the data in the blocks
- * that are being truncated so the truncate can run async.
- * In the free beyond eof case, the file may wind up with
- * more blocks allocated to it than it needs if we crash
- * and that won't get fixed until the next time the file
- * is re-opened and closed but that's ok as that shouldn't
- * be too many blocks.
- *
- * However, we can't just make all wsync xactions run async
- * because there's one call out of the create path that needs
- * to run sync where it's truncating an existing file to size
- * 0 whose size is > 0.
- *
- * It's probably possible to come up with a test in this
- * routine that would correctly distinguish all the above
- * cases from the values of the function parameters and the
- * inode state but for sanity's sake, I've decided to let the
- * layers above just tell us. It's simpler to correctly figure
- * out in the layer above exactly under what conditions we
- * can run async and I think it's easier for others read and
- * follow the logic in case something has to be changed.
- * cscope is your friend -- rcc.
- *
- * The attribute fork is much simpler.
- *
- * For the attribute fork we allow the caller to tell us whether
- * the unlink of the inode that led to this call is yet permanent
- * in the on disk log. If it is not and we will be freeing extents
- * in this inode then we make the first transaction synchronous
- * to make sure that the unlink is permanent by the time we free
- * the blocks.
- */
- if (fork == XFS_DATA_FORK) {
- if (ip->i_d.di_nextents > 0) {
- /*
- * If we are not changing the file size then do
- * not update the on-disk file size - we may be
- * called from xfs_inactive_free_eofblocks(). If we
- * update the on-disk file size and then the system
- * crashes before the contents of the file are
- * flushed to disk then the files may be full of
- * holes (ie NULL files bug).
- */
- if (ip->i_size != new_size) {
- ip->i_d.di_size = new_size;
- ip->i_size = new_size;
- xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
- }
- }
- } else if (sync) {
- ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
- if (ip->i_d.di_anextents > 0)
- xfs_trans_set_sync(ntp);
- }
- ASSERT(fork == XFS_DATA_FORK ||
- (fork == XFS_ATTR_FORK &&
- ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
- (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
+ ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
/*
* Since it is possible for space to become allocated beyond
@@ -1547,128 +1273,142 @@ xfs_itruncate_finish(
* beyond the maximum file size (ie it is the same as last_block),
* then there is nothing to do.
*/
+ first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
- ASSERT(first_unmap_block <= last_block);
- done = 0;
- if (last_block == first_unmap_block) {
- done = 1;
- } else {
- unmap_len = last_block - first_unmap_block + 1;
- }
+ if (first_unmap_block == last_block)
+ return 0;
+
+ ASSERT(first_unmap_block < last_block);
+ unmap_len = last_block - first_unmap_block + 1;
while (!done) {
- /*
- * Free up up to XFS_ITRUNC_MAX_EXTENTS. xfs_bunmapi()
- * will tell us whether it freed the entire range or
- * not. If this is a synchronous mount (wsync),
- * then we can tell bunmapi to keep all the
- * transactions asynchronous since the unlink
- * transaction that made this inode inactive has
- * already hit the disk. There's no danger of
- * the freed blocks being reused, there being a
- * crash, and the reused blocks suddenly reappearing
- * in this file with garbage in them once recovery
- * runs.
- */
xfs_bmap_init(&free_list, &first_block);
- error = xfs_bunmapi(ntp, ip,
+ error = xfs_bunmapi(tp, ip,
first_unmap_block, unmap_len,
- xfs_bmapi_aflag(fork),
+ xfs_bmapi_aflag(whichfork),
XFS_ITRUNC_MAX_EXTENTS,
&first_block, &free_list,
&done);
- if (error) {
- /*
- * If the bunmapi call encounters an error,
- * return to the caller where the transaction
- * can be properly aborted. We just need to
- * make sure we're not holding any resources
- * that we were not when we came in.
- */
- xfs_bmap_cancel(&free_list);
- return error;
- }
+ if (error)
+ goto out_bmap_cancel;
/*
* Duplicate the transaction that has the permanent
* reservation and commit the old transaction.
*/
- error = xfs_bmap_finish(tp, &free_list, &committed);
- ntp = *tp;
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (committed)
- xfs_trans_ijoin(ntp, ip);
-
- if (error) {
- /*
- * If the bmap finish call encounters an error, return
- * to the caller where the transaction can be properly
- * aborted. We just need to make sure we're not
- * holding any resources that we were not when we came
- * in.
- *
- * Aborting from this point might lose some blocks in
- * the file system, but oh well.
- */
- xfs_bmap_cancel(&free_list);
- return error;
- }
+ xfs_trans_ijoin(tp, ip);
+ if (error)
+ goto out_bmap_cancel;
if (committed) {
/*
* Mark the inode dirty so it will be logged and
* moved forward in the log as part of every commit.
*/
- xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
- ntp = xfs_trans_dup(ntp);
- error = xfs_trans_commit(*tp, 0);
- *tp = ntp;
+ ntp = xfs_trans_dup(tp);
+ error = xfs_trans_commit(tp, 0);
+ tp = ntp;
- xfs_trans_ijoin(ntp, ip);
+ xfs_trans_ijoin(tp, ip);
if (error)
- return error;
+ goto out;
+
/*
- * transaction commit worked ok so we can drop the extra ticket
+ * Transaction commit worked ok so we can drop the extra ticket
* reference that we gained in xfs_trans_dup()
*/
- xfs_log_ticket_put(ntp->t_ticket);
- error = xfs_trans_reserve(ntp, 0,
+ xfs_log_ticket_put(tp->t_ticket);
+ error = xfs_trans_reserve(tp, 0,
XFS_ITRUNCATE_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES,
XFS_ITRUNCATE_LOG_COUNT);
if (error)
- return error;
+ goto out;
}
+
+out:
+ *tpp = tp;
+ return error;
+out_bmap_cancel:
/*
- * Only update the size in the case of the data fork, but
- * always re-log the inode so that our permanent transaction
- * can keep on rolling it forward in the log.
+ * If the bunmapi call encounters an error, return to the caller where
+ * the transaction can be properly aborted. We just need to make sure
+ * we're not holding any resources that we were not when we came in.
*/
- if (fork == XFS_DATA_FORK) {
- xfs_isize_check(mp, ip, new_size);
+ xfs_bmap_cancel(&free_list);
+ goto out;
+}
+
+int
+xfs_itruncate_data(
+ struct xfs_trans **tpp,
+ struct xfs_inode *ip,
+ xfs_fsize_t new_size)
+{
+ int error;
+
+ trace_xfs_itruncate_data_start(ip, new_size);
+
+ /*
+ * The first thing we do is set the size to new_size permanently on
+ * disk. This way we don't have to worry about anyone ever being able
+ * to look at the data being freed even in the face of a crash.
+ * What we're getting around here is the case where we free a block, it
+ * is allocated to another file, it is written to, and then we crash.
+ * If the new data gets written to the file but the log buffers
+ * containing the free and reallocation don't, then we'd end up with
+ * garbage in the blocks being freed. As long as we make the new_size
+ * permanent before actually freeing any blocks it doesn't matter if
+ * they get written to.
+ */
+ if (ip->i_d.di_nextents > 0) {
/*
- * If we are not changing the file size then do
- * not update the on-disk file size - we may be
- * called from xfs_inactive_free_eofblocks(). If we
- * update the on-disk file size and then the system
- * crashes before the contents of the file are
- * flushed to disk then the files may be full of
- * holes (ie NULL files bug).
+ * If we are not changing the file size then do not update
+ * the on-disk file size - we may be called from
+ * xfs_inactive_free_eofblocks(). If we update the on-disk
+ * file size and then the system crashes before the contents
+ * of the file are flushed to disk then the files may be
+ * full of holes (ie NULL files bug).
*/
if (ip->i_size != new_size) {
ip->i_d.di_size = new_size;
ip->i_size = new_size;
+ xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
}
}
- xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
- ASSERT((new_size != 0) ||
- (fork == XFS_ATTR_FORK) ||
- (ip->i_delayed_blks == 0));
- ASSERT((new_size != 0) ||
- (fork == XFS_ATTR_FORK) ||
- (ip->i_d.di_nextents == 0));
- trace_xfs_itruncate_finish_end(ip, new_size);
+
+ error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);
+ if (error)
+ return error;
+
+ /*
+ * If we are not changing the file size then do not update the on-disk
+ * file size - we may be called from xfs_inactive_free_eofblocks().
+ * If we update the on-disk file size and then the system crashes
+ * before the contents of the file are flushed to disk then the files
+ * may be full of holes (ie NULL files bug).
+ */
+ xfs_isize_check(ip, new_size);
+ if (ip->i_size != new_size) {
+ ip->i_d.di_size = new_size;
+ ip->i_size = new_size;
+ }
+
+ ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
+ ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);
+
+ /*
+ * Always re-log the inode so that our permanent transaction can keep
+ * on rolling it forward in the log.
+ */
+ xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
+
+ trace_xfs_itruncate_data_end(ip, new_size);
return 0;
}
@@ -1694,7 +1434,6 @@ xfs_iunlink(
ASSERT(ip->i_d.di_nlink == 0);
ASSERT(ip->i_d.di_mode != 0);
- ASSERT(ip->i_transp == tp);
mp = tp->t_mountp;
@@ -1717,7 +1456,7 @@ xfs_iunlink(
ASSERT(agi->agi_unlinked[bucket_index]);
ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
- if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
+ if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
/*
* There is already another inode in the bucket we need
* to add ourselves to. Add us at the front of the list.
@@ -1728,8 +1467,7 @@ xfs_iunlink(
if (error)
return error;
- ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
- /* both on-disk, don't endian flip twice */
+ ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
offset = ip->i_imap.im_boffset +
offsetof(xfs_dinode_t, di_next_unlinked);
@@ -1794,7 +1532,7 @@ xfs_iunlink_remove(
agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
ASSERT(agino != 0);
bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
- ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
+ ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
ASSERT(agi->agi_unlinked[bucket_index]);
if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
@@ -1959,7 +1697,7 @@ xfs_ifree_cluster(
* stale first, we will not attempt to lock them in the loop
* below as the XFS_ISTALE flag will be set.
*/
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ lip = bp->b_fspriv;
while (lip) {
if (lip->li_type == XFS_LI_INODE) {
iip = (xfs_inode_log_item_t *)lip;
@@ -2086,7 +1824,6 @@ xfs_ifree(
xfs_buf_t *ibp;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(ip->i_transp == tp);
ASSERT(ip->i_d.di_nlink == 0);
ASSERT(ip->i_d.di_nextents == 0);
ASSERT(ip->i_d.di_anextents == 0);
@@ -2733,7 +2470,7 @@ cluster_corrupt_out:
* mark the buffer as an error and call them. Otherwise
* mark it as stale and brelse.
*/
- if (XFS_BUF_IODONE_FUNC(bp)) {
+ if (bp->b_iodone) {
XFS_BUF_UNDONE(bp);
XFS_BUF_STALE(bp);
XFS_BUF_ERROR(bp,EIO);
@@ -2920,7 +2657,7 @@ xfs_iflush_int(
*/
xfs_synchronize_times(ip);
- if (XFS_TEST_ERROR(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC,
+ if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
@@ -3073,8 +2810,8 @@ xfs_iflush_int(
*/
xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
- ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
+ ASSERT(bp->b_fspriv != NULL);
+ ASSERT(bp->b_iodone != NULL);
} else {
/*
* We're flushing an inode which is not in the AIL and has
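
The loop at the heart of the new xfs_itruncate_extents() is the classic transaction-rolling pattern: do a bounded unit of work (unmap at most XFS_ITRUNC_MAX_EXTENTS extents), duplicate the transaction so the permanent log reservation carries over, commit the old transaction to make that step durable, then re-reserve log space and continue. A compressed sketch of the shape, using stand-in helpers rather than the kernel API:

struct trans;
extern struct trans *trans_dup(struct trans *tp);
extern int trans_commit(struct trans *tp);
extern int trans_reserve(struct trans *tp);
/* unmap a bounded number of extents; sets *done when the range is empty */
extern int unmap_some(struct trans *tp, int *done);

static int roll_until_done(struct trans **tpp)
{
	struct trans *tp = *tpp;
	struct trans *ntp;
	int done = 0;
	int error = 0;

	while (!done) {
		error = unmap_some(tp, &done);
		if (error)
			break;
		ntp = trans_dup(tp);		/* inherits the permanent reservation */
		error = trans_commit(tp);	/* make this step durable */
		tp = ntp;
		if (error)
			break;
		error = trans_reserve(tp);	/* refill log space */
		if (error)
			break;
	}
	*tpp = tp;	/* caller always gets back a live transaction */
	return error;
}
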
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 964cfea7768..a97644ab945 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -241,7 +241,6 @@ typedef struct xfs_inode {
xfs_ifork_t i_df; /* data fork */
/* Transaction and locking information. */
- struct xfs_trans *i_transp; /* ptr to owning transaction*/
struct xfs_inode_log_item *i_itemp; /* logging information */
mrlock_t i_lock; /* inode lock */
mrlock_t i_iolock; /* inode IO lock */
@@ -458,16 +457,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
extern struct lock_class_key xfs_iolock_reclaimable;
/*
- * Flags for xfs_itruncate_start().
- */
-#define XFS_ITRUNC_DEFINITE 0x1
-#define XFS_ITRUNC_MAYBE 0x2
-
-#define XFS_ITRUNC_FLAGS \
- { XFS_ITRUNC_DEFINITE, "DEFINITE" }, \
- { XFS_ITRUNC_MAYBE, "MAYBE" }
-
-/*
* For multiple groups support: if S_ISGID bit is set in the parent
* directory, group of new file is set to that of the parent, and
* new subdirectory gets S_ISGID bit from parent.
@@ -501,9 +490,10 @@ uint xfs_ip2xflags(struct xfs_inode *);
uint xfs_dic2xflags(struct xfs_dinode *);
int xfs_ifree(struct xfs_trans *, xfs_inode_t *,
struct xfs_bmap_free *);
-int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t);
-int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *,
- xfs_fsize_t, int, int);
+int xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
+ int, xfs_fsize_t);
+int xfs_itruncate_data(struct xfs_trans **, struct xfs_inode *,
+ xfs_fsize_t);
int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
@@ -579,13 +569,6 @@ void xfs_iext_irec_update_extoffs(xfs_ifork_t *, int, int);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
-#ifdef DEBUG
-void xfs_isize_check(struct xfs_mount *, struct xfs_inode *,
- xfs_fsize_t);
-#else /* DEBUG */
-#define xfs_isize_check(mp, ip, isize)
-#endif /* DEBUG */
-
#if defined(DEBUG)
void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
#else
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index b1e88d56069..588406dc6a3 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -632,13 +632,8 @@ xfs_inode_item_unlock(
struct xfs_inode *ip = iip->ili_inode;
unsigned short lock_flags;
- ASSERT(iip->ili_inode->i_itemp != NULL);
- ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
-
- /*
- * Clear the transaction pointer in the inode.
- */
- ip->i_transp = NULL;
+ ASSERT(ip->i_itemp != NULL);
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
/*
* If the inode needed a separate buffer with which to log
@@ -664,8 +659,8 @@ xfs_inode_item_unlock(
lock_flags = iip->ili_lock_flags;
iip->ili_lock_flags = 0;
if (lock_flags) {
- xfs_iunlock(iip->ili_inode, lock_flags);
- IRELE(iip->ili_inode);
+ xfs_iunlock(ip, lock_flags);
+ IRELE(ip);
}
}
@@ -879,7 +874,7 @@ xfs_iflush_done(
* Scan the buffer IO completions for other inodes being completed and
* attach them to the current inode log item.
*/
- blip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
+ blip = bp->b_fspriv;
prev = NULL;
while (blip != NULL) {
if (lip->li_cb != xfs_iflush_done) {
@@ -891,7 +886,7 @@ xfs_iflush_done(
/* remove from list */
next = blip->li_bio_list;
if (!prev) {
- XFS_BUF_SET_FSPRIVATE(bp, next);
+ bp->b_fspriv = next;
} else {
prev->li_bio_list = next;
}
diff --git a/fs/xfs/xfs_inum.h b/fs/xfs/xfs_inum.h
index b8e4ee4e89a..b253c0ea5be 100644
--- a/fs/xfs/xfs_inum.h
+++ b/fs/xfs/xfs_inum.h
@@ -28,17 +28,6 @@
typedef __uint32_t xfs_agino_t; /* within allocation grp inode number */
-/*
- * Useful inode bits for this kernel.
- * Used in some places where having 64-bits in the 32-bit kernels
- * costs too much.
- */
-#if XFS_BIG_INUMS
-typedef xfs_ino_t xfs_intino_t;
-#else
-typedef __uint32_t xfs_intino_t;
-#endif
-
#define NULLFSINO ((xfs_ino_t)-1)
#define NULLAGINO ((xfs_agino_t)-1)
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 41d5b8f2bf9..06ff8437ed8 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -871,15 +871,9 @@ xlog_space_left(
void
xlog_iodone(xfs_buf_t *bp)
{
- xlog_in_core_t *iclog;
- xlog_t *l;
- int aborted;
-
- iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
- ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long) 2);
- XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
- aborted = 0;
- l = iclog->ic_log;
+ xlog_in_core_t *iclog = bp->b_fspriv;
+ xlog_t *l = iclog->ic_log;
+ int aborted = 0;
/*
* Race to shutdown the filesystem if we see an error.
@@ -1056,10 +1050,9 @@ xlog_alloc_log(xfs_mount_t *mp,
bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
if (!bp)
goto out_free_log;
- XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
- XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
+ bp->b_iodone = xlog_iodone;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
log->l_xbuf = bp;
spin_lock_init(&log->l_icloglock);
@@ -1090,10 +1083,8 @@ xlog_alloc_log(xfs_mount_t *mp,
log->l_iclog_size, 0);
if (!bp)
goto out_free_iclog;
- if (!XFS_BUF_CPSEMA(bp))
- ASSERT(0);
- XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
- XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
+
+ bp->b_iodone = xlog_iodone;
iclog->ic_bp = bp;
iclog->ic_data = bp->b_addr;
#ifdef DEBUG
@@ -1118,7 +1109,7 @@ xlog_alloc_log(xfs_mount_t *mp,
iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
ASSERT(XFS_BUF_ISBUSY(iclog->ic_bp));
- ASSERT(XFS_BUF_VALUSEMA(iclog->ic_bp) <= 0);
+ ASSERT(xfs_buf_islocked(iclog->ic_bp));
init_waitqueue_head(&iclog->ic_force_wait);
init_waitqueue_head(&iclog->ic_write_wait);
@@ -1254,9 +1245,8 @@ STATIC int
xlog_bdstrat(
struct xfs_buf *bp)
{
- struct xlog_in_core *iclog;
+ struct xlog_in_core *iclog = bp->b_fspriv;
- iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
if (iclog->ic_state & XLOG_STATE_IOERROR) {
XFS_BUF_ERROR(bp, EIO);
XFS_BUF_STALE(bp);
@@ -1269,7 +1259,6 @@ xlog_bdstrat(
return 0;
}
- bp->b_flags |= _XBF_RUN_QUEUES;
xfs_buf_iorequest(bp);
return 0;
}
@@ -1351,8 +1340,6 @@ xlog_sync(xlog_t *log,
}
bp = iclog->ic_bp;
- ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) == (unsigned long)1);
- XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));
XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
@@ -1366,22 +1353,28 @@ xlog_sync(xlog_t *log,
iclog->ic_bwritecnt = 1;
}
XFS_BUF_SET_COUNT(bp, count);
- XFS_BUF_SET_FSPRIVATE(bp, iclog); /* save for later */
+ bp->b_fspriv = iclog;
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
- bp->b_flags |= XBF_LOG_BUFFER;
+ bp->b_flags |= XBF_SYNCIO;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
+ bp->b_flags |= XBF_FUA;
+
/*
- * If we have an external log device, flush the data device
- * before flushing the log to make sure all meta data
- * written back from the AIL actually made it to disk
- * before writing out the new log tail LSN in the log buffer.
+ * Flush the data device before flushing the log to make
+ * sure all meta data written back from the AIL actually made
+ * it to disk before stamping the new log tail LSN into the
+ * log buffer. For an external log we need to issue the
+ * flush explicitly, and unfortunately synchronously here;
+ * for an internal log we can simply use the block layer
+ * state machine for preflushes.
*/
if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
- XFS_BUF_ORDERED(bp);
+ else
+ bp->b_flags |= XBF_FLUSH;
}
ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
@@ -1404,19 +1397,16 @@ xlog_sync(xlog_t *log,
}
if (split) {
bp = iclog->ic_log->l_xbuf;
- ASSERT(XFS_BUF_FSPRIVATE2(bp, unsigned long) ==
- (unsigned long)1);
- XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
XFS_BUF_SET_ADDR(bp, 0); /* logical 0 */
XFS_BUF_SET_PTR(bp, (xfs_caddr_t)((__psint_t)&(iclog->ic_header)+
(__psint_t)count), split);
- XFS_BUF_SET_FSPRIVATE(bp, iclog);
+ bp->b_fspriv = iclog;
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_ASYNC(bp);
- bp->b_flags |= XBF_LOG_BUFFER;
+ bp->b_flags |= XBF_SYNCIO;
if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
- XFS_BUF_ORDERED(bp);
+ bp->b_flags |= XBF_FUA;
dptr = XFS_BUF_PTR(bp);
/*
* Bump the cycle numbers at the start of each block
@@ -3521,13 +3511,13 @@ xlog_verify_iclog(xlog_t *log,
spin_unlock(&log->l_icloglock);
/* check log magic numbers */
- if (be32_to_cpu(iclog->ic_header.h_magicno) != XLOG_HEADER_MAGIC_NUM)
+ if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
ptr = (xfs_caddr_t) &iclog->ic_header;
for (ptr += BBSIZE; ptr < ((xfs_caddr_t)&iclog->ic_header) + count;
ptr += BBSIZE) {
- if (be32_to_cpu(*(__be32 *)ptr) == XLOG_HEADER_MAGIC_NUM)
+ if (*(__be32 *)ptr == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
xfs_emerg(log->l_mp, "%s: unexpected magic num",
__func__);
}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 04142caedb2..8fe4206de05 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -91,6 +91,8 @@ xlog_get_bp(
xlog_t *log,
int nbblks)
{
+ struct xfs_buf *bp;
+
if (!xlog_buf_bbcount_valid(log, nbblks)) {
xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
nbblks);
@@ -118,8 +120,10 @@ xlog_get_bp(
nbblks += log->l_sectBBsize;
nbblks = round_up(nbblks, log->l_sectBBsize);
- return xfs_buf_get_uncached(log->l_mp->m_logdev_targp,
- BBTOB(nbblks), 0);
+ bp = xfs_buf_get_uncached(log->l_mp->m_logdev_targp, BBTOB(nbblks), 0);
+ if (bp)
+ xfs_buf_unlock(bp);
+ return bp;
}
STATIC void
@@ -264,7 +268,7 @@ xlog_bwrite(
XFS_BUF_ZEROFLAGS(bp);
XFS_BUF_BUSY(bp);
XFS_BUF_HOLD(bp);
- XFS_BUF_PSEMA(bp, PRIBIO);
+ xfs_buf_lock(bp);
XFS_BUF_SET_COUNT(bp, BBTOB(nbblks));
XFS_BUF_SET_TARGET(bp, log->l_mp->m_logdev_targp);
@@ -300,14 +304,14 @@ xlog_header_check_recover(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
- ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
+ ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
/*
* IRIX doesn't write the h_fmt field and leaves it zeroed
* (XLOG_FMT_UNKNOWN). This stops us from trying to recover
* a dirty log created in IRIX.
*/
- if (unlikely(be32_to_cpu(head->h_fmt) != XLOG_FMT)) {
+ if (unlikely(head->h_fmt != cpu_to_be32(XLOG_FMT))) {
xfs_warn(mp,
"dirty log written in incompatible format - can't recover");
xlog_header_check_dump(mp, head);
@@ -333,7 +337,7 @@ xlog_header_check_mount(
xfs_mount_t *mp,
xlog_rec_header_t *head)
{
- ASSERT(be32_to_cpu(head->h_magicno) == XLOG_HEADER_MAGIC_NUM);
+ ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));
if (uuid_is_nil(&head->h_fs_uuid)) {
/*
@@ -367,7 +371,7 @@ xlog_recover_iodone(
xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR);
}
- XFS_BUF_CLR_IODONE_FUNC(bp);
+ bp->b_iodone = NULL;
xfs_buf_ioend(bp, 0);
}
@@ -534,7 +538,7 @@ xlog_find_verify_log_record(
head = (xlog_rec_header_t *)offset;
- if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(head->h_magicno))
+ if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
break;
if (!smallmem)
@@ -916,7 +920,7 @@ xlog_find_tail(
if (error)
goto done;
- if (XLOG_HEADER_MAGIC_NUM == be32_to_cpu(*(__be32 *)offset)) {
+ if (*(__be32 *)offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
found = 1;
break;
}
@@ -933,8 +937,8 @@ xlog_find_tail(
if (error)
goto done;
- if (XLOG_HEADER_MAGIC_NUM ==
- be32_to_cpu(*(__be32 *)offset)) {
+ if (*(__be32 *)offset ==
+ cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
found = 2;
break;
}
@@ -1947,7 +1951,7 @@ xfs_qm_dqcheck(
* This is all fine; things are still consistent, and we haven't lost
* any quota information. Just don't complain about bad dquot blks.
*/
- if (be16_to_cpu(ddq->d_magic) != XFS_DQUOT_MAGIC) {
+ if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC)) {
if (flags & XFS_QMOPT_DOWARN)
xfs_alert(mp,
"%s : XFS dquot ID 0x%x, magic 0x%x != 0x%x",
@@ -2174,7 +2178,7 @@ xlog_recover_buffer_pass2(
error = xfs_bwrite(mp, bp);
} else {
ASSERT(bp->b_target->bt_mount == mp);
- XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
+ bp->b_iodone = xlog_recover_iodone;
xfs_bdwrite(mp, bp);
}
@@ -2238,7 +2242,7 @@ xlog_recover_inode_pass2(
* Make sure the place we're flushing out to really looks
* like an inode!
*/
- if (unlikely(be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)) {
+ if (unlikely(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))) {
xfs_buf_relse(bp);
xfs_alert(mp,
"%s: Bad inode magic number, dip = 0x%p, dino bp = 0x%p, ino = %Ld",
@@ -2434,7 +2438,7 @@ xlog_recover_inode_pass2(
write_inode_buffer:
ASSERT(bp->b_target->bt_mount == mp);
- XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
+ bp->b_iodone = xlog_recover_iodone;
xfs_bdwrite(mp, bp);
error:
if (need_free)
@@ -2556,7 +2560,7 @@ xlog_recover_dquot_pass2(
ASSERT(dq_f->qlf_size == 2);
ASSERT(bp->b_target->bt_mount == mp);
- XFS_BUF_SET_IODONE_FUNC(bp, xlog_recover_iodone);
+ bp->b_iodone = xlog_recover_iodone;
xfs_bdwrite(mp, bp);
return (0);
@@ -3295,7 +3299,7 @@ xlog_valid_rec_header(
{
int hlen;
- if (unlikely(be32_to_cpu(rhead->h_magicno) != XLOG_HEADER_MAGIC_NUM)) {
+ if (unlikely(rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))) {
XFS_ERROR_REPORT("xlog_valid_rec_header(1)",
XFS_ERRLEVEL_LOW, log->l_mp);
return XFS_ERROR(EFSCORRUPTED);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b49b82363d2..7f25245da28 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -348,7 +348,7 @@ xfs_mount_validate_sb(
}
/*
- * More sanity checking. These were stolen directly from
+ * More sanity checking. Most of these were stolen directly from
* xfs_repair.
*/
if (unlikely(
@@ -371,23 +371,13 @@ xfs_mount_validate_sb(
(sbp->sb_blocklog - sbp->sb_inodelog != sbp->sb_inopblog) ||
(sbp->sb_rextsize * sbp->sb_blocksize > XFS_MAX_RTEXTSIZE) ||
(sbp->sb_rextsize * sbp->sb_blocksize < XFS_MIN_RTEXTSIZE) ||
- (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */))) {
+ (sbp->sb_imax_pct > 100 /* zero sb_imax_pct is valid */) ||
+ sbp->sb_dblocks == 0 ||
+ sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
+ sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
if (loud)
- xfs_warn(mp, "SB sanity check 1 failed");
- return XFS_ERROR(EFSCORRUPTED);
- }
-
- /*
- * Sanity check AG count, size fields against data size field
- */
- if (unlikely(
- sbp->sb_dblocks == 0 ||
- sbp->sb_dblocks >
- (xfs_drfsbno_t)sbp->sb_agcount * sbp->sb_agblocks ||
- sbp->sb_dblocks < (xfs_drfsbno_t)(sbp->sb_agcount - 1) *
- sbp->sb_agblocks + XFS_MIN_AG_BLOCKS)) {
- if (loud)
- xfs_warn(mp, "SB sanity check 2 failed");
+ XFS_CORRUPTION_ERROR("SB sanity check failed",
+ XFS_ERRLEVEL_LOW, mp, sbp);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -864,7 +854,8 @@ xfs_update_alignment(xfs_mount_t *mp)
if ((BBTOB(mp->m_dalign) & mp->m_blockmask) ||
(BBTOB(mp->m_swidth) & mp->m_blockmask)) {
if (mp->m_flags & XFS_MOUNT_RETERR) {
- xfs_warn(mp, "alignment check 1 failed");
+ xfs_warn(mp, "alignment check failed: "
+ "(sunit/swidth vs. blocksize)");
return XFS_ERROR(EINVAL);
}
mp->m_dalign = mp->m_swidth = 0;
@@ -875,6 +866,8 @@ xfs_update_alignment(xfs_mount_t *mp)
mp->m_dalign = XFS_BB_TO_FSBT(mp, mp->m_dalign);
if (mp->m_dalign && (sbp->sb_agblocks % mp->m_dalign)) {
if (mp->m_flags & XFS_MOUNT_RETERR) {
+ xfs_warn(mp, "alignment check failed: "
+ "(sunit/swidth vs. ag size)");
return XFS_ERROR(EINVAL);
}
xfs_warn(mp,
@@ -889,8 +882,8 @@ xfs_update_alignment(xfs_mount_t *mp)
mp->m_swidth = XFS_BB_TO_FSBT(mp, mp->m_swidth);
} else {
if (mp->m_flags & XFS_MOUNT_RETERR) {
- xfs_warn(mp,
- "stripe alignment turned off: sunit(%d) less than bsize(%d)",
+ xfs_warn(mp, "alignment check failed: "
+ "sunit(%d) less than bsize(%d)",
mp->m_dalign,
mp->m_blockmask +1);
return XFS_ERROR(EINVAL);
@@ -1096,10 +1089,6 @@ xfs_mount_reset_sbqflags(
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
-#ifdef QUOTADEBUG
- xfs_notice(mp, "Writing superblock quota changes");
-#endif
-
tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
XFS_DEFAULT_LOG_COUNT);
@@ -1532,7 +1521,7 @@ xfs_unmountfs(
xfs_warn(mp, "Unable to free reserved block pool. "
"Freespace may not be correct on next mount.");
- error = xfs_log_sbcount(mp, 1);
+ error = xfs_log_sbcount(mp);
if (error)
xfs_warn(mp, "Unable to update superblock counters. "
"Freespace may not be correct on next mount.");
@@ -1568,18 +1557,14 @@ xfs_fs_writable(xfs_mount_t *mp)
/*
* xfs_log_sbcount
*
- * Called either periodically to keep the on disk superblock values
- * roughly up to date or from unmount to make sure the values are
- * correct on a clean unmount.
+ * Sync the superblock counters to disk.
*
* Note this code can be called during the process of freezing, so
- * we may need to use the transaction allocator which does not not
+ * we may need to use the transaction allocator which does not
* block when the transaction subsystem is in its frozen state.
*/
int
-xfs_log_sbcount(
- xfs_mount_t *mp,
- uint sync)
+xfs_log_sbcount(xfs_mount_t *mp)
{
xfs_trans_t *tp;
int error;
@@ -1605,8 +1590,7 @@ xfs_log_sbcount(
}
xfs_mod_sb(tp, XFS_SB_IFREE | XFS_SB_ICOUNT | XFS_SB_FDBLOCKS);
- if (sync)
- xfs_trans_set_sync(tp);
+ xfs_trans_set_sync(tp);
error = xfs_trans_commit(tp, 0);
return error;
}
@@ -1941,22 +1925,19 @@ unwind:
* the superblock buffer if it can be locked without sleeping.
* If it can't then we'll return NULL.
*/
-xfs_buf_t *
+struct xfs_buf *
xfs_getsb(
- xfs_mount_t *mp,
- int flags)
+ struct xfs_mount *mp,
+ int flags)
{
- xfs_buf_t *bp;
+ struct xfs_buf *bp = mp->m_sb_bp;
- ASSERT(mp->m_sb_bp != NULL);
- bp = mp->m_sb_bp;
- if (flags & XBF_TRYLOCK) {
- if (!XFS_BUF_CPSEMA(bp)) {
+ if (!xfs_buf_trylock(bp)) {
+ if (flags & XBF_TRYLOCK)
return NULL;
- }
- } else {
- XFS_BUF_PSEMA(bp, PRIBIO);
+ xfs_buf_lock(bp);
}
+
XFS_BUF_HOLD(bp);
ASSERT(XFS_BUF_ISDONE(bp));
return bp;
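
The reworked xfs_getsb() above folds the two locking paths into one: opportunistically try the lock first, and only on failure choose between returning NULL (XBF_TRYLOCK callers) and blocking. The same shape in portable C, with a pthread mutex standing in for the buffer lock:

#include <pthread.h>
#include <stdbool.h>

static void *getsb(pthread_mutex_t *lock, void *sb, bool trylock)
{
	if (pthread_mutex_trylock(lock) != 0) {
		if (trylock)
			return NULL;		/* caller asked not to sleep */
		pthread_mutex_lock(lock);	/* otherwise just block */
	}
	return sb;	/* returned locked; caller must unlock */
}
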
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 3d68bb267c5..bb24dac42a2 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -371,7 +371,7 @@ typedef struct xfs_mod_sb {
int64_t msb_delta; /* Change to make to specified field */
} xfs_mod_sb_t;
-extern int xfs_log_sbcount(xfs_mount_t *, uint);
+extern int xfs_log_sbcount(xfs_mount_t *);
extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index c83f63b33aa..efc147f0e9b 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -1426,6 +1426,7 @@ xfs_trans_committed(
static inline void
xfs_log_item_batch_insert(
struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items,
int nr_items,
xfs_lsn_t commit_lsn)
@@ -1434,7 +1435,7 @@ xfs_log_item_batch_insert(
spin_lock(&ailp->xa_lock);
/* xfs_trans_ail_update_bulk drops ailp->xa_lock */
- xfs_trans_ail_update_bulk(ailp, log_items, nr_items, commit_lsn);
+ xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
for (i = 0; i < nr_items; i++)
IOP_UNPIN(log_items[i], 0);
@@ -1452,6 +1453,13 @@ xfs_log_item_batch_insert(
* as an iclog write error even though we haven't started any IO yet. Hence in
* this case all we need to do is IOP_COMMITTED processing, followed by an
* IOP_UNPIN(aborted) call.
+ *
+ * The AIL cursor is used to optimise the insert process. If commit_lsn is not
+ * at the end of the AIL, the insert cursor avoids the need to walk
+ * the AIL to find the insertion point on every xfs_log_item_batch_insert()
+ * call. This saves a lot of needless list walking and is a net win, even
+ * though it slightly increases the amount of AIL lock traffic to set it up
+ * and tear it down.
*/
void
xfs_trans_committed_bulk(
@@ -1463,8 +1471,13 @@ xfs_trans_committed_bulk(
#define LOG_ITEM_BATCH_SIZE 32
struct xfs_log_item *log_items[LOG_ITEM_BATCH_SIZE];
struct xfs_log_vec *lv;
+ struct xfs_ail_cursor cur;
int i = 0;
+ spin_lock(&ailp->xa_lock);
+ xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
+ spin_unlock(&ailp->xa_lock);
+
/* unpin all the log items */
for (lv = log_vector; lv; lv = lv->lv_next ) {
struct xfs_log_item *lip = lv->lv_item;
@@ -1493,7 +1506,9 @@ xfs_trans_committed_bulk(
/*
* Not a bulk update option due to unusual item_lsn.
* Push into AIL immediately, rechecking the lsn once
- * we have the ail lock. Then unpin the item.
+ * we have the ail lock. Then unpin the item. This does
+ * not affect the AIL cursor the bulk insert path is
+ * using.
*/
spin_lock(&ailp->xa_lock);
if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
@@ -1507,7 +1522,7 @@ xfs_trans_committed_bulk(
/* Item is a candidate for bulk AIL insert. */
log_items[i++] = lv->lv_item;
if (i >= LOG_ITEM_BATCH_SIZE) {
- xfs_log_item_batch_insert(ailp, log_items,
+ xfs_log_item_batch_insert(ailp, &cur, log_items,
LOG_ITEM_BATCH_SIZE, commit_lsn);
i = 0;
}
@@ -1515,7 +1530,11 @@ xfs_trans_committed_bulk(
/* make sure we insert the remainder! */
if (i)
- xfs_log_item_batch_insert(ailp, log_items, i, commit_lsn);
+ xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
+
+ spin_lock(&ailp->xa_lock);
+ xfs_trans_ail_cursor_done(ailp, &cur);
+ spin_unlock(&ailp->xa_lock);
}
/*
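
To make the cursor optimisation concrete: each batch of log items committed at the same LSN is spliced in after a remembered position instead of re-walking the sorted list from the head. A minimal singly-linked sketch of that amortisation (error and empty-list handling omitted; the caller primes *posp once, as xfs_trans_ail_cursor_last() does above):

struct item {
	struct item *next;
	unsigned long lsn;
};

/*
 * Splice a batch of items, all carrying the same lsn, after *posp,
 * then advance *posp to the batch tail so the next batch starts from
 * here rather than from the list head.
 */
static void batch_insert(struct item **posp, struct item *batch_head,
			 struct item *batch_tail)
{
	struct item *pos = *posp;

	batch_tail->next = pos->next;
	pos->next = batch_head;
	*posp = batch_tail;
}
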
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 5fc2380092c..43233e92f0f 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -163,17 +163,11 @@ xfs_ail_max_lsn(
}
/*
- * AIL traversal cursor initialisation.
- *
- * The cursor keeps track of where our current traversal is up
- * to by tracking the next item in the list for us. However, for
- * this to be safe, removing an object from the AIL needs to invalidate
- * any cursor that points to it. hence the traversal cursor needs to
- * be linked to the struct xfs_ail so that deletion can search all the
- * active cursors for invalidation.
- *
- * We don't link the push cursor because it is embedded in the struct
- * xfs_ail and hence easily findable.
+ * The cursor keeps track of where our current traversal is up to by tracking
+ * the next item in the list for us. However, for this to be safe, removing an
+ * object from the AIL needs to invalidate any cursor that points to it. Hence
+ * the traversal cursor needs to be linked to the struct xfs_ail so that
+ * deletion can search all the active cursors for invalidation.
*/
STATIC void
xfs_trans_ail_cursor_init(
@@ -181,31 +175,12 @@ xfs_trans_ail_cursor_init(
struct xfs_ail_cursor *cur)
{
cur->item = NULL;
- if (cur == &ailp->xa_cursors)
- return;
-
- cur->next = ailp->xa_cursors.next;
- ailp->xa_cursors.next = cur;
-}
-
-/*
- * Set the cursor to the next item, because when we look
- * up the cursor the current item may have been freed.
- */
-STATIC void
-xfs_trans_ail_cursor_set(
- struct xfs_ail *ailp,
- struct xfs_ail_cursor *cur,
- struct xfs_log_item *lip)
-{
- if (lip)
- cur->item = xfs_ail_next(ailp, lip);
+ list_add_tail(&cur->list, &ailp->xa_cursors);
}
/*
- * Get the next item in the traversal and advance the cursor.
- * If the cursor was invalidated (inidicated by a lip of 1),
- * restart the traversal.
+ * Get the next item in the traversal and advance the cursor. If the cursor
+ * was invalidated (indicated by a lip of 1), restart the traversal.
*/
struct xfs_log_item *
xfs_trans_ail_cursor_next(
@@ -216,45 +191,31 @@ xfs_trans_ail_cursor_next(
if ((__psint_t)lip & 1)
lip = xfs_ail_min(ailp);
- xfs_trans_ail_cursor_set(ailp, cur, lip);
+ if (lip)
+ cur->item = xfs_ail_next(ailp, lip);
return lip;
}
/*
- * Now that the traversal is complete, we need to remove the cursor
- * from the list of traversing cursors. Avoid removing the embedded
- * push cursor, but use the fact it is always present to make the
- * list deletion simple.
+ * When the traversal is complete, we need to remove the cursor from the list
+ * of traversing cursors.
*/
void
xfs_trans_ail_cursor_done(
struct xfs_ail *ailp,
- struct xfs_ail_cursor *done)
+ struct xfs_ail_cursor *cur)
{
- struct xfs_ail_cursor *prev = NULL;
- struct xfs_ail_cursor *cur;
-
- done->item = NULL;
- if (done == &ailp->xa_cursors)
- return;
- prev = &ailp->xa_cursors;
- for (cur = prev->next; cur; prev = cur, cur = prev->next) {
- if (cur == done) {
- prev->next = cur->next;
- break;
- }
- }
- ASSERT(cur);
+ cur->item = NULL;
+ list_del_init(&cur->list);
}
/*
- * Invalidate any cursor that is pointing to this item. This is
- * called when an item is removed from the AIL. Any cursor pointing
- * to this object is now invalid and the traversal needs to be
- * terminated so it doesn't reference a freed object. We set the
- * cursor item to a value of 1 so we can distinguish between an
- * invalidation and the end of the list when getting the next item
- * from the cursor.
+ * Invalidate any cursor that is pointing to this item. This is called when an
+ * item is removed from the AIL. Any cursor pointing to this object is now
+ * invalid and the traversal needs to be terminated so it doesn't reference a
+ * freed object. We set the low bit of the cursor item pointer so we can
+ * distinguish between an invalidation and the end of the list when getting the
+ * next item from the cursor.
*/
STATIC void
xfs_trans_ail_cursor_clear(
@@ -263,8 +224,7 @@ xfs_trans_ail_cursor_clear(
{
struct xfs_ail_cursor *cur;
- /* need to search all cursors */
- for (cur = &ailp->xa_cursors; cur; cur = cur->next) {
+ list_for_each_entry(cur, &ailp->xa_cursors, list) {
if (cur->item == lip)
cur->item = (struct xfs_log_item *)
((__psint_t)cur->item | 1);
@@ -272,9 +232,10 @@ xfs_trans_ail_cursor_clear(
}
/*
- * Return the item in the AIL with the current lsn.
- * Return the current tree generation number for use
- * in calls to xfs_trans_next_ail().
+ * Find the first item in the AIL with the given @lsn by searching in ascending
+ * LSN order and initialise the cursor to point to the next item for an
+ * ascending traversal. Pass a @lsn of zero to initialise the cursor to the
+ * first item in the AIL. Returns NULL if the list is empty.
*/
xfs_log_item_t *
xfs_trans_ail_cursor_first(
@@ -285,46 +246,112 @@ xfs_trans_ail_cursor_first(
xfs_log_item_t *lip;
xfs_trans_ail_cursor_init(ailp, cur);
- lip = xfs_ail_min(ailp);
- if (lsn == 0)
+
+ if (lsn == 0) {
+ lip = xfs_ail_min(ailp);
goto out;
+ }
list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
goto out;
}
- lip = NULL;
+ return NULL;
+
out:
- xfs_trans_ail_cursor_set(ailp, cur, lip);
+ if (lip)
+ cur->item = xfs_ail_next(ailp, lip);
return lip;
}
+static struct xfs_log_item *
+__xfs_trans_ail_cursor_last(
+ struct xfs_ail *ailp,
+ xfs_lsn_t lsn)
+{
+ xfs_log_item_t *lip;
+
+ list_for_each_entry_reverse(lip, &ailp->xa_ail, li_ail) {
+ if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
+ return lip;
+ }
+ return NULL;
+}
+
+/*
+ * Find the last item in the AIL with the given @lsn by searching in descending
+ * LSN order and initialise the cursor to point to that item. If there is no
+ * item with the value of @lsn, then it sets the cursor to the last item with an
+ * LSN lower than @lsn. Returns NULL if the list is empty.
+ */
+struct xfs_log_item *
+xfs_trans_ail_cursor_last(
+ struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
+ xfs_lsn_t lsn)
+{
+ xfs_trans_ail_cursor_init(ailp, cur);
+ cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
+ return cur->item;
+}
+
/*
- * splice the log item list into the AIL at the given LSN.
+ * Splice the log item list into the AIL at the given LSN. We splice to the
+ * tail of the given LSN to maintain insert order for push traversals. The
+ * cursor is optional, allowing repeated updates to the same LSN to avoid
+ * repeated traversals.
*/
static void
xfs_ail_splice(
- struct xfs_ail *ailp,
- struct list_head *list,
- xfs_lsn_t lsn)
+ struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
+ struct list_head *list,
+ xfs_lsn_t lsn)
{
- xfs_log_item_t *next_lip;
+ struct xfs_log_item *lip = cur ? cur->item : NULL;
+ struct xfs_log_item *next_lip;
- /* If the list is empty, just insert the item. */
- if (list_empty(&ailp->xa_ail)) {
- list_splice(list, &ailp->xa_ail);
- return;
+ /*
+ * Get a new cursor if we don't have a placeholder or the existing one
+ * has been invalidated.
+ */
+ if (!lip || (__psint_t)lip & 1) {
+ lip = __xfs_trans_ail_cursor_last(ailp, lsn);
+
+ if (!lip) {
+ /* The list is empty, so just splice and return. */
+ if (cur)
+ cur->item = NULL;
+ list_splice(list, &ailp->xa_ail);
+ return;
+ }
}
- list_for_each_entry_reverse(next_lip, &ailp->xa_ail, li_ail) {
- if (XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0)
- break;
+ /*
+ * Our cursor points to the item we want to insert _after_, so we have
+ * to update the cursor to point to the end of the list we are splicing
+ * in so that it points to the correct location for the next splice.
+ * i.e. before the splice
+ *
+ * lsn -> lsn -> lsn + x -> lsn + x ...
+ * ^
+ * | cursor points here
+ *
+ * After the splice we have:
+ *
+ * lsn -> lsn -> lsn -> lsn -> .... -> lsn -> lsn + x -> lsn + x ...
+ * ^ ^
+ * | cursor points here | needs to move here
+ *
+ * So we set the cursor to the last item in the list to be spliced
+ * before we execute the splice, resulting in the cursor pointing to
+ * the correct item after the splice occurs.
+ */
+ if (cur) {
+ next_lip = list_entry(list->prev, struct xfs_log_item, li_ail);
+ cur->item = next_lip;
}
-
- ASSERT(&next_lip->li_ail == &ailp->xa_ail ||
- XFS_LSN_CMP(next_lip->li_lsn, lsn) <= 0);
-
- list_splice_init(list, &next_lip->li_ail);
+ list_splice(list, &lip->li_ail);
}
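
To make the diagram above concrete, here is a self-contained userspace sketch of the same cursor bookkeeping, using a minimal circular doubly linked list in place of the kernel's list.h (an illustration, not the kernel implementation):

	#include <stdio.h>

	struct node { struct node *prev, *next; int lsn; };

	static void list_init(struct node *h) { h->prev = h->next = h; }

	static void list_add_tail(struct node *n, struct node *h)
	{
		n->prev = h->prev; n->next = h;
		h->prev->next = n; h->prev = n;
	}

	/* Insert every node of headed list 'l' right after node 'at'. */
	static void list_splice_after(struct node *l, struct node *at)
	{
		struct node *first = l->next, *last = l->prev;

		first->prev = at;
		last->next = at->next;
		at->next->prev = last;
		at->next = first;
		list_init(l);		/* leave the donor list empty */
	}

	int main(void)
	{
		struct node ail, tmp;
		struct node a = { .lsn = 10 }, b = { .lsn = 10 }, c = { .lsn = 20 };
		struct node *cursor;

		list_init(&ail);
		list_init(&tmp);
		list_add_tail(&a, &ail);	/* existing lsn 10 item */
		list_add_tail(&c, &ail);	/* existing lsn 20 item */
		list_add_tail(&b, &tmp);	/* new lsn 10 item to splice in */

		cursor = tmp.prev;		/* last item to be spliced: b */
		list_splice_after(&tmp, &a);	/* splice after the lsn 10 tail */

		/* cursor now sits on the last spliced node, just before lsn + x */
		printf("cursor lsn=%d, next lsn=%d\n", cursor->lsn, cursor->next->lsn);
		return 0;
	}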
/*
@@ -351,7 +378,7 @@ xfs_ail_worker(
struct xfs_ail *ailp = container_of(to_delayed_work(work),
struct xfs_ail, xa_work);
xfs_mount_t *mp = ailp->xa_mount;
- struct xfs_ail_cursor *cur = &ailp->xa_cursors;
+ struct xfs_ail_cursor cur;
xfs_log_item_t *lip;
xfs_lsn_t lsn;
xfs_lsn_t target;
@@ -363,13 +390,12 @@ xfs_ail_worker(
spin_lock(&ailp->xa_lock);
target = ailp->xa_target;
- xfs_trans_ail_cursor_init(ailp, cur);
- lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
+ lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->xa_last_pushed_lsn);
if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
/*
* AIL is empty or our push has reached the end.
*/
- xfs_trans_ail_cursor_done(ailp, cur);
+ xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
goto out_done;
}
@@ -457,12 +483,12 @@ xfs_ail_worker(
if (stuck > 100)
break;
- lip = xfs_trans_ail_cursor_next(ailp, cur);
+ lip = xfs_trans_ail_cursor_next(ailp, &cur);
if (lip == NULL)
break;
lsn = lip->li_lsn;
}
- xfs_trans_ail_cursor_done(ailp, cur);
+ xfs_trans_ail_cursor_done(ailp, &cur);
spin_unlock(&ailp->xa_lock);
if (flush_log) {
@@ -645,6 +671,7 @@ xfs_trans_unlocked_item(
void
xfs_trans_ail_update_bulk(
struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items,
int nr_items,
xfs_lsn_t lsn) __releases(ailp->xa_lock)
@@ -674,7 +701,7 @@ xfs_trans_ail_update_bulk(
list_add(&lip->li_ail, &tmp);
}
- xfs_ail_splice(ailp, &tmp, lsn);
+ xfs_ail_splice(ailp, cur, &tmp, lsn);
if (!mlip_changed) {
spin_unlock(&ailp->xa_lock);
@@ -793,6 +820,7 @@ xfs_trans_ail_init(
ailp->xa_mount = mp;
INIT_LIST_HEAD(&ailp->xa_ail);
+ INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock);
INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
mp->m_ail = ailp;
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 03b3b7f85a3..15584fc3ed7 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -81,7 +81,7 @@ _xfs_trans_bjoin(
struct xfs_buf_log_item *bip;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
+ ASSERT(bp->b_transp == NULL);
/*
* The xfs_buf_log_item pointer is stored in b_fsprivate. If
@@ -89,7 +89,7 @@ _xfs_trans_bjoin(
* The checks to see if one is there are in xfs_buf_item_init().
*/
xfs_buf_item_init(bp, tp->t_mountp);
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ bip = bp->b_fspriv;
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(!(bip->bli_flags & XFS_BLI_LOGGED));
@@ -110,7 +110,7 @@ _xfs_trans_bjoin(
* Initialize b_fsprivate2 so we can find it with incore_match()
* in xfs_trans_get_buf() and friends above.
*/
- XFS_BUF_SET_FSPRIVATE2(bp, tp);
+ bp->b_transp = tp;
}
@@ -160,7 +160,7 @@ xfs_trans_get_buf(xfs_trans_t *tp,
*/
bp = xfs_trans_buf_item_match(tp, target_dev, blkno, len);
if (bp != NULL) {
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
+ ASSERT(xfs_buf_islocked(bp));
if (XFS_FORCED_SHUTDOWN(tp->t_mountp))
XFS_BUF_SUPER_STALE(bp);
@@ -172,8 +172,8 @@ xfs_trans_get_buf(xfs_trans_t *tp,
else if (XFS_BUF_ISSTALE(bp))
ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ bip = bp->b_fspriv;
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
@@ -232,8 +232,8 @@ xfs_trans_getsb(xfs_trans_t *tp,
* recursion count and return the buffer to the caller.
*/
bp = mp->m_sb_bp;
- if (XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp) {
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+ if (bp->b_transp == tp) {
+ bip = bp->b_fspriv;
ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_recur++;
@@ -327,9 +327,9 @@ xfs_trans_read_buf(
*/
bp = xfs_trans_buf_item_match(tp, target, blkno, len);
if (bp != NULL) {
- ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+ ASSERT(xfs_buf_islocked(bp));
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bp->b_fspriv != NULL);
ASSERT((XFS_BUF_ISERROR(bp)) == 0);
if (!(XFS_BUF_ISDONE(bp))) {
trace_xfs_trans_read_buf_io(bp, _RET_IP_);
@@ -363,7 +363,7 @@ xfs_trans_read_buf(
}
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t*);
+ bip = bp->b_fspriv;
bip->bli_recur++;
ASSERT(atomic_read(&bip->bli_refcount) > 0);
@@ -460,32 +460,30 @@ xfs_trans_brelse(xfs_trans_t *tp,
xfs_buf_t *bp)
{
xfs_buf_log_item_t *bip;
- xfs_log_item_t *lip;
/*
* Default to a normal brelse() call if the tp is NULL.
*/
if (tp == NULL) {
- ASSERT(XFS_BUF_FSPRIVATE2(bp, void *) == NULL);
+ struct xfs_log_item *lip = bp->b_fspriv;
+
+ ASSERT(bp->b_transp == NULL);
+
/*
* If there's a buf log item attached to the buffer,
* then let the AIL know that the buffer is being
* unlocked.
*/
- if (XFS_BUF_FSPRIVATE(bp, void *) != NULL) {
- lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
- if (lip->li_type == XFS_LI_BUF) {
- bip = XFS_BUF_FSPRIVATE(bp,xfs_buf_log_item_t*);
- xfs_trans_unlocked_item(bip->bli_item.li_ailp,
- lip);
- }
+ if (lip != NULL && lip->li_type == XFS_LI_BUF) {
+ bip = bp->b_fspriv;
+ xfs_trans_unlocked_item(bip->bli_item.li_ailp, lip);
}
xfs_buf_relse(bp);
return;
}
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ bip = bp->b_fspriv;
ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
@@ -556,7 +554,7 @@ xfs_trans_brelse(xfs_trans_t *tp,
xfs_buf_item_relse(bp);
bip = NULL;
}
- XFS_BUF_SET_FSPRIVATE2(bp, NULL);
+ bp->b_transp = NULL;
/*
* If we've still got a buf log item on the buffer, then
@@ -581,16 +579,15 @@ void
xfs_trans_bhold(xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
+
bip->bli_flags |= XFS_BLI_HOLD;
trace_xfs_trans_bhold(bip);
}
@@ -603,19 +600,17 @@ void
xfs_trans_bhold_release(xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
ASSERT(!(bip->bli_format.blf_flags & XFS_BLF_CANCEL));
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT(bip->bli_flags & XFS_BLI_HOLD);
- bip->bli_flags &= ~XFS_BLI_HOLD;
+ bip->bli_flags &= ~XFS_BLI_HOLD;
trace_xfs_trans_bhold_release(bip);
}
@@ -634,14 +629,14 @@ xfs_trans_log_buf(xfs_trans_t *tp,
uint first,
uint last)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT((first <= last) && (last < XFS_BUF_COUNT(bp)));
- ASSERT((XFS_BUF_IODONE_FUNC(bp) == NULL) ||
- (XFS_BUF_IODONE_FUNC(bp) == xfs_buf_iodone_callbacks));
+ ASSERT(bp->b_iodone == NULL ||
+ bp->b_iodone == xfs_buf_iodone_callbacks);
/*
* Mark the buffer as needing to be written out eventually,
@@ -656,9 +651,8 @@ xfs_trans_log_buf(xfs_trans_t *tp,
XFS_BUF_DELAYWRITE(bp);
XFS_BUF_DONE(bp);
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
- XFS_BUF_SET_IODONE_FUNC(bp, xfs_buf_iodone_callbacks);
+ bp->b_iodone = xfs_buf_iodone_callbacks;
bip->bli_item.li_cb = xfs_buf_iodone;
trace_xfs_trans_log_buf(bip);
@@ -706,13 +700,11 @@ xfs_trans_binval(
xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
trace_xfs_trans_binval(bip);
@@ -780,13 +772,11 @@ xfs_trans_inode_buf(
xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_INODE_BUF;
@@ -806,13 +796,11 @@ xfs_trans_stale_inode_buf(
xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_STALE_INODE;
@@ -833,13 +821,11 @@ xfs_trans_inode_alloc_buf(
xfs_trans_t *tp,
xfs_buf_t *bp)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF;
@@ -863,16 +849,14 @@ xfs_trans_dquot_buf(
xfs_buf_t *bp,
uint type)
{
- xfs_buf_log_item_t *bip;
+ xfs_buf_log_item_t *bip = bp->b_fspriv;
ASSERT(XFS_BUF_ISBUSY(bp));
- ASSERT(XFS_BUF_FSPRIVATE2(bp, xfs_trans_t *) == tp);
- ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
+ ASSERT(bp->b_transp == tp);
+ ASSERT(bip != NULL);
ASSERT(type == XFS_BLF_UDQUOT_BUF ||
type == XFS_BLF_PDQUOT_BUF ||
type == XFS_BLF_GDQUOT_BUF);
-
- bip = XFS_BUF_FSPRIVATE(bp, xfs_buf_log_item_t *);
ASSERT(atomic_read(&bip->bli_refcount) > 0);
bip->bli_format.blf_flags |= type;
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 048b0c689d3..c8dea2fd7e6 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -55,7 +55,6 @@ xfs_trans_ijoin(
{
xfs_inode_log_item_t *iip;
- ASSERT(ip->i_transp == NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
if (ip->i_itemp == NULL)
xfs_inode_item_init(ip, ip->i_mount);
@@ -68,12 +67,6 @@ xfs_trans_ijoin(
xfs_trans_add_item(tp, &iip->ili_item);
xfs_trans_inode_broot_debug(ip);
-
- /*
- * Initialize i_transp so we can find it with xfs_inode_incore()
- * in xfs_trans_iget() above.
- */
- ip->i_transp = tp;
}
/*
@@ -111,7 +104,6 @@ xfs_trans_ichgtime(
ASSERT(tp);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(ip->i_transp == tp);
tv = current_fs_time(inode->i_sb);
@@ -140,7 +132,6 @@ xfs_trans_log_inode(
xfs_inode_t *ip,
uint flags)
{
- ASSERT(ip->i_transp == tp);
ASSERT(ip->i_itemp != NULL);
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h
index 6b164e9e9a1..212946b9723 100644
--- a/fs/xfs/xfs_trans_priv.h
+++ b/fs/xfs/xfs_trans_priv.h
@@ -53,7 +53,7 @@ void xfs_trans_committed_bulk(struct xfs_ail *ailp, struct xfs_log_vec *lv,
* of the list to trigger traversal restarts.
*/
struct xfs_ail_cursor {
- struct xfs_ail_cursor *next;
+ struct list_head list;
struct xfs_log_item *item;
};
@@ -66,7 +66,7 @@ struct xfs_ail {
struct xfs_mount *xa_mount;
struct list_head xa_ail;
xfs_lsn_t xa_target;
- struct xfs_ail_cursor xa_cursors;
+ struct list_head xa_cursors;
spinlock_t xa_lock;
struct delayed_work xa_work;
xfs_lsn_t xa_last_pushed_lsn;
@@ -82,6 +82,7 @@ struct xfs_ail {
extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items, int nr_items,
xfs_lsn_t lsn) __releases(ailp->xa_lock);
static inline void
@@ -90,7 +91,7 @@ xfs_trans_ail_update(
struct xfs_log_item *lip,
xfs_lsn_t lsn) __releases(ailp->xa_lock)
{
- xfs_trans_ail_update_bulk(ailp, &lip, 1, lsn);
+ xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}
void xfs_trans_ail_delete_bulk(struct xfs_ail *ailp,
@@ -111,10 +112,13 @@ xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
void xfs_trans_unlocked_item(struct xfs_ail *,
xfs_log_item_t *);
-struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
+struct xfs_log_item * xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn);
-struct xfs_log_item *xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
+struct xfs_log_item * xfs_trans_ail_cursor_last(struct xfs_ail *ailp,
+ struct xfs_ail_cursor *cur,
+ xfs_lsn_t lsn);
+struct xfs_log_item * xfs_trans_ail_cursor_next(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur);
void xfs_trans_ail_cursor_done(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 619720705bc..88d121486c5 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -50,430 +50,6 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
-int
-xfs_setattr(
- struct xfs_inode *ip,
- struct iattr *iattr,
- int flags)
-{
- xfs_mount_t *mp = ip->i_mount;
- struct inode *inode = VFS_I(ip);
- int mask = iattr->ia_valid;
- xfs_trans_t *tp;
- int code;
- uint lock_flags;
- uint commit_flags=0;
- uid_t uid=0, iuid=0;
- gid_t gid=0, igid=0;
- struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
- int need_iolock = 1;
-
- trace_xfs_setattr(ip);
-
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return XFS_ERROR(EROFS);
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return XFS_ERROR(EIO);
-
- code = -inode_change_ok(inode, iattr);
- if (code)
- return code;
-
- olddquot1 = olddquot2 = NULL;
- udqp = gdqp = NULL;
-
- /*
- * If disk quotas is on, we make sure that the dquots do exist on disk,
- * before we start any other transactions. Trying to do this later
- * is messy. We don't care to take a readlock to look at the ids
- * in inode here, because we can't hold it across the trans_reserve.
- * If the IDs do change before we take the ilock, we're covered
- * because the i_*dquot fields will get updated anyway.
- */
- if (XFS_IS_QUOTA_ON(mp) && (mask & (ATTR_UID|ATTR_GID))) {
- uint qflags = 0;
-
- if ((mask & ATTR_UID) && XFS_IS_UQUOTA_ON(mp)) {
- uid = iattr->ia_uid;
- qflags |= XFS_QMOPT_UQUOTA;
- } else {
- uid = ip->i_d.di_uid;
- }
- if ((mask & ATTR_GID) && XFS_IS_GQUOTA_ON(mp)) {
- gid = iattr->ia_gid;
- qflags |= XFS_QMOPT_GQUOTA;
- } else {
- gid = ip->i_d.di_gid;
- }
-
- /*
- * We take a reference when we initialize udqp and gdqp,
- * so it is important that we never blindly double trip on
- * the same variable. See xfs_create() for an example.
- */
- ASSERT(udqp == NULL);
- ASSERT(gdqp == NULL);
- code = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
- qflags, &udqp, &gdqp);
- if (code)
- return code;
- }
-
- /*
- * For the other attributes, we acquire the inode lock and
- * first do an error checking pass.
- */
- tp = NULL;
- lock_flags = XFS_ILOCK_EXCL;
- if (flags & XFS_ATTR_NOLOCK)
- need_iolock = 0;
- if (!(mask & ATTR_SIZE)) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- commit_flags = 0;
- code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
- 0, 0, 0);
- if (code) {
- lock_flags = 0;
- goto error_return;
- }
- } else {
- if (need_iolock)
- lock_flags |= XFS_IOLOCK_EXCL;
- }
-
- xfs_ilock(ip, lock_flags);
-
- /*
- * Change file ownership. Must be the owner or privileged.
- */
- if (mask & (ATTR_UID|ATTR_GID)) {
- /*
- * These IDs could have changed since we last looked at them.
- * But, we're assured that if the ownership did change
- * while we didn't have the inode locked, inode's dquot(s)
- * would have changed also.
- */
- iuid = ip->i_d.di_uid;
- igid = ip->i_d.di_gid;
- gid = (mask & ATTR_GID) ? iattr->ia_gid : igid;
- uid = (mask & ATTR_UID) ? iattr->ia_uid : iuid;
-
- /*
- * Do a quota reservation only if uid/gid is actually
- * going to change.
- */
- if (XFS_IS_QUOTA_RUNNING(mp) &&
- ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
- (XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
- ASSERT(tp);
- code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
- capable(CAP_FOWNER) ?
- XFS_QMOPT_FORCE_RES : 0);
- if (code) /* out of quota */
- goto error_return;
- }
- }
-
- /*
- * Truncate file. Must have write permission and not be a directory.
- */
- if (mask & ATTR_SIZE) {
- /* Short circuit the truncate case for zero length files */
- if (iattr->ia_size == 0 &&
- ip->i_size == 0 && ip->i_d.di_nextents == 0) {
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- lock_flags &= ~XFS_ILOCK_EXCL;
- if (mask & ATTR_CTIME) {
- inode->i_mtime = inode->i_ctime =
- current_fs_time(inode->i_sb);
- xfs_mark_inode_dirty_sync(ip);
- }
- code = 0;
- goto error_return;
- }
-
- if (S_ISDIR(ip->i_d.di_mode)) {
- code = XFS_ERROR(EISDIR);
- goto error_return;
- } else if (!S_ISREG(ip->i_d.di_mode)) {
- code = XFS_ERROR(EINVAL);
- goto error_return;
- }
-
- /*
- * Make sure that the dquots are attached to the inode.
- */
- code = xfs_qm_dqattach_locked(ip, 0);
- if (code)
- goto error_return;
-
- /*
- * Now we can make the changes. Before we join the inode
- * to the transaction, if ATTR_SIZE is set then take care of
- * the part of the truncation that must be done without the
- * inode lock. This needs to be done before joining the inode
- * to the transaction, because the inode cannot be unlocked
- * once it is a part of the transaction.
- */
- if (iattr->ia_size > ip->i_size) {
- /*
- * Do the first part of growing a file: zero any data
- * in the last block that is beyond the old EOF. We
- * need to do this before the inode is joined to the
- * transaction to modify the i_size.
- */
- code = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
- if (code)
- goto error_return;
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- lock_flags &= ~XFS_ILOCK_EXCL;
-
- /*
- * We are going to log the inode size change in this
- * transaction so any previous writes that are beyond the on
- * disk EOF and the new EOF that have not been written out need
- * to be written here. If we do not write the data out, we
- * expose ourselves to the null files problem.
- *
- * Only flush from the on disk size to the smaller of the in
- * memory file size or the new size as that's the range we
- * really care about here and prevents waiting for other data
- * not within the range we care about here.
- */
- if (ip->i_size != ip->i_d.di_size &&
- iattr->ia_size > ip->i_d.di_size) {
- code = xfs_flush_pages(ip,
- ip->i_d.di_size, iattr->ia_size,
- XBF_ASYNC, FI_NONE);
- if (code)
- goto error_return;
- }
-
- /* wait for all I/O to complete */
- xfs_ioend_wait(ip);
-
- code = -block_truncate_page(inode->i_mapping, iattr->ia_size,
- xfs_get_blocks);
- if (code)
- goto error_return;
-
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_SIZE);
- code = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
- XFS_TRANS_PERM_LOG_RES,
- XFS_ITRUNCATE_LOG_COUNT);
- if (code)
- goto error_return;
-
- truncate_setsize(inode, iattr->ia_size);
-
- commit_flags = XFS_TRANS_RELEASE_LOG_RES;
- lock_flags |= XFS_ILOCK_EXCL;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
-
- xfs_trans_ijoin(tp, ip);
-
- /*
- * Only change the c/mtime if we are changing the size
- * or we are explicitly asked to change it. This handles
- * the semantic difference between truncate() and ftruncate()
- * as implemented in the VFS.
- *
- * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
- * is a special case where we need to update the times despite
- * not having these flags set. For all other operations the
- * VFS set these flags explicitly if it wants a timestamp
- * update.
- */
- if (iattr->ia_size != ip->i_size &&
- (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
- iattr->ia_ctime = iattr->ia_mtime =
- current_fs_time(inode->i_sb);
- mask |= ATTR_CTIME | ATTR_MTIME;
- }
-
- if (iattr->ia_size > ip->i_size) {
- ip->i_d.di_size = iattr->ia_size;
- ip->i_size = iattr->ia_size;
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- } else if (iattr->ia_size <= ip->i_size ||
- (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
- /*
- * signal a sync transaction unless
- * we're truncating an already unlinked
- * file on a wsync filesystem
- */
- code = xfs_itruncate_finish(&tp, ip, iattr->ia_size,
- XFS_DATA_FORK,
- ((ip->i_d.di_nlink != 0 ||
- !(mp->m_flags & XFS_MOUNT_WSYNC))
- ? 1 : 0));
- if (code)
- goto abort_return;
- /*
- * Truncated "down", so we're removing references
- * to old data here - if we now delay flushing for
- * a long time, we expose ourselves unduly to the
- * notorious NULL files problem. So, we mark this
- * vnode and flush it when the file is closed, and
- * do not wait the usual (long) time for writeout.
- */
- xfs_iflags_set(ip, XFS_ITRUNCATED);
- }
- } else if (tp) {
- xfs_trans_ijoin(tp, ip);
- }
-
- /*
- * Change file ownership. Must be the owner or privileged.
- */
- if (mask & (ATTR_UID|ATTR_GID)) {
- /*
- * CAP_FSETID overrides the following restrictions:
- *
- * The set-user-ID and set-group-ID bits of a file will be
- * cleared upon successful return from chown()
- */
- if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
- !capable(CAP_FSETID)) {
- ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
- }
-
- /*
- * Change the ownerships and register quota modifications
- * in the transaction.
- */
- if (iuid != uid) {
- if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_UQUOTA_ON(mp)) {
- ASSERT(mask & ATTR_UID);
- ASSERT(udqp);
- olddquot1 = xfs_qm_vop_chown(tp, ip,
- &ip->i_udquot, udqp);
- }
- ip->i_d.di_uid = uid;
- inode->i_uid = uid;
- }
- if (igid != gid) {
- if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
- ASSERT(!XFS_IS_PQUOTA_ON(mp));
- ASSERT(mask & ATTR_GID);
- ASSERT(gdqp);
- olddquot2 = xfs_qm_vop_chown(tp, ip,
- &ip->i_gdquot, gdqp);
- }
- ip->i_d.di_gid = gid;
- inode->i_gid = gid;
- }
- }
-
- /*
- * Change file access modes.
- */
- if (mask & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
-
- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
- mode &= ~S_ISGID;
-
- ip->i_d.di_mode &= S_IFMT;
- ip->i_d.di_mode |= mode & ~S_IFMT;
-
- inode->i_mode &= S_IFMT;
- inode->i_mode |= mode & ~S_IFMT;
- }
-
- /*
- * Change file access or modified times.
- */
- if (mask & ATTR_ATIME) {
- inode->i_atime = iattr->ia_atime;
- ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
- ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
- ip->i_update_core = 1;
- }
- if (mask & ATTR_CTIME) {
- inode->i_ctime = iattr->ia_ctime;
- ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
- ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
- ip->i_update_core = 1;
- }
- if (mask & ATTR_MTIME) {
- inode->i_mtime = iattr->ia_mtime;
- ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
- ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
- ip->i_update_core = 1;
- }
-
- /*
- * And finally, log the inode core if any attribute in it
- * has been changed.
- */
- if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
- ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- XFS_STATS_INC(xs_ig_attrchg);
-
- /*
- * If this is a synchronous mount, make sure that the
- * transaction goes to disk before returning to the user.
- * This is slightly sub-optimal in that truncates require
- * two sync transactions instead of one for wsync filesystems.
- * One for the truncate and one for the timestamps since we
- * don't want to change the timestamps unless we're sure the
- * truncate worked. Truncates are less than 1% of the laddis
- * mix so this probably isn't worth the trouble to optimize.
- */
- code = 0;
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(tp);
-
- code = xfs_trans_commit(tp, commit_flags);
-
- xfs_iunlock(ip, lock_flags);
-
- /*
- * Release any dquot(s) the inode had kept before chown.
- */
- xfs_qm_dqrele(olddquot1);
- xfs_qm_dqrele(olddquot2);
- xfs_qm_dqrele(udqp);
- xfs_qm_dqrele(gdqp);
-
- if (code)
- return code;
-
- /*
- * XXX(hch): Updating the ACL entries is not atomic vs the i_mode
- * update. We could avoid this with linked transactions
- * and passing down the transaction pointer all the way
- * to attr_set. No previous user of the generic
- * Posix ACL code seems to care about this issue either.
- */
- if ((mask & ATTR_MODE) && !(flags & XFS_ATTR_NOACL)) {
- code = -xfs_acl_chmod(inode);
- if (code)
- return XFS_ERROR(code);
- }
-
- return 0;
-
- abort_return:
- commit_flags |= XFS_TRANS_ABORT;
- error_return:
- xfs_qm_dqrele(udqp);
- xfs_qm_dqrele(gdqp);
- if (tp) {
- xfs_trans_cancel(tp, commit_flags);
- }
- if (lock_flags != 0) {
- xfs_iunlock(ip, lock_flags);
- }
- return code;
-}
-
/*
* The maximum pathlen is 1024 bytes. Since the minimum file system
* blocksize is 512 bytes, we can get a max of 2 extents back from
@@ -621,13 +197,6 @@ xfs_free_eofblocks(
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
- /*
- * Do the xfs_itruncate_start() call before
- * reserving any log space because
- * itruncate_start will call into the buffer
- * cache and we can't
- * do that within a transaction.
- */
if (flags & XFS_FREE_EOF_TRYLOCK) {
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
xfs_trans_cancel(tp, 0);
@@ -636,13 +205,6 @@ xfs_free_eofblocks(
} else {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
}
- error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
- ip->i_size);
- if (error) {
- xfs_trans_cancel(tp, 0);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return error;
- }
error = xfs_trans_reserve(tp, 0,
XFS_ITRUNCATE_LOG_RES(mp),
@@ -658,15 +220,12 @@ xfs_free_eofblocks(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip);
- error = xfs_itruncate_finish(&tp, ip,
- ip->i_size,
- XFS_DATA_FORK,
- 0);
- /*
- * If we get an error at this point we
- * simply don't bother truncating the file.
- */
+ error = xfs_itruncate_data(&tp, ip, ip->i_size);
if (error) {
+ /*
+ * If we get an error at this point we simply don't
+ * bother truncating the file.
+ */
xfs_trans_cancel(tp,
(XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT));
@@ -1084,20 +643,9 @@ xfs_inactive(
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
if (truncate) {
- /*
- * Do the xfs_itruncate_start() call before
- * reserving any log space because itruncate_start
- * will call into the buffer cache and we can't
- * do that within a transaction.
- */
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
- if (error) {
- xfs_trans_cancel(tp, 0);
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return VN_INACTIVE_CACHE;
- }
+ xfs_ioend_wait(ip);
error = xfs_trans_reserve(tp, 0,
XFS_ITRUNCATE_LOG_RES(mp),
@@ -1114,16 +662,7 @@ xfs_inactive(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip);
- /*
- * normally, we have to run xfs_itruncate_finish sync.
- * But if filesystem is wsync and we're in the inactive
- * path, then we know that nlink == 0, and that the
- * xaction that made nlink == 0 is permanently committed
- * since xfs_remove runs as a synchronous transaction.
- */
- error = xfs_itruncate_finish(&tp, ip, 0, XFS_DATA_FORK,
- (!(mp->m_flags & XFS_MOUNT_WSYNC) ? 1 : 0));
-
+ error = xfs_itruncate_data(&tp, ip, 0);
if (error) {
xfs_trans_cancel(tp,
XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
@@ -2430,6 +1969,8 @@ xfs_zero_remaining_bytes(
if (!bp)
return XFS_ERROR(ENOMEM);
+ xfs_buf_unlock(bp);
+
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimap = 1;
@@ -2784,7 +2325,7 @@ xfs_change_file_space(
iattr.ia_valid = ATTR_SIZE;
iattr.ia_size = startoffset;
- error = xfs_setattr(ip, &iattr, attr_flags);
+ error = xfs_setattr_size(ip, &iattr, attr_flags);
if (error)
return error;
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 3bcd23353d6..35d3d513e1e 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -13,7 +13,8 @@ struct xfs_inode;
struct xfs_iomap;
-int xfs_setattr(struct xfs_inode *ip, struct iattr *vap, int flags);
+int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags);
+int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap, int flags);
#define XFS_ATTR_DMI 0x01 /* invocation from a DMI function */
#define XFS_ATTR_NONBLOCK 0x02 /* return EAGAIN if operation would block */
#define XFS_ATTR_NOLOCK 0x04 /* Don't grab any conflicting locks */
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 47dac5ea8d3..12c517b51ca 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -53,6 +53,7 @@ struct proc_event {
PROC_EVENT_UID = 0x00000004,
PROC_EVENT_GID = 0x00000040,
PROC_EVENT_SID = 0x00000080,
+ PROC_EVENT_PTRACE = 0x00000100,
/* "next" should be 0x00000400 */
/* "last" is the last process event: exit */
PROC_EVENT_EXIT = 0x80000000
@@ -95,6 +96,13 @@ struct proc_event {
__kernel_pid_t process_tgid;
} sid;
+ struct ptrace_proc_event {
+ __kernel_pid_t process_pid;
+ __kernel_pid_t process_tgid;
+ __kernel_pid_t tracer_pid;
+ __kernel_pid_t tracer_tgid;
+ } ptrace;
+
struct exit_proc_event {
__kernel_pid_t process_pid;
__kernel_pid_t process_tgid;
@@ -109,6 +117,7 @@ void proc_fork_connector(struct task_struct *task);
void proc_exec_connector(struct task_struct *task);
void proc_id_connector(struct task_struct *task, int which_id);
void proc_sid_connector(struct task_struct *task);
+void proc_ptrace_connector(struct task_struct *task, int which_id);
void proc_exit_connector(struct task_struct *task);
#else
static inline void proc_fork_connector(struct task_struct *task)
@@ -124,6 +133,10 @@ static inline void proc_id_connector(struct task_struct *task,
static inline void proc_sid_connector(struct task_struct *task)
{}
+static inline void proc_ptrace_connector(struct task_struct *task,
+ int ptrace_id)
+{}
+
static inline void proc_exit_connector(struct task_struct *task)
{}
#endif /* CONFIG_PROC_EVENTS */
diff --git a/include/linux/device.h b/include/linux/device.h
index e4f62d8896b..160d4ddb249 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -516,7 +516,7 @@ struct device_dma_parameters {
* minimizes board-specific #ifdefs in drivers.
* @power: For device power management.
* See Documentation/power/devices.txt for details.
- * @pwr_domain: Provide callbacks that are executed during system suspend,
+ * @pm_domain: Provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions
* along with subsystem-level and driver-level callbacks.
* @numa_node: NUMA node this device is close to.
@@ -567,7 +567,7 @@ struct device {
void *platform_data; /* Platform specific data, device
core doesn't touch it */
struct dev_pm_info power;
- struct dev_power_domain *pwr_domain;
+ struct dev_pm_domain *pm_domain;
#ifdef CONFIG_NUMA
int numa_node; /* NUMA node this device is close to */
diff --git a/include/linux/firewire-cdev.h b/include/linux/firewire-cdev.h
index 4ff09889c5c..357dbfc2829 100644
--- a/include/linux/firewire-cdev.h
+++ b/include/linux/firewire-cdev.h
@@ -30,10 +30,13 @@
#include <linux/types.h>
#include <linux/firewire-constants.h>
+/* available since kernel version 2.6.22 */
#define FW_CDEV_EVENT_BUS_RESET 0x00
#define FW_CDEV_EVENT_RESPONSE 0x01
#define FW_CDEV_EVENT_REQUEST 0x02
#define FW_CDEV_EVENT_ISO_INTERRUPT 0x03
+
+/* available since kernel version 2.6.30 */
#define FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED 0x04
#define FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED 0x05
@@ -120,24 +123,11 @@ struct fw_cdev_event_response {
/**
* struct fw_cdev_event_request - Old version of &fw_cdev_event_request2
- * @closure: See &fw_cdev_event_common; set by %FW_CDEV_IOC_ALLOCATE ioctl
* @type: See &fw_cdev_event_common; always %FW_CDEV_EVENT_REQUEST
- * @tcode: See &fw_cdev_event_request2
- * @offset: See &fw_cdev_event_request2
- * @handle: See &fw_cdev_event_request2
- * @length: See &fw_cdev_event_request2
- * @data: See &fw_cdev_event_request2
*
* This event is sent instead of &fw_cdev_event_request2 if the kernel or
- * the client implements ABI version <= 3.
- *
- * Unlike &fw_cdev_event_request2, the sender identity cannot be established,
- * broadcast write requests cannot be distinguished from unicast writes, and
- * @tcode of lock requests is %TCODE_LOCK_REQUEST.
- *
- * Requests to the FCP_REQUEST or FCP_RESPONSE register are responded to as
- * with &fw_cdev_event_request2, except in kernel 2.6.32 and older which send
- * the response packet of the client's %FW_CDEV_IOC_SEND_RESPONSE ioctl.
+ * the client implements ABI version <= 3. &fw_cdev_event_request lacks
+ * essential information; use &fw_cdev_event_request2 instead.
*/
struct fw_cdev_event_request {
__u64 closure;
@@ -452,29 +442,31 @@ union fw_cdev_event {
* %FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL, and
* %FW_CDEV_IOC_SET_ISO_CHANNELS
*/
-#define FW_CDEV_VERSION 3 /* Meaningless; don't use this macro. */
/**
* struct fw_cdev_get_info - General purpose information ioctl
* @version: The version field is just a running serial number. Both an
* input parameter (ABI version implemented by the client) and
* output parameter (ABI version implemented by the kernel).
- * A client must not fill in an %FW_CDEV_VERSION defined from an
- * included kernel header file but the actual version for which
- * the client was implemented. This is necessary for forward
- * compatibility. We never break backwards compatibility, but
- * may add more structs, events, and ioctls in later revisions.
- * @rom_length: If @rom is non-zero, at most rom_length bytes of configuration
+ * A client shall fill in the ABI @version for which the client
+ * was implemented. This is necessary for forward compatibility.
+ * @rom_length: If @rom is non-zero, up to @rom_length bytes of Configuration
* ROM will be copied into that user space address. In either
* case, @rom_length is updated with the actual length of the
- * configuration ROM.
+ * Configuration ROM.
* @rom: If non-zero, address of a buffer to be filled by a copy of the
- * device's configuration ROM
+ * device's Configuration ROM
* @bus_reset: If non-zero, address of a buffer to be filled by a
* &struct fw_cdev_event_bus_reset with the current state
* of the bus. This does not cause a bus reset to happen.
* @bus_reset_closure: Value of &closure in this and subsequent bus reset events
* @card: The index of the card this device belongs to
+ *
+ * The %FW_CDEV_IOC_GET_INFO ioctl is usually the very first one which a client
+ * performs right after it opened a /dev/fw* file.
+ *
+ * As a side effect, this ioctl starts delivery of %FW_CDEV_EVENT_BUS_RESET
+ * events, which can subsequently be read(2) by the client.
*/
struct fw_cdev_get_info {
__u32 version;
@@ -612,7 +604,7 @@ struct fw_cdev_initiate_bus_reset {
* @handle: Handle to the descriptor, written by the kernel
*
* Add a descriptor block and optionally a preceding immediate key to the local
- * node's configuration ROM.
+ * node's Configuration ROM.
*
* The @key field specifies the upper 8 bits of the descriptor root directory
* pointer and the @data and @length fields specify the contents. The @key
@@ -627,9 +619,9 @@ struct fw_cdev_initiate_bus_reset {
* If successful, the kernel adds the descriptor and writes back a @handle to
* the kernel-side object to be used for later removal of the descriptor block
* and immediate key. The kernel will also generate a bus reset to signal the
- * change of the configuration ROM to other nodes.
+ * change of the Configuration ROM to other nodes.
*
- * This ioctl affects the configuration ROMs of all local nodes.
+ * This ioctl affects the Configuration ROMs of all local nodes.
* The ioctl only succeeds on device files which represent a local node.
*/
struct fw_cdev_add_descriptor {
@@ -641,13 +633,13 @@ struct fw_cdev_add_descriptor {
};
/**
- * struct fw_cdev_remove_descriptor - Remove contents from the configuration ROM
+ * struct fw_cdev_remove_descriptor - Remove contents from the Configuration ROM
* @handle: Handle to the descriptor, as returned by the kernel when the
* descriptor was added
*
* Remove a descriptor block and accompanying immediate key from the local
- * nodes' configuration ROMs. The kernel will also generate a bus reset to
- * signal the change of the configuration ROM to other nodes.
+ * nodes' Configuration ROMs. The kernel will also generate a bus reset to
+ * signal the change of the Configuration ROM to other nodes.
*/
struct fw_cdev_remove_descriptor {
__u32 handle;
@@ -863,13 +855,8 @@ struct fw_cdev_stop_iso {
* @local_time: system time, in microseconds since the Epoch
* @cycle_timer: Cycle Time register contents
*
- * The %FW_CDEV_IOC_GET_CYCLE_TIMER ioctl reads the isochronous cycle timer
- * and also the system clock (%CLOCK_REALTIME). This allows to express the
- * receive time of an isochronous packet as a system time.
- *
- * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
- * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register
- * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394.
+ * Same as %FW_CDEV_IOC_GET_CYCLE_TIMER2, but fixed to use %CLOCK_REALTIME
+ * and limited to microsecond resolution.
*
* In version 1 and 2 of the ABI, this ioctl returned unreliable (non-
* monotonic) @cycle_timer values on certain controllers.
@@ -886,10 +873,17 @@ struct fw_cdev_get_cycle_timer {
* @clk_id: input parameter, clock from which to get the system time
* @cycle_timer: Cycle Time register contents
*
- * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 works like
- * %FW_CDEV_IOC_GET_CYCLE_TIMER but lets you choose a clock like with POSIX'
- * clock_gettime function. Supported @clk_id values are POSIX' %CLOCK_REALTIME
- * and %CLOCK_MONOTONIC and Linux' %CLOCK_MONOTONIC_RAW.
+ * The %FW_CDEV_IOC_GET_CYCLE_TIMER2 ioctl reads the isochronous cycle timer
+ * and also the system clock. This allows the reception time of isochronous
+ * packets to be correlated with system time.
+ *
+ * @clk_id lets you choose a clock like with POSIX' clock_gettime function.
+ * Supported @clk_id values are POSIX' %CLOCK_REALTIME and %CLOCK_MONOTONIC
+ * and Linux' %CLOCK_MONOTONIC_RAW.
+ *
+ * @cycle_timer consists of 7 bits cycleSeconds, 13 bits cycleCount, and
+ * 12 bits cycleOffset, in host byte order. Cf. the Cycle Time register
+ * per IEEE 1394 or Isochronous Cycle Timer register per OHCI-1394.
*/
struct fw_cdev_get_cycle_timer2 {
__s64 tv_sec;
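
Given the 7/13/12 bit layout described above, unpacking a @cycle_timer value in host byte order looks roughly like this (a sketch; the example register value is made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Unpack an OHCI-1394 Cycle Time value: 7 bits cycleSeconds,
	 * 13 bits cycleCount, 12 bits cycleOffset, in host byte order. */
	int main(void)
	{
		uint32_t cycle_timer = 0x12345678;	/* example register value */
		unsigned seconds = (cycle_timer >> 25) & 0x7f;
		unsigned count   = (cycle_timer >> 12) & 0x1fff;
		unsigned offset  =  cycle_timer        & 0xfff;

		printf("%us, cycle %u, offset %u/3072\n", seconds, count, offset);
		return 0;
	}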
@@ -1011,4 +1005,6 @@ struct fw_cdev_receive_phy_packets {
__u64 closure;
};
+#define FW_CDEV_VERSION 3 /* Meaningless legacy macro; don't use it. */
+
#endif /* _LINUX_FIREWIRE_CDEV_H */
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 42f7e2fb501..9cf8e7ae745 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -453,7 +453,8 @@ struct hid_input {
enum hid_type {
HID_TYPE_OTHER = 0,
- HID_TYPE_USBMOUSE
+ HID_TYPE_USBMOUSE,
+ HID_TYPE_USBNONE
};
struct hid_driver;
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 2fb1dcbcb5a..9962c6bb131 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -59,8 +59,6 @@ struct lguest_data {
unsigned long reserve_mem;
/* KHz for the TSC clock. */
u32 tsc_khz;
- /* Page where the top-level pagetable is */
- unsigned long pgdir;
/* Fields initialized by the Guest at boot: */
/* Instruction range to suppress interrupts even if enabled */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 5a90266c3a5..0dc98044d8b 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -68,6 +68,11 @@
* controller and report the event to the driver.
*/
#define TMIO_MMC_HAS_COLD_CD (1 << 3)
+/*
+ * Some controllers require waiting for the SD bus to become
+ * idle before writing to some registers.
+ */
+#define TMIO_MMC_HAS_IDLE_WAIT (1 << 4)
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
@@ -80,6 +85,8 @@ struct tmio_mmc_dma {
int alignment_shift;
};
+struct tmio_mmc_host;
+
/*
* data for the MMC controller
*/
@@ -94,6 +101,7 @@ struct tmio_mmc_data {
void (*set_pwr)(struct platform_device *host, int state);
void (*set_clk_div)(struct platform_device *host, int state);
int (*get_cd)(struct platform_device *host);
+ int (*write16_hook)(struct tmio_mmc_host *host, int addr);
};
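
A write16_hook implementation would typically poll the controller's idle indication before permitting the write. A hypothetical userspace mock of that shape (the register, bit position, and helper names are invented for illustration):

	#include <stdio.h>

	#define SD_BUS_IDLE	(1u << 13)	/* assumed idle bit; invented for this mock */

	static unsigned int sd_info2 = SD_BUS_IDLE;	/* mock status register */

	static int wait_for_idle(void)
	{
		int tries = 1000;

		while (!(sd_info2 & SD_BUS_IDLE) && --tries)
			;	/* the real driver would cpu_relax() and time out */
		return tries ? 0 : -1;
	}

	/* Hook called before every 16-bit register write. */
	static int write16_hook(int addr)
	{
		if (wait_for_idle())
			return -1;
		printf("bus idle, 16-bit write to reg %#x may proceed\n", addr);
		return 0;
	}

	int main(void)
	{
		return write16_hook(0x20) ? 1 : 0;
	}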
static inline void tmio_mmc_cd_wakeup(struct tmio_mmc_data *pdata)
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 9a18667c13c..b56e4587208 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -123,6 +123,9 @@ enum {
/* debug commands */
MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
MLX4_CMD_SET_DEBUG_MSG = 0x2b,
+
+ /* statistics commands */
+ MLX4_CMD_QUERY_IF_STAT = 0x54,
};
enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8985768e2c0..387329e0230 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -58,22 +58,28 @@ enum {
};
enum {
- MLX4_DEV_CAP_FLAG_RC = 1 << 0,
- MLX4_DEV_CAP_FLAG_UC = 1 << 1,
- MLX4_DEV_CAP_FLAG_UD = 1 << 2,
- MLX4_DEV_CAP_FLAG_SRQ = 1 << 6,
- MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1 << 7,
- MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1 << 8,
- MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1 << 9,
- MLX4_DEV_CAP_FLAG_DPDP = 1 << 12,
- MLX4_DEV_CAP_FLAG_BLH = 1 << 15,
- MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1 << 16,
- MLX4_DEV_CAP_FLAG_APM = 1 << 17,
- MLX4_DEV_CAP_FLAG_ATOMIC = 1 << 18,
- MLX4_DEV_CAP_FLAG_RAW_MCAST = 1 << 19,
- MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1 << 20,
- MLX4_DEV_CAP_FLAG_UD_MCAST = 1 << 21,
- MLX4_DEV_CAP_FLAG_IBOE = 1 << 30
+ MLX4_DEV_CAP_FLAG_RC = 1LL << 0,
+ MLX4_DEV_CAP_FLAG_UC = 1LL << 1,
+ MLX4_DEV_CAP_FLAG_UD = 1LL << 2,
+ MLX4_DEV_CAP_FLAG_SRQ = 1LL << 6,
+ MLX4_DEV_CAP_FLAG_IPOIB_CSUM = 1LL << 7,
+ MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8,
+ MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9,
+ MLX4_DEV_CAP_FLAG_DPDP = 1LL << 12,
+ MLX4_DEV_CAP_FLAG_BLH = 1LL << 15,
+ MLX4_DEV_CAP_FLAG_MEM_WINDOW = 1LL << 16,
+ MLX4_DEV_CAP_FLAG_APM = 1LL << 17,
+ MLX4_DEV_CAP_FLAG_ATOMIC = 1LL << 18,
+ MLX4_DEV_CAP_FLAG_RAW_MCAST = 1LL << 19,
+ MLX4_DEV_CAP_FLAG_UD_AV_PORT = 1LL << 20,
+ MLX4_DEV_CAP_FLAG_UD_MCAST = 1LL << 21,
+ MLX4_DEV_CAP_FLAG_IBOE = 1LL << 30,
+ MLX4_DEV_CAP_FLAG_UC_LOOPBACK = 1LL << 32,
+ MLX4_DEV_CAP_FLAG_WOL = 1LL << 38,
+ MLX4_DEV_CAP_FLAG_UDP_RSS = 1LL << 40,
+ MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
+ MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
+ MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48
};
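
The switch from "1 <<" to "1LL <<" matters once flag bits pass 31: shifting a plain int by 32 or more positions is undefined behaviour in C, while a long long shift is well defined. A minimal demonstration (the flag names here are illustrative):

	#include <inttypes.h>
	#include <stdio.h>

	#define CAP_FLAG_IBOE		(1ULL << 30)
	#define CAP_FLAG_UC_LOOPBACK	(1ULL << 32)	/* 1 << 32 would overflow an int */

	int main(void)
	{
		uint64_t flags = CAP_FLAG_IBOE | CAP_FLAG_UC_LOOPBACK;

		if (flags & CAP_FLAG_UC_LOOPBACK)
			printf("loopback supported, flags=%#" PRIx64 "\n", flags);
		return 0;
	}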
enum {
@@ -253,15 +259,10 @@ struct mlx4_caps {
int mtt_entry_sz;
u32 max_msg_sz;
u32 page_size_cap;
- u32 flags;
+ u64 flags;
u32 bmme_flags;
u32 reserved_lkey;
u16 stat_rate_support;
- int udp_rss;
- int loopback_support;
- int vep_uc_steering;
- int vep_mc_steering;
- int wol;
u8 port_width_cap[MLX4_MAX_PORTS + 1];
int max_gso_sz;
int reserved_qps_cnt[MLX4_NUM_QP_REGION];
@@ -274,6 +275,7 @@ struct mlx4_caps {
u8 supported_type[MLX4_MAX_PORTS + 1];
u32 port_mask;
enum mlx4_port_type possible_type[MLX4_MAX_PORTS + 1];
+ u32 max_counters;
};
struct mlx4_buf_list {
@@ -438,6 +440,17 @@ union mlx4_ext_av {
struct mlx4_eth_av eth;
};
+struct mlx4_counter {
+ u8 reserved1[3];
+ u8 counter_mode;
+ __be32 num_ifc;
+ u32 reserved2[2];
+ __be64 rx_frames;
+ __be64 rx_bytes;
+ __be64 tx_frames;
+ __be64 tx_bytes;
+};
+
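The __be64 statistics fields arrive big-endian from the hardware, so a reader on a little-endian host must byte-swap them. A userspace sketch using glibc's endian.h helpers (assuming Linux/glibc; the kernel itself would use be64_to_cpu):

	#include <endian.h>
	#include <inttypes.h>
	#include <stdio.h>

	/* Hardware-format counter block: 64-bit fields are big-endian. */
	struct counter_block {
		uint64_t rx_frames;	/* __be64 in the kernel struct */
		uint64_t rx_bytes;
	};

	int main(void)
	{
		struct counter_block raw = {
			.rx_frames = htobe64(123456),	/* simulate device output */
			.rx_bytes  = htobe64(987654321),
		};

		printf("rx_frames=%" PRIu64 " rx_bytes=%" PRIu64 "\n",
		       be64toh(raw.rx_frames), be64toh(raw.rx_bytes));
		return 0;
	}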
struct mlx4_dev {
struct pci_dev *pdev;
unsigned long flags;
@@ -568,4 +581,7 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec);
int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+
#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9e9eb21056c..4001c8249db 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -54,7 +54,8 @@ enum mlx4_qp_optpar {
MLX4_QP_OPTPAR_RETRY_COUNT = 1 << 12,
MLX4_QP_OPTPAR_RNR_RETRY = 1 << 13,
MLX4_QP_OPTPAR_ACK_TIMEOUT = 1 << 14,
- MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16
+ MLX4_QP_OPTPAR_SCHED_QUEUE = 1 << 16,
+ MLX4_QP_OPTPAR_COUNTER_INDEX = 1 << 20
};
enum mlx4_qp_state {
@@ -99,7 +100,7 @@ struct mlx4_qp_path {
u8 fl;
u8 reserved1[2];
u8 pkey_index;
- u8 reserved2;
+ u8 counter_index;
u8 grh_mylmc;
__be16 rlid;
u8 ackto;
@@ -111,8 +112,7 @@ struct mlx4_qp_path {
u8 sched_queue;
u8 vlan_index;
u8 reserved3[2];
- u8 counter_index;
- u8 reserved4;
+ u8 reserved4[2];
u8 dmac[6];
};
diff --git a/include/linux/mmc/boot.h b/include/linux/mmc/boot.h
index 39d787c229c..23acc3baa07 100644
--- a/include/linux/mmc/boot.h
+++ b/include/linux/mmc/boot.h
@@ -1,7 +1,7 @@
-#ifndef MMC_BOOT_H
-#define MMC_BOOT_H
+#ifndef LINUX_MMC_BOOT_H
+#define LINUX_MMC_BOOT_H
enum { MMC_PROGRESS_ENTER, MMC_PROGRESS_INIT,
MMC_PROGRESS_LOAD, MMC_PROGRESS_DONE };
-#endif
+#endif /* LINUX_MMC_BOOT_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 6ad43554ac0..b460fc2af8a 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -403,4 +403,4 @@ extern void mmc_unregister_driver(struct mmc_driver *);
extern void mmc_fixup_device(struct mmc_card *card,
const struct mmc_fixup *table);
-#endif
+#endif /* LINUX_MMC_CARD_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index b6718e549a5..b8b1b7a311f 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -117,6 +117,7 @@ struct mmc_data {
unsigned int sg_len; /* size of scatter list */
struct scatterlist *sg; /* I/O scatter list */
+ s32 host_cookie; /* host private data */
};
struct mmc_request {
@@ -125,13 +126,16 @@ struct mmc_request {
struct mmc_data *data;
struct mmc_command *stop;
- void *done_data; /* completion data */
+ struct completion completion;
void (*done)(struct mmc_request *);/* completion function */
};
struct mmc_host;
struct mmc_card;
+struct mmc_async_req;
+extern struct mmc_async_req *mmc_start_req(struct mmc_host *,
+ struct mmc_async_req *, int *);
extern void mmc_wait_for_req(struct mmc_host *, struct mmc_request *);
extern int mmc_wait_for_cmd(struct mmc_host *, struct mmc_command *, int);
extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
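
The switch from a done_data pointer to an embedded struct completion means the synchronous wait needs no external state: the done() callback simply completes the completion inside the request. A rough userspace analogue using pthreads (the field names mirror the struct above; the kernel primitives themselves differ):

	#include <pthread.h>
	#include <stdio.h>

	struct completion {
		pthread_mutex_t lock;
		pthread_cond_t cond;
		int done;
	};

	struct request {
		struct completion completion;	/* embedded, as in mmc_request */
		void (*done)(struct request *);
	};

	static void complete(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		c->done = 1;
		pthread_cond_signal(&c->cond);
		pthread_mutex_unlock(&c->lock);
	}

	static void wait_for_completion(struct completion *c)
	{
		pthread_mutex_lock(&c->lock);
		while (!c->done)
			pthread_cond_wait(&c->cond, &c->lock);
		pthread_mutex_unlock(&c->lock);
	}

	static void request_done(struct request *r)
	{
		complete(&r->completion);	/* no separate done_data needed */
	}

	static void *controller(void *arg)
	{
		struct request *r = arg;

		r->done(r);			/* hardware finished the transfer */
		return NULL;
	}

	int main(void)
	{
		struct request r = {
			.completion = { PTHREAD_MUTEX_INITIALIZER,
					PTHREAD_COND_INITIALIZER, 0 },
			.done = request_done,
		};
		pthread_t t;

		pthread_create(&t, NULL, controller, &r);
		wait_for_completion(&r.completion);
		pthread_join(t, NULL);
		puts("request completed");
		return 0;
	}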
@@ -155,6 +159,7 @@ extern int mmc_can_trim(struct mmc_card *card);
extern int mmc_can_secure_erase_trim(struct mmc_card *card);
extern int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
unsigned int nr);
+extern unsigned int mmc_calc_max_discard(struct mmc_card *card);
extern int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen);
@@ -179,4 +184,4 @@ static inline void mmc_claim_host(struct mmc_host *host)
extern u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max);
-#endif
+#endif /* LINUX_MMC_CORE_H */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index bdd7ceeb99e..6b46819705d 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -11,8 +11,8 @@
* (at your option) any later version.
*/
-#ifndef _LINUX_MMC_DW_MMC_H_
-#define _LINUX_MMC_DW_MMC_H_
+#ifndef LINUX_MMC_DW_MMC_H
+#define LINUX_MMC_DW_MMC_H
#define MAX_MCI_SLOTS 2
@@ -48,6 +48,7 @@ struct mmc_data;
* @data: The data currently being transferred, or NULL if no data
* transfer is in progress.
* @use_dma: Whether DMA channel is initialized or not.
+ * @using_dma: Whether DMA is in use for the current transfer.
* @sg_dma: Bus address of DMA buffer.
* @sg_cpu: Virtual address of DMA buffer.
* @dma_ops: Pointer to platform-specific DMA callbacks.
@@ -74,7 +75,11 @@ struct mmc_data;
* @pdev: Platform device associated with the MMC controller.
* @pdata: Platform data associated with the MMC controller.
* @slot: Slots sharing this MMC controller.
+ * @fifo_depth: depth of FIFO.
* @data_shift: log2 of FIFO item size.
+ * @part_buf_start: Start index in part_buf.
+ * @part_buf_count: Bytes of partial data in part_buf.
+ * @part_buf: Simple buffer for partial fifo reads/writes.
* @push_data: Pointer to FIFO push function.
* @pull_data: Pointer to FIFO pull function.
* @quirks: Set of quirks that apply to specific versions of the IP.
@@ -117,6 +122,7 @@ struct dw_mci {
/* DMA interface members*/
int use_dma;
+ int using_dma;
dma_addr_t sg_dma;
void *sg_cpu;
@@ -131,7 +137,7 @@ struct dw_mci {
u32 stop_cmdr;
u32 dir_status;
struct tasklet_struct tasklet;
- struct tasklet_struct card_tasklet;
+ struct work_struct card_work;
unsigned long pending_events;
unsigned long completed_events;
enum dw_mci_state state;
@@ -146,7 +152,15 @@ struct dw_mci {
struct dw_mci_slot *slot[MAX_MCI_SLOTS];
/* FIFO push and pull */
+ int fifo_depth;
int data_shift;
+ u8 part_buf_start;
+ u8 part_buf_count;
+ union {
+ u16 part_buf16;
+ u32 part_buf32;
+ u64 part_buf;
+ };
void (*push_data)(struct dw_mci *host, void *buf, int cnt);
void (*pull_data)(struct dw_mci *host, void *buf, int cnt);
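
The part_buf machinery above exists because callers hand the driver byte counts that are not multiples of the FIFO word size. A rough userspace sketch of the buffering idea, assuming a 32-bit FIFO (the real driver also handles 16- and 64-bit FIFOs and the read direction):

	#include <stdint.h>
	#include <stdio.h>

	/* Accumulate arbitrary-length writes into 32-bit FIFO words,
	 * carrying leftover bytes in a partial buffer between calls. */
	struct fifo {
		uint32_t part_buf;
		unsigned part_count;	/* bytes currently held in part_buf */
	};

	static void fifo_push_word(uint32_t w)
	{
		printf("FIFO <- %#010x\n", w);
	}

	static void fifo_write(struct fifo *f, const void *buf, size_t cnt)
	{
		const uint8_t *p = buf;

		while (cnt--) {
			f->part_buf |= (uint32_t)*p++ << (8 * f->part_count);
			if (++f->part_count == 4) {
				fifo_push_word(f->part_buf);
				f->part_buf = 0;
				f->part_count = 0;
			}
		}
	}

	int main(void)
	{
		struct fifo f = { 0, 0 };

		fifo_write(&f, "abcde", 5);	/* pushes one word, keeps 'e' */
		fifo_write(&f, "fgh", 3);	/* completes the second word */
		return 0;
	}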
@@ -196,6 +210,12 @@ struct dw_mci_board {
unsigned int bus_hz; /* Bus speed */
unsigned int caps; /* Capabilities */
+ /*
+ * Override fifo depth. If 0, autodetect it from the FIFOTH register,
+ * but note that this may not be reliable after a bootloader has used
+ * it.
+ */
+ unsigned int fifo_depth;
/* delay in mS before detecting cards after interrupt */
u32 detect_delay_ms;
@@ -219,4 +239,4 @@ struct dw_mci_board {
struct block_settings *blk_settings;
};
-#endif /* _LINUX_MMC_DW_MMC_H_ */
+#endif /* LINUX_MMC_DW_MMC_H */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 1ee4424462e..0f83858147a 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -106,6 +106,15 @@ struct mmc_host_ops {
*/
int (*enable)(struct mmc_host *host);
int (*disable)(struct mmc_host *host, int lazy);
+ /*
+ * It is optional for the host to implement pre_req and post_req in
+ * order to support double buffering of requests (prepare one
+ * request while another request is active).
+ */
+ void (*post_req)(struct mmc_host *host, struct mmc_request *req,
+ int err);
+ void (*pre_req)(struct mmc_host *host, struct mmc_request *req,
+ bool is_first_req);
void (*request)(struct mmc_host *host, struct mmc_request *req);
/*
* Avoid calling these three functions too often or in a "fast path",
@@ -139,11 +148,22 @@ struct mmc_host_ops {
int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios);
int (*execute_tuning)(struct mmc_host *host);
void (*enable_preset_value)(struct mmc_host *host, bool enable);
+ int (*select_drive_strength)(unsigned int max_dtr, int host_drv, int card_drv);
};
struct mmc_card;
struct device;
+struct mmc_async_req {
+ /* active mmc request */
+ struct mmc_request *mrq;
+ /*
+ * Check error status of completed mmc request.
+ * Returns 0 on success, non-zero otherwise.
+ */
+ int (*err_check) (struct mmc_card *, struct mmc_async_req *);
+};
+
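Together with the pre_req/post_req hooks above, mmc_start_req() lets block-layer code prepare the next request while the current one is in flight. A schematic of the caller-side pattern, with hypothetical prepare/issue/finish helpers standing in for the host callbacks (sequential here; overlapped in the real driver):

	#include <stdio.h>

	struct req { int id; };

	/* Hypothetical stand-ins for the pre_req/request/post_req callbacks. */
	static void prepare(struct req *r) { printf("prepare  req %d\n", r->id); }
	static void issue(struct req *r)   { printf("issue    req %d\n", r->id); }
	static void finish(struct req *r)  { printf("complete req %d\n", r->id); }

	int main(void)
	{
		struct req reqs[] = { {1}, {2}, {3} };
		struct req *active = NULL;
		size_t i;

		for (i = 0; i < 3; i++) {
			prepare(&reqs[i]);	/* DMA mapping can overlap the... */
			if (active)
				finish(active);	/* ...transfer of the previous request */
			issue(&reqs[i]);
			active = &reqs[i];
		}
		if (active)
			finish(active);
		return 0;
	}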
struct mmc_host {
struct device *parent;
struct device class_dev;
@@ -231,6 +251,7 @@ struct mmc_host {
unsigned int max_req_size; /* maximum number of bytes in one req */
unsigned int max_blk_size; /* maximum size of one mmc block */
unsigned int max_blk_count; /* maximum number of blocks in one req */
+ unsigned int max_discard_to; /* max. discard timeout in ms */
/* private data */
spinlock_t lock; /* lock for claim and bus ops */
@@ -281,6 +302,8 @@ struct mmc_host {
struct dentry *debugfs_root;
+ struct mmc_async_req *areq; /* active async req */
+
unsigned long private[0] ____cacheline_aligned;
};
@@ -373,5 +396,4 @@ static inline int mmc_host_cmd23(struct mmc_host *host)
{
return host->caps & MMC_CAP_CMD23;
}
-#endif
-
+#endif /* LINUX_MMC_HOST_H */
diff --git a/include/linux/mmc/ioctl.h b/include/linux/mmc/ioctl.h
index 5baf2983a12..8fa5bc5f805 100644
--- a/include/linux/mmc/ioctl.h
+++ b/include/linux/mmc/ioctl.h
@@ -51,4 +51,4 @@ struct mmc_ioc_cmd {
* block device operations.
*/
#define MMC_IOC_MAX_BYTES (512L * 256)
-#endif /* LINUX_MMC_IOCTL_H */
+#endif /* LINUX_MMC_IOCTL_H */
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index ac26a685cca..5a794cb503e 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -21,8 +21,8 @@
* 15 May 2002
*/
-#ifndef MMC_MMC_H
-#define MMC_MMC_H
+#ifndef LINUX_MMC_MMC_H
+#define LINUX_MMC_MMC_H
/* Standard MMC commands (4.1) type argument response */
/* class 1 */
@@ -140,6 +140,16 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
#define R1_APP_CMD (1 << 5) /* sr, c */
+#define R1_STATE_IDLE 0
+#define R1_STATE_READY 1
+#define R1_STATE_IDENT 2
+#define R1_STATE_STBY 3
+#define R1_STATE_TRAN 4
+#define R1_STATE_DATA 5
+#define R1_STATE_RCV 6
+#define R1_STATE_PRG 7
+#define R1_STATE_DIS 8
+
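These values name the card-state field carried in bits 12:9 of an R1 status word, which the kernel extracts with the R1_CURRENT_STATE() macro. A small decoding sketch (the example status value is made up):

	#include <stdio.h>

	/* Card state lives in bits 12:9 of the R1 status word. */
	#define R1_CURRENT_STATE(x)	(((x) & 0x00001E00) >> 9)

	static const char *state_name[] = {
		"idle", "ready", "ident", "stby", "tran",
		"data", "rcv", "prg", "dis",
	};

	int main(void)
	{
		unsigned int status = 0x00000900;	/* example: state field = 4 */
		unsigned int state = R1_CURRENT_STATE(status);

		printf("card state: %u (%s)\n", state, state_name[state]);
		return 0;
	}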
/*
* MMC/SD in SPI mode reports R1 status always, and R2 for SEND_STATUS
* R1 is the low order byte; R2 is the next highest byte, when present.
@@ -327,5 +337,4 @@ struct _mmc_csd {
#define MMC_SWITCH_MODE_CLEAR_BITS 0x02 /* Clear bits which are 1 in value */
#define MMC_SWITCH_MODE_WRITE_BYTE 0x03 /* Set target to value */
-#endif /* MMC_MMC_PROTOCOL_H */
-
+#endif /* LINUX_MMC_MMC_H */
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index d37aac49cf9..4a139204c20 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -27,4 +27,4 @@ typedef unsigned int mmc_pm_flag_t;
#define MMC_PM_KEEP_POWER (1 << 0) /* preserve card power during suspend */
#define MMC_PM_WAKE_SDIO_IRQ (1 << 1) /* wake up host system on SDIO IRQ assertion */
-#endif
+#endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h
index 7d35d52c3df..1ebcf9ba125 100644
--- a/include/linux/mmc/sd.h
+++ b/include/linux/mmc/sd.h
@@ -9,8 +9,8 @@
* your option) any later version.
*/
-#ifndef MMC_SD_H
-#define MMC_SD_H
+#ifndef LINUX_MMC_SD_H
+#define LINUX_MMC_SD_H
/* SD commands type argument response */
/* class 0 */
@@ -91,5 +91,4 @@
#define SD_SWITCH_ACCESS_DEF 0
#define SD_SWITCH_ACCESS_HS 1
-#endif
-
+#endif /* LINUX_MMC_SD_H */
diff --git a/include/linux/mmc/sdhci-pltfm.h b/include/linux/mmc/sdhci-pltfm.h
deleted file mode 100644
index 548d59d404c..00000000000
--- a/include/linux/mmc/sdhci-pltfm.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Platform data declarations for the sdhci-pltfm driver.
- *
- * Copyright (c) 2010 MontaVista Software, LLC.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-#ifndef _SDHCI_PLTFM_H
-#define _SDHCI_PLTFM_H
-
-struct sdhci_ops;
-struct sdhci_host;
-
-/**
- * struct sdhci_pltfm_data - SDHCI platform-specific information & hooks
- * @ops: optional pointer to the platform-provided SDHCI ops
- * @quirks: optional SDHCI quirks
- * @init: optional hook that is called during device probe, before the
- * driver tries to access any SDHCI registers
- * @exit: optional hook that is called during device removal
- */
-struct sdhci_pltfm_data {
- struct sdhci_ops *ops;
- unsigned int quirks;
- int (*init)(struct sdhci_host *host, struct sdhci_pltfm_data *pdata);
- void (*exit)(struct sdhci_host *host);
-};
-
-#endif /* _SDHCI_PLTFM_H */
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
index 9188c973f3e..5cdc96da9dd 100644
--- a/include/linux/mmc/sdhci-spear.h
+++ b/include/linux/mmc/sdhci-spear.h
@@ -11,8 +11,8 @@
* warranty of any kind, whether express or implied.
*/
-#ifndef MMC_SDHCI_SPEAR_H
-#define MMC_SDHCI_SPEAR_H
+#ifndef LINUX_MMC_SDHCI_SPEAR_H
+#define LINUX_MMC_SDHCI_SPEAR_H
#include <linux/platform_device.h>
/*
@@ -39,4 +39,4 @@ sdhci_set_plat_data(struct platform_device *pdev, struct sdhci_plat_data *data)
pdev->dev.platform_data = data;
}
-#endif /* MMC_SDHCI_SPEAR_H */
+#endif /* LINUX_MMC_SDHCI_SPEAR_H */
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 6a68c4eb4e4..5666f3abfab 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -8,8 +8,8 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
-#ifndef __SDHCI_H
-#define __SDHCI_H
+#ifndef LINUX_MMC_SDHCI_H
+#define LINUX_MMC_SDHCI_H
#include <linux/scatterlist.h>
#include <linux/compiler.h>
@@ -162,4 +162,4 @@ struct sdhci_host {
unsigned long private[0] ____cacheline_aligned;
};
-#endif /* __SDHCI_H */
+#endif /* LINUX_MMC_SDHCI_H */
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 245cdacee54..2a2e9905a24 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -9,8 +9,8 @@
* your option) any later version.
*/
-#ifndef MMC_SDIO_H
-#define MMC_SDIO_H
+#ifndef LINUX_MMC_SDIO_H
+#define LINUX_MMC_SDIO_H
/* SDIO commands type argument response */
#define SD_IO_SEND_OP_COND 5 /* bcr [23:0] OCR R4 */
@@ -161,5 +161,4 @@
#define SDIO_FBR_BLKSIZE 0x10 /* block size (2 bytes) */
-#endif
-
+#endif /* LINUX_MMC_SDIO_H */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index 31baaf82f45..50f0bc95232 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -9,8 +9,8 @@
* your option) any later version.
*/
-#ifndef MMC_SDIO_FUNC_H
-#define MMC_SDIO_FUNC_H
+#ifndef LINUX_MMC_SDIO_FUNC_H
+#define LINUX_MMC_SDIO_FUNC_H
#include <linux/device.h>
#include <linux/mod_devicetable.h>
@@ -161,5 +161,4 @@ extern void sdio_f0_writeb(struct sdio_func *func, unsigned char b,
extern mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func);
extern int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags);
-#endif
-
+#endif /* LINUX_MMC_SDIO_FUNC_H */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index a36ab3bc7b0..9f03feedc8e 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -2,8 +2,8 @@
* SDIO Classes, Interface Types, Manufacturer IDs, etc.
*/
-#ifndef MMC_SDIO_IDS_H
-#define MMC_SDIO_IDS_H
+#ifndef LINUX_MMC_SDIO_IDS_H
+#define LINUX_MMC_SDIO_IDS_H
/*
* Standard SDIO Function Interfaces
@@ -44,4 +44,4 @@
#define SDIO_DEVICE_ID_SIANO_NOVA_A0 0x1100
#define SDIO_DEVICE_ID_SIANO_STELLAR 0x5347
-#endif
+#endif /* LINUX_MMC_SDIO_IDS_H */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index 9eb9b4b96f5..0222cd8ebe7 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -11,8 +11,8 @@
*
*/
-#ifndef __SH_MMCIF_H__
-#define __SH_MMCIF_H__
+#ifndef LINUX_MMC_SH_MMCIF_H
+#define LINUX_MMC_SH_MMCIF_H
#include <linux/io.h>
#include <linux/platform_device.h>
@@ -220,4 +220,4 @@ static inline void sh_mmcif_boot_init(void __iomem *base)
sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
}
-#endif /* __SH_MMCIF_H__ */
+#endif /* LINUX_MMC_SH_MMCIF_H */
diff --git a/include/linux/mmc/sh_mobile_sdhi.h b/include/linux/mmc/sh_mobile_sdhi.h
index faf32b6ec18..bd50b365167 100644
--- a/include/linux/mmc/sh_mobile_sdhi.h
+++ b/include/linux/mmc/sh_mobile_sdhi.h
@@ -1,5 +1,5 @@
-#ifndef __SH_MOBILE_SDHI_H__
-#define __SH_MOBILE_SDHI_H__
+#ifndef LINUX_MMC_SH_MOBILE_SDHI_H
+#define LINUX_MMC_SH_MOBILE_SDHI_H
#include <linux/types.h>
@@ -17,4 +17,4 @@ struct sh_mobile_sdhi_info {
int (*get_cd)(struct platform_device *pdev);
};
-#endif /* __SH_MOBILE_SDHI_H__ */
+#endif /* LINUX_MMC_SH_MOBILE_SDHI_H */
diff --git a/include/linux/mmc/tmio.h b/include/linux/mmc/tmio.h
index 19490b942db..a1c1f321e51 100644
--- a/include/linux/mmc/tmio.h
+++ b/include/linux/mmc/tmio.h
@@ -12,8 +12,8 @@
*
* TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
*/
-#ifndef _LINUX_MMC_TMIO_H_
-#define _LINUX_MMC_TMIO_H_
+#ifndef LINUX_MMC_TMIO_H
+#define LINUX_MMC_TMIO_H
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
@@ -21,6 +21,7 @@
#define CTL_XFER_BLK_COUNT 0xa
#define CTL_RESPONSE 0x0c
#define CTL_STATUS 0x1c
+#define CTL_STATUS2 0x1e
#define CTL_IRQ_MASK 0x20
#define CTL_SD_CARD_CLK_CTL 0x24
#define CTL_SD_XFER_LEN 0x26
@@ -30,6 +31,7 @@
#define CTL_TRANSACTION_CTL 0x34
#define CTL_SDIO_STATUS 0x36
#define CTL_SDIO_IRQ_MASK 0x38
+#define CTL_DMA_ENABLE 0xd8
#define CTL_RESET_SD 0xe0
#define CTL_SDIO_REGS 0x100
#define CTL_CLK_AND_WAIT_CTL 0x138
@@ -60,4 +62,4 @@
#define TMIO_BBS 512 /* Boot block size */
-#endif /* _LINUX_MMC_TMIO_H_ */
+#endif /* LINUX_MMC_TMIO_H */
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 15da0e99f48..db4836bed51 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -155,12 +155,14 @@ struct ubi_device_info {
};
/*
- * enum - volume notification types.
- * @UBI_VOLUME_ADDED: volume has been added
- * @UBI_VOLUME_REMOVED: start volume volume
- * @UBI_VOLUME_RESIZED: volume size has been re-sized
- * @UBI_VOLUME_RENAMED: volume name has been re-named
- * @UBI_VOLUME_UPDATED: volume name has been updated
+ * Volume notification types.
+ * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a
+ * volume was created)
+ * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached
+ * or a volume was removed)
+ * @UBI_VOLUME_RESIZED: a volume has been re-sized
+ * @UBI_VOLUME_RENAMED: a volume has been re-named
+ * @UBI_VOLUME_UPDATED: data has been written to a volume
*
* These constants define which type of event has happened when a volume
* notification function is invoked.
diff --git a/include/linux/of.h b/include/linux/of.h
index bfc0ed1b0ce..bd716f8908d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -195,6 +195,13 @@ extern struct device_node *of_find_node_with_property(
extern struct property *of_find_property(const struct device_node *np,
const char *name,
int *lenp);
+extern int of_property_read_u32_array(const struct device_node *np,
+ char *propname,
+ u32 *out_values,
+ size_t sz);
+
+extern int of_property_read_string(struct device_node *np, char *propname,
+ const char **out_string);
extern int of_device_is_compatible(const struct device_node *device,
const char *);
extern int of_device_is_available(const struct device_node *device);
@@ -227,12 +234,32 @@ extern void of_attach_node(struct device_node *);
extern void of_detach_node(struct device_node *);
#endif
-#else
+#else /* CONFIG_OF */
static inline bool of_have_populated_dt(void)
{
return false;
}
+static inline int of_property_read_u32_array(const struct device_node *np,
+ char *propname, u32 *out_values, size_t sz)
+{
+ return -ENOSYS;
+}
+
+static inline int of_property_read_string(struct device_node *np,
+ char *propname, const char **out_string)
+{
+ return -ENOSYS;
+}
+
#endif /* CONFIG_OF */
+
+static inline int of_property_read_u32(const struct device_node *np,
+ char *propname,
+ u32 *out_value)
+{
+ return of_property_read_u32_array(np, propname, out_value, 1);
+}
+
#endif /* _LINUX_OF_H */
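[Editor's sketch] A driver probe might use the new property accessors as below; the "acme,*" property names are hypothetical, and the calls return -ENOSYS without CONFIG_OF or a negative errno when the property is absent or malformed:

#include <linux/kernel.h>
#include <linux/of.h>

static int example_parse_dt(struct device_node *np)
{
	const char *label;
	u32 width;

	/* Fall back to defaults when a property is missing. */
	if (of_property_read_u32(np, "acme,bus-width", &width))
		width = 4;

	if (of_property_read_string(np, "acme,label", &label))
		label = "unnamed";

	pr_info("example: bus width %u, label %s\n", width, label);
	return 0;
}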
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 2feda6ee614..3118623c2c1 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -1,11 +1,16 @@
#ifndef __OF_ADDRESS_H
#define __OF_ADDRESS_H
#include <linux/ioport.h>
+#include <linux/errno.h>
#include <linux/of.h>
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
+extern struct device_node *of_find_matching_node_by_address(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ u64 base_address);
extern void __iomem *of_iomap(struct device_node *device, int index);
/* Extract an address from a device, returns the region size and
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 6598c04dab0..aec8025c786 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -46,8 +46,9 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc)
return container_of(gc, struct of_mm_gpio_chip, gc);
}
-extern int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags);
+extern int of_get_named_gpio_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags);
+
extern unsigned int of_gpio_count(struct device_node *np);
extern int of_mm_gpiochip_add(struct device_node *np,
@@ -60,8 +61,8 @@ extern struct gpio_chip *of_node_to_gpiochip(struct device_node *np);
#else /* CONFIG_OF_GPIO */
/* Drivers may not strictly depend on the GPIO support, so let them link. */
-static inline int of_get_gpio_flags(struct device_node *np, int index,
- enum of_gpio_flags *flags)
+static inline int of_get_named_gpio_flags(struct device_node *np,
+ const char *list_name, int index, enum of_gpio_flags *flags)
{
return -ENOSYS;
}
@@ -77,7 +78,38 @@ static inline void of_gpiochip_remove(struct gpio_chip *gc) { }
#endif /* CONFIG_OF_GPIO */
/**
- * of_get_gpio - Get a GPIO number to use with GPIO API
+ * of_get_gpio_flags() - Get a GPIO number and flags to use with GPIO API
+ * @np: device node to get GPIO from
+ * @index: index of the GPIO
+ * @flags: a flags pointer to fill in
+ *
+ * Returns the GPIO number to use with the Linux generic GPIO API, or an
+ * errno value on error. If @flags is not NULL, the function also fills in
+ * the flags for the GPIO.
+ */
+static inline int of_get_gpio_flags(struct device_node *np, int index,
+ enum of_gpio_flags *flags)
+{
+ return of_get_named_gpio_flags(np, "gpios", index, flags);
+}
+
+/**
+ * of_get_named_gpio() - Get a GPIO number to use with GPIO API
+ * @np: device node to get GPIO from
+ * @propname: Name of property containing gpio specifier(s)
+ * @index: index of the GPIO
+ *
+ * Returns the GPIO number to use with the Linux generic GPIO API, or an
+ * errno value on error.
+ */
+static inline int of_get_named_gpio(struct device_node *np,
+ const char *propname, int index)
+{
+ return of_get_named_gpio_flags(np, propname, index, NULL);
+}
+
+/**
+ * of_get_gpio() - Get a GPIO number to use with GPIO API
* @np: device node to get GPIO from
* @index: index of the GPIO
*
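[Editor's sketch] With of_get_named_gpio_flags() as the primitive, a driver can pull a GPIO out of any named property rather than only "gpios". A sketch, assuming a hypothetical reset-gpios = <&gpio1 7 1>; binding:

#include <linux/gpio.h>
#include <linux/of_gpio.h>

static int example_request_reset(struct device_node *np)
{
	enum of_gpio_flags flags;
	int gpio;

	gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, &flags);
	if (gpio < 0)
		return gpio;	/* -ENOSYS without CONFIG_OF_GPIO */

	/* Leave reset de-asserted, honouring the active-low flag. */
	return gpio_request_one(gpio, (flags & OF_GPIO_ACTIVE_LOW) ?
				GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
				"reset");
}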
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 85a27b650d7..f93e21700d3 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -6,4 +6,9 @@
struct pci_dev;
struct of_irq;
int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
+
+struct device_node;
+struct device_node *of_pci_find_child_device(struct device_node *parent,
+ unsigned int devfn);
+
#endif
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index fb51ae38cea..5a6f458a4bb 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -20,6 +20,40 @@
#include <linux/platform_device.h>
/**
+ * struct of_dev_auxdata - lookup table entry for device names & platform_data
+ * @compatible: compatible value of node to match against node
+ * @phys_addr: Start address of registers to match against node
+ * @name: Name to assign for matching nodes
+ * @platform_data: platform_data to assign for matching nodes
+ *
+ * This lookup table allows the caller of of_platform_populate() to override
+ * the names of devices when creating devices from the device tree. The table
+ * should be terminated with an empty entry. It also allows the platform_data
+ * pointer to be set.
+ *
+ * The reason for this functionality is that some Linux infrastructure uses
+ * the device name to look up a specific device, but the Linux-specific names
+ * are not encoded into the device tree, so the kernel needs to provide specific
+ * values.
+ *
+ * Note: Using an auxdata lookup table should be considered a last resort when
+ * converting a platform to use the DT. Normally the automatically generated
+ * device name will not matter, and drivers should obtain data from the device
+ * node instead of from an anonymous platform_data pointer.
+ */
+struct of_dev_auxdata {
+ char *compatible;
+ resource_size_t phys_addr;
+ char *name;
+ void *platform_data;
+};
+
+/* Macro to simplify populating a lookup table */
+#define OF_DEV_AUXDATA(_compat,_phys,_name,_pdata) \
+ { .compatible = _compat, .phys_addr = _phys, .name = _name, \
+ .platform_data = _pdata }
+
+/**
* of_platform_driver - Legacy of-aware driver for platform devices.
*
* An of_platform_driver driver is attached to a basic platform_device on
@@ -40,6 +74,8 @@ struct of_platform_driver
#define to_of_platform_driver(drv) \
container_of(drv,struct of_platform_driver, driver)
+extern const struct of_device_id of_default_bus_match_table[];
+
/* Platform drivers register/unregister */
extern struct platform_device *of_device_alloc(struct device_node *np,
const char *bus_id,
@@ -55,6 +91,10 @@ extern struct platform_device *of_platform_device_create(struct device_node *np,
extern int of_platform_bus_probe(struct device_node *root,
const struct of_device_id *matches,
struct device *parent);
+extern int of_platform_populate(struct device_node *root,
+ const struct of_device_id *matches,
+ const struct of_dev_auxdata *lookup,
+ struct device *parent);
#endif /* !CONFIG_SPARC */
#endif /* CONFIG_OF_DEVICE */
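[Editor's sketch] A board-file view of the new populate-with-auxdata flow; the compatible string, address, legacy device name, and pdata struct here are all hypothetical:

#include <linux/of_platform.h>

struct example_pdata {
	int bus_width;
};

static struct example_pdata example_sd_pdata = { .bus_width = 4 };

/* Give the node at 0x70000000 its pre-DT name so clkdev lookups still match. */
static struct of_dev_auxdata example_auxdata[] __initdata = {
	OF_DEV_AUXDATA("acme,sdhci", 0x70000000, "sdhci.0", &example_sd_pdata),
	{ /* sentinel */ }
};

static void __init example_board_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
			     example_auxdata, NULL);
}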
diff --git a/include/linux/opp.h b/include/linux/opp.h
index 5449945d589..7020e9736fc 100644
--- a/include/linux/opp.h
+++ b/include/linux/opp.h
@@ -94,12 +94,20 @@ static inline int opp_disable(struct device *dev, unsigned long freq)
#if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP)
int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table);
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table);
#else
static inline int opp_init_cpufreq_table(struct device *dev,
struct cpufreq_frequency_table **table)
{
return -EINVAL;
}
+
+static inline
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+}
#endif /* CONFIG_CPU_FREQ */
#endif /* __LINUX_OPP_H__ */
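[Editor's sketch] opp_free_cpufreq_table() is the release half of opp_init_cpufreq_table(); the intended pairing in a cpufreq driver, with hypothetical function names:

#include <linux/cpufreq.h>
#include <linux/opp.h>

static struct cpufreq_frequency_table *example_table;

static int example_cpufreq_init(struct device *cpu_dev)
{
	/* -EINVAL here when CPU_FREQ/PM_OPP support is not configured. */
	return opp_init_cpufreq_table(cpu_dev, &example_table);
}

static void example_cpufreq_exit(struct device *cpu_dev)
{
	/* New in this patch: frees the table the init call allocated. */
	opp_free_cpufreq_table(cpu_dev, &example_table);
}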
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c446b5ca2d3..2d292182dde 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1589,5 +1589,33 @@ int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt);
int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
unsigned int len, const char *kw);
+/* PCI <-> OF binding helpers */
+#ifdef CONFIG_OF
+struct device_node;
+extern void pci_set_of_node(struct pci_dev *dev);
+extern void pci_release_of_node(struct pci_dev *dev);
+extern void pci_set_bus_of_node(struct pci_bus *bus);
+extern void pci_release_bus_of_node(struct pci_bus *bus);
+
+/* Arch may override this (weak) */
+extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
+
+static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
+{
+ return pdev ? pdev->dev.of_node : NULL;
+}
+
+static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus)
+{
+ return bus ? bus->dev.of_node : NULL;
+}
+
+#else /* CONFIG_OF */
+static inline void pci_set_of_node(struct pci_dev *dev) { }
+static inline void pci_release_of_node(struct pci_dev *dev) { }
+static inline void pci_set_bus_of_node(struct pci_bus *bus) { }
+static inline void pci_release_bus_of_node(struct pci_bus *bus) { }
+#endif /* CONFIG_OF */
+
#endif /* __KERNEL__ */
#endif /* LINUX_PCI_H */
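[Editor's sketch] With the PCI <-> OF binding helpers in place, a driver can reach the device-tree node behind a PCI device without arch-specific code:

#include <linux/of.h>
#include <linux/pci.h>

static void example_show_of_node(struct pci_dev *pdev)
{
	struct device_node *np = pci_device_to_OF_node(pdev);

	if (np)
		dev_info(&pdev->dev, "DT node: %s\n", np->full_name);
	else
		dev_info(&pdev->dev, "no DT node attached\n");
}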
diff --git a/include/linux/platform_data/pxa_sdhci.h b/include/linux/platform_data/pxa_sdhci.h
new file mode 100644
index 00000000000..51ad0995aba
--- /dev/null
+++ b/include/linux/platform_data/pxa_sdhci.h
@@ -0,0 +1,60 @@
+/*
+ * include/linux/platform_data/pxa_sdhci.h
+ *
+ * Copyright 2010 Marvell
+ * Zhangfei Gao <zhangfei.gao@marvell.com>
+ *
+ * PXA Platform - SDHCI platform data definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _PXA_SDHCI_H_
+#define _PXA_SDHCI_H_
+
+/* pxa specific flag */
+/* Require clock free running */
+#define PXA_FLAG_ENABLE_CLOCK_GATING (1<<0)
+/* card always wired to host, like on-chip emmc */
+#define PXA_FLAG_CARD_PERMANENT (1<<1)
+/* Board design supports 8-bit data on SD/SDIO BUS */
+#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
+
+/*
+ * struct sdhci_pxa_platdata - Platform device data for PXA SDHCI
+ * @flags: flags for platform requirement
+ * @clk_delay_cycles:
+ * mmp2: each step is roughly 100ps, 5bits width
+ * pxa910: each step is 1ns, 4bits width
+ * @clk_delay_sel: select clk_delay, used on pxa910
+ * 0: choose feedback clk
+ * 1: choose feedback clk + delay value
+ * 2: choose internal clk
+ * @clk_delay_enable: enable clk_delay or not, used on pxa910
+ * @ext_cd_gpio: gpio pin used for external CD line
+ * @ext_cd_gpio_invert: invert values for external CD gpio line
+ * @max_speed: the maximum speed supported
+ * @host_caps: Standard MMC host capabilities bit field.
+ * @quirks: quirks of the platform
+ * @pm_caps: pm_caps of the platform
+ */
+struct sdhci_pxa_platdata {
+ unsigned int flags;
+ unsigned int clk_delay_cycles;
+ unsigned int clk_delay_sel;
+ bool clk_delay_enable;
+ unsigned int ext_cd_gpio;
+ bool ext_cd_gpio_invert;
+ unsigned int max_speed;
+ unsigned int host_caps;
+ unsigned int quirks;
+ unsigned int pm_caps;
+};
+
+struct sdhci_pxa {
+ u8 clk_enable;
+ u8 power_mode;
+};
+#endif /* _PXA_SDHCI_H_ */
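[Editor's sketch] A board file might fill the new platform data as below, for a hypothetical design with soldered-down eMMC on an 8-bit bus; how the structure is handed to the sdhci-pxa driver (resources, registration helper) is left out:

#include <linux/platform_data/pxa_sdhci.h>

static struct sdhci_pxa_platdata example_emmc_pdata = {
	.flags		  = PXA_FLAG_CARD_PERMANENT |
			    PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
	.clk_delay_cycles = 0x1f,	/* board-tuned, see @clk_delay_cycles */
};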
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 411e4f4be52..f7c84c9abd3 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -461,8 +461,8 @@ struct dev_pm_info {
unsigned long active_jiffies;
unsigned long suspended_jiffies;
unsigned long accounting_timestamp;
- void *subsys_data; /* Owned by the subsystem. */
#endif
+ void *subsys_data; /* Owned by the subsystem. */
};
extern void update_pm_runtime_accounting(struct device *dev);
@@ -472,7 +472,7 @@ extern void update_pm_runtime_accounting(struct device *dev);
* hibernation, system resume and during runtime PM transitions along with
* subsystem-level and driver-level callbacks.
*/
-struct dev_power_domain {
+struct dev_pm_domain {
struct dev_pm_ops ops;
};
@@ -553,11 +553,17 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
extern int device_pm_wait_for_dev(struct device *sub, struct device *dev);
extern int pm_generic_prepare(struct device *dev);
+extern int pm_generic_suspend_noirq(struct device *dev);
extern int pm_generic_suspend(struct device *dev);
+extern int pm_generic_resume_noirq(struct device *dev);
extern int pm_generic_resume(struct device *dev);
+extern int pm_generic_freeze_noirq(struct device *dev);
extern int pm_generic_freeze(struct device *dev);
+extern int pm_generic_thaw_noirq(struct device *dev);
extern int pm_generic_thaw(struct device *dev);
+extern int pm_generic_restore_noirq(struct device *dev);
extern int pm_generic_restore(struct device *dev);
+extern int pm_generic_poweroff_noirq(struct device *dev);
extern int pm_generic_poweroff(struct device *dev);
extern void pm_generic_complete(struct device *dev);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
new file mode 100644
index 00000000000..21097cb086f
--- /dev/null
+++ b/include/linux/pm_domain.h
@@ -0,0 +1,108 @@
+/*
+ * pm_domain.h - Definitions and headers related to device power domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#ifndef _LINUX_PM_DOMAIN_H
+#define _LINUX_PM_DOMAIN_H
+
+#include <linux/device.h>
+
+enum gpd_status {
+ GPD_STATE_ACTIVE = 0, /* PM domain is active */
+ GPD_STATE_BUSY, /* Something is happening to the PM domain */
+ GPD_STATE_REPEAT, /* Power off in progress, to be repeated */
+ GPD_STATE_POWER_OFF, /* PM domain is off */
+};
+
+struct dev_power_governor {
+ bool (*power_down_ok)(struct dev_pm_domain *domain);
+};
+
+struct generic_pm_domain {
+ struct dev_pm_domain domain; /* PM domain operations */
+ struct list_head gpd_list_node; /* Node in the global PM domains list */
+ struct list_head sd_node; /* Node in the parent's subdomain list */
+ struct generic_pm_domain *parent; /* Parent PM domain */
+ struct list_head sd_list; /* List of subdomains */
+ struct list_head dev_list; /* List of devices */
+ struct mutex lock;
+ struct dev_power_governor *gov;
+ struct work_struct power_off_work;
+ unsigned int in_progress; /* Number of devices being suspended now */
+ unsigned int sd_count; /* Number of subdomains with power "on" */
+ enum gpd_status status; /* Current state of the domain */
+ wait_queue_head_t status_wait_queue;
+ struct task_struct *poweroff_task; /* Powering off task */
+ unsigned int resume_count; /* Number of devices being resumed */
+ unsigned int device_count; /* Number of devices */
+ unsigned int suspended_count; /* System suspend device counter */
+ unsigned int prepared_count; /* Suspend counter of prepared devices */
+ bool suspend_power_off; /* Power status before system suspend */
+ int (*power_off)(struct generic_pm_domain *domain);
+ int (*power_on)(struct generic_pm_domain *domain);
+ int (*start_device)(struct device *dev);
+ int (*stop_device)(struct device *dev);
+ bool (*active_wakeup)(struct device *dev);
+};
+
+static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
+{
+ return container_of(pd, struct generic_pm_domain, domain);
+}
+
+struct dev_list_entry {
+ struct list_head node;
+ struct device *dev;
+ bool need_restore;
+};
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+extern int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev);
+extern int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev);
+extern int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_subdomain);
+extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target);
+extern void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_poweron(struct generic_pm_domain *genpd);
+extern void pm_genpd_poweroff_unused(void);
+extern void genpd_queue_power_off_work(struct generic_pm_domain *genpd);
+#else
+static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_remove_device(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *new_sd)
+{
+ return -ENOSYS;
+}
+static inline int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
+ struct generic_pm_domain *target)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_init(struct generic_pm_domain *genpd,
+ struct dev_power_governor *gov, bool is_off) {}
+static inline int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+ return -ENOSYS;
+}
+static inline void pm_genpd_poweroff_unused(void) {}
+static inline void genpd_queue_power_off_work(struct generic_pm_domain *gpd) {}
+#endif
+
+#endif /* _LINUX_PM_DOMAIN_H */
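[Editor's sketch] Wiring a platform power island into the new generic PM domain code could look like this; the power_on/power_off bodies stand in for whatever register pokes the platform needs:

#include <linux/pm_domain.h>

static int example_pd_power_off(struct generic_pm_domain *domain)
{
	/* platform-specific: gate clocks, cut power to the island */
	return 0;
}

static int example_pd_power_on(struct generic_pm_domain *domain)
{
	/* platform-specific: restore power, ungate clocks */
	return 0;
}

static struct generic_pm_domain example_pd = {
	.power_off = example_pd_power_off,
	.power_on  = example_pd_power_on,
};

static int example_pd_attach(struct device *dev)
{
	pm_genpd_init(&example_pd, NULL, false);	/* starts "on" */
	return pm_genpd_add_device(&example_pd, dev);
}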
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 878cf84baeb..daac05d751b 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -82,6 +82,11 @@ static inline bool pm_runtime_suspended(struct device *dev)
&& !dev->power.disable_depth;
}
+static inline bool pm_runtime_status_suspended(struct device *dev)
+{
+ return dev->power.runtime_status == RPM_SUSPENDED;
+}
+
static inline bool pm_runtime_enabled(struct device *dev)
{
return !dev->power.disable_depth;
@@ -130,6 +135,7 @@ static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool device_run_wake(struct device *dev) { return false; }
static inline void device_set_run_wake(struct device *dev, bool enable) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
+static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }
static inline int pm_generic_runtime_idle(struct device *dev) { return 0; }
@@ -247,41 +253,41 @@ static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
struct pm_clk_notifier_block {
struct notifier_block nb;
- struct dev_power_domain *pwr_domain;
+ struct dev_pm_domain *pm_domain;
char *con_ids[];
};
-#ifdef CONFIG_PM_RUNTIME_CLK
-extern int pm_runtime_clk_init(struct device *dev);
-extern void pm_runtime_clk_destroy(struct device *dev);
-extern int pm_runtime_clk_add(struct device *dev, const char *con_id);
-extern void pm_runtime_clk_remove(struct device *dev, const char *con_id);
-extern int pm_runtime_clk_suspend(struct device *dev);
-extern int pm_runtime_clk_resume(struct device *dev);
+#ifdef CONFIG_PM_CLK
+extern int pm_clk_init(struct device *dev);
+extern void pm_clk_destroy(struct device *dev);
+extern int pm_clk_add(struct device *dev, const char *con_id);
+extern void pm_clk_remove(struct device *dev, const char *con_id);
+extern int pm_clk_suspend(struct device *dev);
+extern int pm_clk_resume(struct device *dev);
#else
-static inline int pm_runtime_clk_init(struct device *dev)
+static inline int pm_clk_init(struct device *dev)
{
return -EINVAL;
}
-static inline void pm_runtime_clk_destroy(struct device *dev)
+static inline void pm_clk_destroy(struct device *dev)
{
}
-static inline int pm_runtime_clk_add(struct device *dev, const char *con_id)
+static inline int pm_clk_add(struct device *dev, const char *con_id)
{
return -EINVAL;
}
-static inline void pm_runtime_clk_remove(struct device *dev, const char *con_id)
+static inline void pm_clk_remove(struct device *dev, const char *con_id)
{
}
-#define pm_runtime_clock_suspend NULL
-#define pm_runtime_clock_resume NULL
+#define pm_clk_suspend NULL
+#define pm_clk_resume NULL
#endif
#ifdef CONFIG_HAVE_CLK
-extern void pm_runtime_clk_add_notifier(struct bus_type *bus,
+extern void pm_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb);
#else
-static inline void pm_runtime_clk_add_notifier(struct bus_type *bus,
+static inline void pm_clk_add_notifier(struct bus_type *bus,
struct pm_clk_notifier_block *clknb)
{
}
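[Editor's sketch] The rename is mechanical (pm_runtime_clk_* becomes pm_clk_*, CONFIG_PM_RUNTIME_CLK becomes CONFIG_PM_CLK), and it also fixes the stale pm_runtime_clock_* stub names visible above, which never matched the real functions. A driver using the renamed helpers:

#include <linux/pm_runtime.h>

static int example_probe(struct device *dev)
{
	int ret;

	ret = pm_clk_init(dev);
	if (ret)
		return ret;

	/* NULL con_id: manage the device's default clock. */
	ret = pm_clk_add(dev, NULL);
	if (ret)
		pm_clk_destroy(dev);
	return ret;
}

static int example_runtime_suspend(struct device *dev)
{
	return pm_clk_suspend(dev);	/* gates the managed clocks */
}

static int example_runtime_resume(struct device *dev)
{
	return pm_clk_resume(dev);
}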
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 9178d5cc0b0..800f113bea6 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -47,6 +47,13 @@
#define PTRACE_GETREGSET 0x4204
#define PTRACE_SETREGSET 0x4205
+#define PTRACE_SEIZE 0x4206
+#define PTRACE_INTERRUPT 0x4207
+#define PTRACE_LISTEN 0x4208
+
+/* flags in @data for PTRACE_SEIZE */
+#define PTRACE_SEIZE_DEVEL 0x80000000 /* temp flag for development */
+
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
@@ -65,6 +72,7 @@
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6
+#define PTRACE_EVENT_STOP 7
#include <asm/ptrace.h>
@@ -77,16 +85,22 @@
* flags. When the a task is stopped the ptracer owns task->ptrace.
*/
+#define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */
#define PT_PTRACED 0x00000001
#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */
#define PT_TRACESYSGOOD 0x00000004
#define PT_PTRACE_CAP 0x00000008 /* ptracer can follow suid-exec */
-#define PT_TRACE_FORK 0x00000010
-#define PT_TRACE_VFORK 0x00000020
-#define PT_TRACE_CLONE 0x00000040
-#define PT_TRACE_EXEC 0x00000080
-#define PT_TRACE_VFORK_DONE 0x00000100
-#define PT_TRACE_EXIT 0x00000200
+
+/* PT_TRACE_* event enable flags */
+#define PT_EVENT_FLAG_SHIFT 4
+#define PT_EVENT_FLAG(event) (1 << (PT_EVENT_FLAG_SHIFT + (event) - 1))
+
+#define PT_TRACE_FORK PT_EVENT_FLAG(PTRACE_EVENT_FORK)
+#define PT_TRACE_VFORK PT_EVENT_FLAG(PTRACE_EVENT_VFORK)
+#define PT_TRACE_CLONE PT_EVENT_FLAG(PTRACE_EVENT_CLONE)
+#define PT_TRACE_EXEC PT_EVENT_FLAG(PTRACE_EVENT_EXEC)
+#define PT_TRACE_VFORK_DONE PT_EVENT_FLAG(PTRACE_EVENT_VFORK_DONE)
+#define PT_TRACE_EXIT PT_EVENT_FLAG(PTRACE_EVENT_EXIT)
#define PT_TRACE_MASK 0x000003f4
@@ -105,7 +119,7 @@ extern long arch_ptrace(struct task_struct *child, long request,
extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
extern void ptrace_disable(struct task_struct *);
-extern int ptrace_check_attach(struct task_struct *task, int kill);
+extern int ptrace_check_attach(struct task_struct *task, bool ignore_state);
extern int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
@@ -122,7 +136,7 @@ extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
static inline int ptrace_reparented(struct task_struct *child)
{
- return child->real_parent != child->parent;
+ return !same_thread_group(child->real_parent, child->parent);
}
static inline void ptrace_unlink(struct task_struct *child)
@@ -137,36 +151,56 @@ int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
unsigned long data);
/**
- * task_ptrace - return %PT_* flags that apply to a task
- * @task: pointer to &task_struct in question
+ * ptrace_parent - return the task that is tracing the given task
+ * @task: task to consider
*
- * Returns the %PT_* flags that apply to @task.
+ * Returns %NULL if no one is tracing @task, or the &struct task_struct
+ * pointer to its tracer.
+ *
+ * Must be called under rcu_read_lock(). The pointer returned might be kept
+ * live only by RCU. During exec, this may be called with task_lock() held
+ * on @task, still held from when check_unsafe_exec() was called.
*/
-static inline int task_ptrace(struct task_struct *task)
+static inline struct task_struct *ptrace_parent(struct task_struct *task)
{
- return task->ptrace;
+ if (unlikely(task->ptrace))
+ return rcu_dereference(task->parent);
+ return NULL;
+}
+
+/**
+ * ptrace_event_enabled - test whether a ptrace event is enabled
+ * @task: ptracee of interest
+ * @event: %PTRACE_EVENT_* to test
+ *
+ * Test whether @event is enabled for ptracee @task.
+ *
+ * Returns %true if @event is enabled, %false otherwise.
+ */
+static inline bool ptrace_event_enabled(struct task_struct *task, int event)
+{
+ return task->ptrace & PT_EVENT_FLAG(event);
}
/**
* ptrace_event - possibly stop for a ptrace event notification
- * @mask: %PT_* bit to check in @current->ptrace
- * @event: %PTRACE_EVENT_* value to report if @mask is set
+ * @event: %PTRACE_EVENT_* value to report
* @message: value for %PTRACE_GETEVENTMSG to return
*
- * This checks the @mask bit to see if ptrace wants stops for this event.
- * If so we stop, reporting @event and @message to the ptrace parent.
- *
- * Returns nonzero if we did a ptrace notification, zero if not.
+ * Check whether @event is enabled and, if so, report @event and @message
+ * to the ptrace parent.
*
* Called without locks.
*/
-static inline int ptrace_event(int mask, int event, unsigned long message)
+static inline void ptrace_event(int event, unsigned long message)
{
- if (mask && likely(!(current->ptrace & mask)))
- return 0;
- current->ptrace_message = message;
- ptrace_notify((event << 8) | SIGTRAP);
- return 1;
+ if (unlikely(ptrace_event_enabled(current, event))) {
+ current->ptrace_message = message;
+ ptrace_notify((event << 8) | SIGTRAP);
+ } else if (event == PTRACE_EVENT_EXEC && unlikely(current->ptrace)) {
+ /* legacy EXEC report via SIGTRAP */
+ send_sig(SIGTRAP, current, 0);
+ }
}
/**
@@ -183,16 +217,24 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
{
INIT_LIST_HEAD(&child->ptrace_entry);
INIT_LIST_HEAD(&child->ptraced);
- child->parent = child->real_parent;
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ atomic_set(&child->ptrace_bp_refcnt, 1);
+#endif
+ child->jobctl = 0;
child->ptrace = 0;
- if (unlikely(ptrace) && (current->ptrace & PT_PTRACED)) {
+ child->parent = child->real_parent;
+
+ if (unlikely(ptrace) && current->ptrace) {
child->ptrace = current->ptrace;
__ptrace_link(child, current->parent);
- }
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- atomic_set(&child->ptrace_bp_refcnt, 1);
-#endif
+ if (child->ptrace & PT_SEIZED)
+ task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
+ else
+ sigaddset(&child->pending.signal, SIGSTOP);
+
+ set_tsk_thread_flag(child, TIF_SIGPENDING);
+ }
}
/**
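[Editor's sketch] From userspace, the new requests allow attaching without the side effects of PTRACE_ATTACH (no SIGSTOP queued). A hedged tracer sketch; the constants are copied from the hunk above in case the libc headers do not carry them yet:

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef PTRACE_SEIZE
#define PTRACE_SEIZE		0x4206
#define PTRACE_INTERRUPT	0x4207
#define PTRACE_SEIZE_DEVEL	0x80000000
#endif

static int example_seize(pid_t pid)
{
	/* PTRACE_SEIZE_DEVEL must be passed in data while the API is
	 * marked experimental. */
	if (ptrace(PTRACE_SEIZE, pid, 0, PTRACE_SEIZE_DEVEL) < 0)
		return -1;

	/* Put the tracee into a trap stop without queuing any signal. */
	if (ptrace(PTRACE_INTERRUPT, pid, 0, 0) < 0)
		return -1;

	return waitpid(pid, NULL, 0) < 0 ? -1 : 0;
}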
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 496770a9648..f6ef727ee4f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -844,6 +844,7 @@ enum cpu_idle_type {
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
enum powersavings_balance_level {
POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
return 0;
}
-struct sched_group {
- struct sched_group *next; /* Must be a circular list */
+struct sched_group_power {
atomic_t ref;
-
/*
* CPU power of this group, SCHED_LOAD_SCALE being max power for a
* single CPU.
*/
- unsigned int cpu_power, cpu_power_orig;
+ unsigned int power, power_orig;
+};
+
+struct sched_group {
+ struct sched_group *next; /* Must be a circular list */
+ atomic_t ref;
+
unsigned int group_weight;
+ struct sched_group_power *sgp;
/*
* The CPUs this group covers.
@@ -1254,6 +1260,9 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+ int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
@@ -1283,7 +1292,7 @@ struct task_struct {
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
- unsigned int group_stop; /* GROUP_STOP_*, siglock protected */
+ unsigned int jobctl; /* JOBCTL_*, siglock protected */
/* ??? */
unsigned int personality;
unsigned did_exec:1;
@@ -1804,15 +1813,34 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
#define used_math() tsk_used_math(current)
/*
- * task->group_stop flags
+ * task->jobctl flags
*/
-#define GROUP_STOP_SIGMASK 0xffff /* signr of the last group stop */
-#define GROUP_STOP_PENDING (1 << 16) /* task should stop for group stop */
-#define GROUP_STOP_CONSUME (1 << 17) /* consume group stop count */
-#define GROUP_STOP_TRAPPING (1 << 18) /* switching from STOPPED to TRACED */
-#define GROUP_STOP_DEQUEUED (1 << 19) /* stop signal dequeued */
-
-extern void task_clear_group_stop_pending(struct task_struct *task);
+#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
+
+#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
+#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
+#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
+#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
+#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
+#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
+#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
+
+#define JOBCTL_STOP_DEQUEUED (1 << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING (1 << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME (1 << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP (1 << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY (1 << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING (1 << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING (1 << JOBCTL_LISTENING_BIT)
+
+#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
+#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
+
+extern bool task_set_jobctl_pending(struct task_struct *task,
+ unsigned int mask);
+extern void task_clear_jobctl_trapping(struct task_struct *task);
+extern void task_clear_jobctl_pending(struct task_struct *task,
+ unsigned int mask);
#ifdef CONFIG_PREEMPT_RCU
@@ -2127,7 +2155,7 @@ static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, s
spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
return ret;
-}
+}
extern void block_all_signals(int (*notifier)(void *priv), void *priv,
sigset_t *mask);
@@ -2142,7 +2170,7 @@ extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
-extern int do_notify_parent(struct task_struct *, int);
+extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
@@ -2266,8 +2294,10 @@ static inline int get_nr_threads(struct task_struct *tsk)
return tsk->signal->nr_threads;
}
-/* de_thread depends on thread_group_leader not being a pid based check */
-#define thread_group_leader(p) (p == p->group_leader)
+static inline bool thread_group_leader(struct task_struct *p)
+{
+ return p->exit_signal >= 0;
+}
/* Do to the insanities of de_thread it is possible for a process
* to have the pid of the thread group leader without actually being
@@ -2300,11 +2330,6 @@ static inline int thread_group_empty(struct task_struct *p)
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
-static inline int task_detached(struct task_struct *p)
-{
- return p->exit_signal == -1;
-}
-
/*
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
diff --git a/include/linux/slab.h b/include/linux/slab.h
index ad4dd1c8d30..573c809c33d 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -134,6 +134,26 @@ unsigned int kmem_cache_size(struct kmem_cache *);
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
/*
+ * Some archs want to perform DMA into kmalloc caches and need a guaranteed
+ * alignment larger than the alignment of a 64-bit integer.
+ * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
+ */
+#ifdef ARCH_DMA_MINALIGN
+#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
+#else
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
+ * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
+ * Intended for arches that get misalignment faults even for 64-bit integer
+ * aligned buffers.
+ */
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
+/*
* Common kmalloc functions provided by all allocators
*/
void * __must_check __krealloc(const void *, size_t, gfp_t);
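[Editor's sketch] Moving ARCH_KMALLOC_MINALIGN/ARCH_SLAB_MINALIGN here gives all three allocators one definition, so the alignment guarantee holds regardless of which SL?B is configured. A sketch that spells the invariant out:

#include <linux/kernel.h>
#include <linux/slab.h>

static void *example_dma_safe_alloc(size_t len, gfp_t gfp)
{
	void *buf = kmalloc(len, gfp);

	/* kmalloc honours ARCH_KMALLOC_MINALIGN, which is at least
	 * ARCH_DMA_MINALIGN on arches that define it, so DMA into buf
	 * is safe from an alignment standpoint. */
	WARN_ON(buf && !IS_ALIGNED((unsigned long)buf, ARCH_KMALLOC_MINALIGN));
	return buf;
}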
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 83203ae9390..d00e0bacda9 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -18,53 +18,25 @@
#include <trace/events/kmem.h>
/*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
-/*
* struct kmem_cache
*
* manages a cache.
*/
struct kmem_cache {
-/* 1) per-cpu data, touched during every alloc/free */
- struct array_cache *array[NR_CPUS];
-/* 2) Cache tunables. Protected by cache_chain_mutex */
+/* 1) Cache tunables. Protected by cache_chain_mutex */
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
unsigned int buffer_size;
u32 reciprocal_buffer_size;
-/* 3) touched by every alloc & free from the backend */
+/* 2) touched by every alloc & free from the backend */
unsigned int flags; /* constant flags */
unsigned int num; /* # of objs per slab */
-/* 4) cache_grow/shrink */
+/* 3) cache_grow/shrink */
/* order of pgs per slab (2^n) */
unsigned int gfporder;
@@ -80,11 +52,11 @@ struct kmem_cache {
/* constructor func */
void (*ctor)(void *obj);
-/* 5) cache creation/removal */
+/* 4) cache creation/removal */
const char *name;
struct list_head next;
-/* 6) statistics */
+/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
unsigned long num_active;
unsigned long num_allocations;
@@ -111,16 +83,18 @@ struct kmem_cache {
int obj_size;
#endif /* CONFIG_DEBUG_SLAB */
+/* 6) per-cpu/per-node data, touched during every alloc/free */
/*
- * We put nodelists[] at the end of kmem_cache, because we want to size
- * this array to nr_node_ids slots instead of MAX_NUMNODES
+ * We put array[] at the end of kmem_cache, because we want to size
+ * this array to nr_cpu_ids slots instead of NR_CPUS
* (see kmem_cache_init())
- * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
- * is statically defined, so we reserve the max number of nodes.
+ * We still use [NR_CPUS] and not [1] or [0] because cache_cache
+ * is statically defined, so we reserve the max number of cpus.
*/
- struct kmem_list3 *nodelists[MAX_NUMNODES];
+ struct kmem_list3 **nodelists;
+ struct array_cache *array[NR_CPUS];
/*
- * Do not add fields after nodelists[]
+ * Do not add fields after array[]
*/
};
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 4382db09df4..0ec00b39d00 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -1,16 +1,6 @@
#ifndef __LINUX_SLOB_DEF_H
#define __LINUX_SLOB_DEF_H
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
-#endif
-
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index c8668d161dd..4b35c06dfbc 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -113,16 +113,6 @@ struct kmem_cache {
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
-#ifdef ARCH_DMA_MINALIGN
-#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
-#else
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
/*
* Maximum kmalloc object size handled by SLUB. Larger object allocations
* are passed through to the page allocator. The page allocator "fastpath"
@@ -228,6 +218,19 @@ kmalloc_order(size_t size, gfp_t flags, unsigned int order)
return ret;
}
+/*
+ * Calling this on allocated memory will check that the memory
+ * is expected to be in use, and print warnings if not.
+ */
+#ifdef CONFIG_SLUB_DEBUG
+extern bool verify_mem_not_deleted(const void *x);
+#else
+static inline bool verify_mem_not_deleted(const void *x)
+{
+ return true;
+}
+#endif
+
#ifdef CONFIG_TRACING
extern void *
kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
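[Editor's sketch] verify_mem_not_deleted() gives callers a debug-only probe for use-after-free; with CONFIG_SLUB_DEBUG off it collapses to true. A small usage sketch:

#include <linux/kernel.h>
#include <linux/slab.h>

static void example_check_object(const void *obj)
{
	/* Under CONFIG_SLUB_DEBUG this also prints a SLUB report. */
	if (!verify_mem_not_deleted(obj))
		pr_warn("example: %p looks freed (use-after-free?)\n", obj);
}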
diff --git a/include/linux/spi/74x164.h b/include/linux/spi/74x164.h
index d85c52f294a..0aa6acc7331 100644
--- a/include/linux/spi/74x164.h
+++ b/include/linux/spi/74x164.h
@@ -1,8 +1,6 @@
#ifndef LINUX_SPI_74X164_H
#define LINUX_SPI_74X164_H
-#define GEN_74X164_DRIVER_NAME "74x164"
-
struct gen_74x164_chip_platform_data {
/* number assigned to the first GPIO */
unsigned base;
diff --git a/include/linux/spi/mcp23s08.h b/include/linux/spi/mcp23s08.h
index c42cff8ca19..2d676d5aaa8 100644
--- a/include/linux/spi/mcp23s08.h
+++ b/include/linux/spi/mcp23s08.h
@@ -22,13 +22,4 @@ struct mcp23s08_platform_data {
* base to base+15 (or base+31 for s17 variant).
*/
unsigned base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct spi_device *spi,
- int gpio, unsigned ngpio,
- void *context);
- int (*teardown)(struct spi_device *spi,
- int gpio, unsigned ngpio,
- void *context);
};
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 083ffea7ba1..e1e3742733b 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -92,6 +92,13 @@ typedef int __bitwise suspend_state_t;
* @enter() and @wake(), even if any of them fails. It is executed after
* a failing @prepare.
*
+ * @suspend_again: Returns whether the system should suspend again (true) or
+ * not (false). If the platform wants to poll sensors or execute some code
+ * while suspended, without waking userspace or most devices, the
+ * suspend_again callback is the place to do it, assuming a periodic or
+ * alarm wakeup has already been set up. This lets code run while the
+ * system stays suspended as far as userland and devices can tell.
+ *
* @end: Called by the PM core right after resuming devices, to indicate to
* the platform that the system has returned to the working state or
* the transition to the sleep state has been aborted.
@@ -113,6 +120,7 @@ struct platform_suspend_ops {
int (*enter)(suspend_state_t state);
void (*wake)(void);
void (*finish)(void);
+ bool (*suspend_again)(void);
void (*end)(void);
void (*recover)(void);
};
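[Editor's sketch] A platform could use the new hook to poll a sensor while staying suspended from userspace's point of view; example_read_sensor() and the threshold are hypothetical, and a periodic/alarm wakeup is assumed to be armed elsewhere:

#include <linux/suspend.h>

static int example_read_sensor(void);	/* platform-specific, assumed */

static int example_enter(suspend_state_t state)
{
	return 0;	/* platform sleep entry elided */
}

static bool example_suspend_again(void)
{
	/* true: go straight back to sleep without resuming userspace. */
	return example_read_sensor() < 100;
}

static const struct platform_suspend_ops example_suspend_ops = {
	.valid		= suspend_valid_only_mem,
	.enter		= example_enter,
	.suspend_again	= example_suspend_again,
};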
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index e95f5236611..a71a2927a6a 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,27 +51,12 @@
#include <linux/security.h>
struct linux_binprm;
-/**
- * tracehook_expect_breakpoints - guess if task memory might be touched
- * @task: current task, making a new mapping
- *
- * Return nonzero if @task is expected to want breakpoint insertion in
- * its memory at some point. A zero return is no guarantee it won't
- * be done, but this is a hint that it's known to be likely.
- *
- * May be called with @task->mm->mmap_sem held for writing.
- */
-static inline int tracehook_expect_breakpoints(struct task_struct *task)
-{
- return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
/*
* ptrace report for syscall entry and exit looks identical.
*/
static inline void ptrace_report_syscall(struct pt_regs *regs)
{
- int ptrace = task_ptrace(current);
+ int ptrace = current->ptrace;
if (!(ptrace & PT_PTRACED))
return;
@@ -145,229 +130,6 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
}
/**
- * tracehook_unsafe_exec - check for exec declared unsafe due to tracing
- * @task: current task doing exec
- *
- * Return %LSM_UNSAFE_* bits applied to an exec because of tracing.
- *
- * @task->signal->cred_guard_mutex is held by the caller through the do_execve().
- */
-static inline int tracehook_unsafe_exec(struct task_struct *task)
-{
- int unsafe = 0;
- int ptrace = task_ptrace(task);
- if (ptrace & PT_PTRACED) {
- if (ptrace & PT_PTRACE_CAP)
- unsafe |= LSM_UNSAFE_PTRACE_CAP;
- else
- unsafe |= LSM_UNSAFE_PTRACE;
- }
- return unsafe;
-}
-
-/**
- * tracehook_tracer_task - return the task that is tracing the given task
- * @tsk: task to consider
- *
- * Returns NULL if no one is tracing @task, or the &struct task_struct
- * pointer to its tracer.
- *
- * Must called under rcu_read_lock(). The pointer returned might be kept
- * live only by RCU. During exec, this may be called with task_lock()
- * held on @task, still held from when tracehook_unsafe_exec() was called.
- */
-static inline struct task_struct *tracehook_tracer_task(struct task_struct *tsk)
-{
- if (task_ptrace(tsk) & PT_PTRACED)
- return rcu_dereference(tsk->parent);
- return NULL;
-}
-
-/**
- * tracehook_report_exec - a successful exec was completed
- * @fmt: &struct linux_binfmt that performed the exec
- * @bprm: &struct linux_binprm containing exec details
- * @regs: user-mode register state
- *
- * An exec just completed, we are shortly going to return to user mode.
- * The freshly initialized register state can be seen and changed in @regs.
- * The name, file and other pointers in @bprm are still on hand to be
- * inspected, but will be freed as soon as this returns.
- *
- * Called with no locks, but with some kernel resources held live
- * and a reference on @fmt->module.
- */
-static inline void tracehook_report_exec(struct linux_binfmt *fmt,
- struct linux_binprm *bprm,
- struct pt_regs *regs)
-{
- if (!ptrace_event(PT_TRACE_EXEC, PTRACE_EVENT_EXEC, 0) &&
- unlikely(task_ptrace(current) & PT_PTRACED))
- send_sig(SIGTRAP, current, 0);
-}
-
-/**
- * tracehook_report_exit - task has begun to exit
- * @exit_code: pointer to value destined for @current->exit_code
- *
- * @exit_code points to the value passed to do_exit(), which tracing
- * might change here. This is almost the first thing in do_exit(),
- * before freeing any resources or setting the %PF_EXITING flag.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_exit(long *exit_code)
-{
- ptrace_event(PT_TRACE_EXIT, PTRACE_EVENT_EXIT, *exit_code);
-}
-
-/**
- * tracehook_prepare_clone - prepare for new child to be cloned
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- *
- * This is called before a new user task is to be cloned.
- * Its return value will be passed to tracehook_finish_clone().
- *
- * Called with no locks held.
- */
-static inline int tracehook_prepare_clone(unsigned clone_flags)
-{
- if (clone_flags & CLONE_UNTRACED)
- return 0;
-
- if (clone_flags & CLONE_VFORK) {
- if (current->ptrace & PT_TRACE_VFORK)
- return PTRACE_EVENT_VFORK;
- } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
- if (current->ptrace & PT_TRACE_CLONE)
- return PTRACE_EVENT_CLONE;
- } else if (current->ptrace & PT_TRACE_FORK)
- return PTRACE_EVENT_FORK;
-
- return 0;
-}
-
-/**
- * tracehook_finish_clone - new child created and being attached
- * @child: new child task
- * @clone_flags: %CLONE_* flags from clone/fork/vfork system call
- * @trace: return value from tracehook_prepare_clone()
- *
- * This is called immediately after adding @child to its parent's children list.
- * The @trace value is that returned by tracehook_prepare_clone().
- *
- * Called with current's siglock and write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_clone(struct task_struct *child,
- unsigned long clone_flags, int trace)
-{
- ptrace_init_task(child, (clone_flags & CLONE_PTRACE) || trace);
-}
-
-/**
- * tracehook_report_clone - in parent, new child is about to start running
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: new child task
- *
- * Called after a child is set up, but before it has been started running.
- * This is not a good place to block, because the child has not started
- * yet. Suspend the child here if desired, and then block in
- * tracehook_report_clone_complete(). This must prevent the child from
- * self-reaping if tracehook_report_clone_complete() uses the @child
- * pointer; otherwise it might have died and been released by the time
- * tracehook_report_clone_complete() is called.
- *
- * Called with no locks held, but the child cannot run until this returns.
- */
-static inline void tracehook_report_clone(struct pt_regs *regs,
- unsigned long clone_flags,
- pid_t pid, struct task_struct *child)
-{
- if (unlikely(task_ptrace(child))) {
- /*
- * It doesn't matter who attached/attaching to this
- * task, the pending SIGSTOP is right in any case.
- */
- sigaddset(&child->pending.signal, SIGSTOP);
- set_tsk_thread_flag(child, TIF_SIGPENDING);
- }
-}
-
-/**
- * tracehook_report_clone_complete - new child is running
- * @trace: return value from tracehook_prepare_clone()
- * @regs: parent's user register state
- * @clone_flags: flags from parent's system call
- * @pid: new child's PID in the parent's namespace
- * @child: child task, already running
- *
- * This is called just after the child has started running. This is
- * just before the clone/fork syscall returns, or blocks for vfork
- * child completion if @clone_flags has the %CLONE_VFORK bit set.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_clone_complete(int trace,
- struct pt_regs *regs,
- unsigned long clone_flags,
- pid_t pid,
- struct task_struct *child)
-{
- if (unlikely(trace))
- ptrace_event(0, trace, pid);
-}
-
-/**
- * tracehook_report_vfork_done - vfork parent's child has exited or exec'd
- * @child: child task, already running
- * @pid: new child's PID in the parent's namespace
- *
- * Called after a %CLONE_VFORK parent has waited for the child to complete.
- * The clone/vfork system call will return immediately after this.
- * The @child pointer may be invalid if a self-reaping child died and
- * tracehook_report_clone() took no action to prevent it from self-reaping.
- *
- * Called with no locks held.
- */
-static inline void tracehook_report_vfork_done(struct task_struct *child,
- pid_t pid)
-{
- ptrace_event(PT_TRACE_VFORK_DONE, PTRACE_EVENT_VFORK_DONE, pid);
-}
-
-/**
- * tracehook_prepare_release_task - task is being reaped, clean up tracing
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() just before @task gets finally reaped
- * and freed. This would be the ideal place to remove and clean up any
- * tracing-related state for @task.
- *
- * Called with no locks held.
- */
-static inline void tracehook_prepare_release_task(struct task_struct *task)
-{
-}
-
-/**
- * tracehook_finish_release_task - final tracing clean-up
- * @task: task in %EXIT_DEAD state
- *
- * This is called in release_task() when @task is being in the middle of
- * being reaped. After this, there must be no tracing entanglements.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline void tracehook_finish_release_task(struct task_struct *task)
-{
- ptrace_release_task(task);
-}
-
-/**
* tracehook_signal_handler - signal handler setup is complete
* @sig: number of signal being delivered
* @info: siginfo_t of signal being delivered
@@ -390,151 +152,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
ptrace_notify(SIGTRAP);
}
-/**
- * tracehook_consider_ignored_signal - suppress short-circuit of ignored signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return zero iff tracing doesn't care to examine this ignored signal,
- * so it can short-circuit normal delivery and never even get queued.
- *
- * Called with @task->sighand->siglock held.
- */
-static inline int tracehook_consider_ignored_signal(struct task_struct *task,
- int sig)
-{
- return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_consider_fatal_signal - suppress special handling of fatal signal
- * @task: task receiving the signal
- * @sig: signal number being sent
- *
- * Return nonzero to prevent special handling of this termination signal.
- * Normally handler for signal is %SIG_DFL. It can be %SIG_IGN if @sig is
- * ignored, in which case force_sig() is about to reset it to %SIG_DFL.
- * When this returns zero, this signal might cause a quick termination
- * that does not give the debugger a chance to intercept the signal.
- *
- * Called with or without @task->sighand->siglock held.
- */
-static inline int tracehook_consider_fatal_signal(struct task_struct *task,
- int sig)
-{
- return (task_ptrace(task) & PT_PTRACED) != 0;
-}
-
-/**
- * tracehook_force_sigpending - let tracing force signal_pending(current) on
- *
- * Called when recomputing our signal_pending() flag. Return nonzero
- * to force the signal_pending() flag on, so that tracehook_get_signal()
- * will be called before the next return to user mode.
- *
- * Called with @current->sighand->siglock held.
- */
-static inline int tracehook_force_sigpending(void)
-{
- return 0;
-}
-
-/**
- * tracehook_get_signal - deliver synthetic signal to traced task
- * @task: @current
- * @regs: task_pt_regs(@current)
- * @info: details of synthetic signal
- * @return_ka: sigaction for synthetic signal
- *
- * Return zero to check for a real pending signal normally.
- * Return -1 after releasing the siglock to repeat the check.
- * Return a signal number to induce an artificial signal delivery,
- * setting *@info and *@return_ka to specify its details and behavior.
- *
- * The @return_ka->sa_handler value controls the disposition of the
- * signal, no matter the signal number. For %SIG_DFL, the return value
- * is a representative signal to indicate the behavior (e.g. %SIGTERM
- * for death, %SIGQUIT for core dump, %SIGSTOP for job control stop,
- * %SIGTSTP for stop unless in an orphaned pgrp), but the signal number
- * reported will be @info->si_signo instead.
- *
- * Called with @task->sighand->siglock held, before dequeuing pending signals.
- */
-static inline int tracehook_get_signal(struct task_struct *task,
- struct pt_regs *regs,
- siginfo_t *info,
- struct k_sigaction *return_ka)
-{
- return 0;
-}
-
-/**
- * tracehook_finish_jctl - report about return from job control stop
- *
- * This is called by do_signal_stop() after wakeup.
- */
-static inline void tracehook_finish_jctl(void)
-{
-}
-
-#define DEATH_REAP -1
-#define DEATH_DELAYED_GROUP_LEADER -2
-
-/**
- * tracehook_notify_death - task is dead, ready to notify parent
- * @task: @current task now exiting
- * @death_cookie: value to pass to tracehook_report_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * A return value >= 0 means call do_notify_parent() with that signal
- * number. A negative return value can be %DEATH_REAP to self-reap right
- * now, or %DEATH_DELAYED_GROUP_LEADER to remain a zombie without notifying
- * our parent. Note that a return value of 0 means a do_notify_parent() call
- * that sends no signal, but still wakes up a parent blocked in wait*().
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static inline int tracehook_notify_death(struct task_struct *task,
- void **death_cookie, int group_dead)
-{
- if (task_detached(task))
- return task->ptrace ? SIGCHLD : DEATH_REAP;
-
- /*
- * If something other than our normal parent is ptracing us, then
- * send it a SIGCHLD instead of honoring exit_signal. exit_signal
- * only has special meaning to our real parent.
- */
- if (thread_group_empty(task) && !ptrace_reparented(task))
- return task->exit_signal;
-
- return task->ptrace ? SIGCHLD : DEATH_DELAYED_GROUP_LEADER;
-}
-
-/**
- * tracehook_report_death - task is dead and ready to be reaped
- * @task: @current task now exiting
- * @signal: return value from tracehook_notify_death()
- * @death_cookie: value passed back from tracehook_notify_death()
- * @group_dead: nonzero if this was the last thread in the group to die
- *
- * Thread has just become a zombie or is about to self-reap. If positive,
- * @signal is the signal number just sent to the parent (usually %SIGCHLD).
- * If @signal is %DEATH_REAP, this thread will self-reap. If @signal is
- * %DEATH_DELAYED_GROUP_LEADER, this is a delayed_group_leader() zombie.
- * The @death_cookie was passed back by tracehook_notify_death().
- *
- * If normal reaping is not inhibited, @task->exit_state might be changing
- * in parallel.
- *
- * Called without locks.
- */
-static inline void tracehook_report_death(struct task_struct *task,
- int signal, void *death_cookie,
- int group_dead)
-{
-}
-
#ifdef TIF_NOTIFY_RESUME
/**
* set_notify_resume - cause tracehook_notify_resume() to be called
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index f584aba78ca..2be2887c695 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -255,7 +255,7 @@ enum {
WQ_HIGHPRI = 1 << 4, /* high priority */
 WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
- WQ_DYING = 1 << 6, /* internal: workqueue is dying */
+ WQ_DRAINING = 1 << 6, /* internal: workqueue is draining */
WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
@@ -355,6 +355,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
struct delayed_work *work, unsigned long delay);
extern void flush_workqueue(struct workqueue_struct *wq);
+extern void drain_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
extern int schedule_work(struct work_struct *work);
@@ -412,21 +413,6 @@ static inline bool __cancel_delayed_work(struct delayed_work *work)
return ret;
}
-/* Obsolete. use cancel_delayed_work_sync() */
-static inline __deprecated
-void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
- struct delayed_work *work)
-{
- cancel_delayed_work_sync(work);
-}
-
-/* Obsolete. use cancel_delayed_work_sync() */
-static inline __deprecated
-void cancel_rearming_delayed_work(struct delayed_work *work)
-{
- cancel_delayed_work_sync(work);
-}
-
#ifndef CONFIG_SMP
static inline long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
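As a sketch of how the new drain_workqueue() above differs from flush_workqueue(): draining also waits out work items that requeue themselves, and queueing from anyone other than the draining work is rejected while WQ_DRAINING is set. A minimal teardown pattern, assuming the workqueue was created earlier with alloc_workqueue():

	#include <linux/workqueue.h>

	static void my_driver_teardown(struct workqueue_struct *my_wq)
	{
		/* wait until truly empty, including self-requeueing items */
		drain_workqueue(my_wq);
		destroy_workqueue(my_wq);
	}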
diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h
new file mode 100644
index 00000000000..a5889f18807
--- /dev/null
+++ b/include/rdma/ib_pma.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(IB_PMA_H)
+#define IB_PMA_H
+
+#include <rdma/ib_mad.h>
+
+/*
+ * PMA class portinfo capability mask bits
+ */
+#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
+#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
+#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
+
+#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
+
+struct ib_pma_mad {
+ struct ib_mad_hdr mad_hdr;
+ u8 reserved[40];
+ u8 data[192];
+} __packed;
+
+struct ib_pma_portsamplescontrol {
+ u8 opcode;
+ u8 port_select;
+ u8 tick;
+ u8 counter_width; /* resv: 7:3, counter width: 2:0 */
+ __be32 counter_mask0_9; /* 2, 10 3-bit fields */
+ __be16 counter_mask10_14; /* 1, 5 3-bit fields */
+ u8 sample_mechanisms;
+ u8 sample_status; /* only lower 2 bits */
+ __be64 option_mask;
+ __be64 vendor_mask;
+ __be32 sample_start;
+ __be32 sample_interval;
+ __be16 tag;
+ __be16 counter_select[15];
+ __be32 reserved1;
+ __be64 samples_only_option_mask;
+ __be32 reserved2[28];
+};
+
+struct ib_pma_portsamplesresult {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 counter[15];
+};
+
+struct ib_pma_portsamplesresult_ext {
+ __be16 tag;
+ __be16 sample_status; /* only lower 2 bits */
+ __be32 extended_width; /* only upper 2 bits */
+ __be64 counter[15];
+};
+
+struct ib_pma_portcounters {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be16 symbol_error_counter;
+ u8 link_error_recovery_counter;
+ u8 link_downed_counter;
+ __be16 port_rcv_errors;
+ __be16 port_rcv_remphys_errors;
+ __be16 port_rcv_switch_relay_errors;
+ __be16 port_xmit_discards;
+ u8 port_xmit_constraint_errors;
+ u8 port_rcv_constraint_errors;
+ u8 reserved1;
+ u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
+ __be16 reserved2;
+ __be16 vl15_dropped;
+ __be32 port_xmit_data;
+ __be32 port_rcv_data;
+ __be32 port_xmit_packets;
+ __be32 port_rcv_packets;
+ __be32 port_xmit_wait;
+} __packed;
+
+
+#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
+
+struct ib_pma_portcounters_ext {
+ u8 reserved;
+ u8 port_select;
+ __be16 counter_select;
+ __be32 reserved1;
+ __be64 port_xmit_data;
+ __be64 port_rcv_data;
+ __be64 port_xmit_packets;
+ __be64 port_rcv_packets;
+ __be64 port_unicast_xmit_packets;
+ __be64 port_unicast_rcv_packets;
+ __be64 port_multicast_xmit_packets;
+ __be64 port_multicast_rcv_packets;
+} __packed;
+
+#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
+
+#endif /* IB_PMA_H */
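For illustration, a minimal sketch of preparing a PerfMgmt GET of the PortCounters attribute defined above; IB_MGMT_BASE_VERSION, IB_MGMT_CLASS_PERF_MGMT and IB_MGMT_METHOD_GET come from <rdma/ib_mad.h>, and the helper name is hypothetical:

	#include <linux/string.h>
	#include <rdma/ib_mad.h>
	#include <rdma/ib_pma.h>

	static void pma_prepare_portcounters_get(struct ib_pma_mad *pmp, u8 port)
	{
		struct ib_pma_portcounters *pc =
			(struct ib_pma_portcounters *)pmp->data;

		memset(pmp, 0, sizeof(*pmp));
		pmp->mad_hdr.base_version = IB_MGMT_BASE_VERSION;
		pmp->mad_hdr.mgmt_class = IB_MGMT_CLASS_PERF_MGMT;
		pmp->mad_hdr.class_version = 1;
		pmp->mad_hdr.method = IB_MGMT_METHOD_GET;
		pmp->mad_hdr.attr_id = IB_PMA_PORT_COUNTERS;	/* already big-endian */

		pc->port_select = port;
		pc->counter_select = IB_PMA_SEL_SYMBOL_ERROR |
				     IB_PMA_SEL_PORT_RCV_ERRORS;
	}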
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 55cd0a0bc97..bf4306aea16 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -350,7 +350,8 @@ enum ib_event_type {
IB_EVENT_SRQ_ERR,
IB_EVENT_SRQ_LIMIT_REACHED,
IB_EVENT_QP_LAST_WQE_REACHED,
- IB_EVENT_CLIENT_REREGISTER
+ IB_EVENT_CLIENT_REREGISTER,
+ IB_EVENT_GID_CHANGE,
};
struct ib_event {
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 561ac99def5..27040653005 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -9,12 +9,13 @@
#include <net/sock.h>
#include <net/tcp.h>
-#define TARGET_CORE_MOD_VERSION "v4.0.0-rc7-ml"
+#define TARGET_CORE_MOD_VERSION "v4.1.0-rc1-ml"
#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
/* Used by transport_generic_allocate_iovecs() */
#define TRANSPORT_IOV_DATA_BUFFER 5
/* Maximum Number of LUNs per Target Portal Group */
+/* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
* By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
@@ -99,6 +100,7 @@ enum transport_state_table {
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
TRANSPORT_FREE_CMD_INTR = 17,
+ TRANSPORT_COMPLETE_QF_WP = 18,
};
/* Used for struct se_cmd->se_cmd_flags */
@@ -108,27 +110,22 @@ enum se_cmd_flags_table {
SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
- SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020,
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200,
SCF_SE_CMD_FAILED = 0x00000400,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
- SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
SCF_SENT_DELAYED_TAS = 0x00020000,
SCF_ALUA_NON_OPTIMIZED = 0x00040000,
SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
- SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000,
- SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
+ SCF_UNUSED = 0x00100000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
- SCF_EMULATE_SYNC_CACHE = 0x00800000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
- SCF_EMULATE_SYNC_UNMAP = 0x02000000
+ SCF_EMULATE_QUEUE_FULL = 0x02000000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -205,11 +202,6 @@ typedef enum {
SCSI_INDEX_TYPE_MAX
} scsi_index_t;
-struct scsi_index_table {
- spinlock_t lock;
- u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
-} ____cacheline_aligned;
-
struct se_cmd;
struct t10_alua {
@@ -235,7 +227,7 @@ struct t10_alua_lu_gp {
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
- struct list_head lu_gp_list;
+ struct list_head lu_gp_node;
struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
@@ -291,10 +283,10 @@ struct t10_vpd {
} ____cacheline_aligned;
struct t10_wwn {
- unsigned char vendor[8];
- unsigned char model[16];
- unsigned char revision[4];
- unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
+ char vendor[8];
+ char model[16];
+ char revision[4];
+ char unit_serial[INQUIRY_VPD_SERIAL_LEN];
spinlock_t t10_vpd_lock;
struct se_subsystem_dev *t10_sub_dev;
struct config_group t10_wwn_group;
@@ -366,13 +358,13 @@ struct t10_reservation_ops {
int (*t10_pr_clear)(struct se_cmd *);
};
-struct t10_reservation_template {
+struct t10_reservation {
 /* Reservation affects all target ports */
int pr_all_tg_pt;
/* Activate Persistence across Target Power Loss enabled
* for SCSI device */
int pr_aptpl_active;
- /* Used by struct t10_reservation_template->pr_aptpl_buf_len */
+ /* Used by struct t10_reservation->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN 8192
u32 pr_aptpl_buf_len;
u32 pr_generation;
@@ -397,7 +389,7 @@ struct t10_reservation_template {
struct se_queue_req {
int state;
- void *cmd;
+ struct se_cmd *cmd;
struct list_head qr_list;
} ____cacheline_aligned;
@@ -408,64 +400,10 @@ struct se_queue_obj {
wait_queue_head_t thread_wq;
} ____cacheline_aligned;
-/*
- * Used one per struct se_cmd to hold all extra struct se_task
- * metadata. This structure is setup and allocated in
- * drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
- */
-struct se_transport_task {
- unsigned char *t_task_cdb;
- unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
- unsigned long long t_task_lba;
- int t_tasks_failed;
- int t_tasks_fua;
- bool t_tasks_bidi;
- u32 t_task_cdbs;
- u32 t_tasks_check;
- u32 t_tasks_no;
- u32 t_tasks_sectors;
- u32 t_tasks_se_num;
- u32 t_tasks_se_bidi_num;
- u32 t_tasks_sg_chained_no;
- atomic_t t_fe_count;
- atomic_t t_se_count;
- atomic_t t_task_cdbs_left;
- atomic_t t_task_cdbs_ex_left;
- atomic_t t_task_cdbs_timeout_left;
- atomic_t t_task_cdbs_sent;
- atomic_t t_transport_aborted;
- atomic_t t_transport_active;
- atomic_t t_transport_complete;
- atomic_t t_transport_queue_active;
- atomic_t t_transport_sent;
- atomic_t t_transport_stop;
- atomic_t t_transport_timeout;
- atomic_t transport_dev_active;
- atomic_t transport_lun_active;
- atomic_t transport_lun_fe_stop;
- atomic_t transport_lun_stop;
- spinlock_t t_state_lock;
- struct completion t_transport_stop_comp;
- struct completion transport_lun_fe_stop_comp;
- struct completion transport_lun_stop_comp;
- struct scatterlist *t_tasks_sg_chained;
- struct scatterlist t_tasks_sg_bounce;
- void *t_task_buf;
- /*
- * Used for pre-registered fabric SGL passthrough WRITE and READ
- * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
- * and other HW target mode fabric modules.
- */
- struct scatterlist *t_task_pt_sgl;
- struct list_head *t_mem_list;
- /* Used for BIDI READ */
- struct list_head *t_mem_bidi_list;
- struct list_head t_task_list;
-} ____cacheline_aligned;
-
struct se_task {
unsigned char task_sense;
struct scatterlist *task_sg;
+ u32 task_sg_nents;
struct scatterlist *task_sg_bidi;
u8 task_scsi_status;
u8 task_flags;
@@ -476,8 +414,6 @@ struct se_task {
u32 task_no;
u32 task_sectors;
u32 task_size;
- u32 task_sg_num;
- u32 task_sg_offset;
enum dma_data_direction task_data_direction;
struct se_cmd *task_se_cmd;
struct se_device *se_dev;
@@ -495,9 +431,6 @@ struct se_task {
struct list_head t_state_list;
} ____cacheline_aligned;
-#define TASK_CMD(task) ((task)->task_se_cmd)
-#define TASK_DEV(task) ((task)->se_dev)
-
struct se_cmd {
/* SAM response code being sent to initiator */
u8 scsi_status;
@@ -531,9 +464,10 @@ struct se_cmd {
atomic_t transport_sent;
/* Used for sense data */
void *sense_buffer;
- struct list_head se_delayed_list;
- struct list_head se_ordered_list;
- struct list_head se_lun_list;
+ struct list_head se_delayed_node;
+ struct list_head se_ordered_node;
+ struct list_head se_lun_node;
+ struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
struct se_device *se_obj_ptr;
@@ -542,18 +476,62 @@ struct se_cmd {
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
- /* t_task is setup to t_task_backstore in transport_init_se_cmd() */
- struct se_transport_task *t_task;
- struct se_transport_task t_task_backstore;
+ struct list_head se_queue_node;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
- void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
+ void (*transport_split_cdb)(unsigned long long, u32, unsigned char *);
void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
void (*transport_complete_callback)(struct se_cmd *);
-} ____cacheline_aligned;
+ int (*transport_qf_callback)(struct se_cmd *);
-#define T_TASK(cmd) ((cmd)->t_task)
-#define CMD_TFO(cmd) ((cmd)->se_tfo)
+ unsigned char *t_task_cdb;
+ unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
+ unsigned long long t_task_lba;
+ int t_tasks_failed;
+ int t_tasks_fua;
+ bool t_tasks_bidi;
+ u32 t_tasks_sg_chained_no;
+ atomic_t t_fe_count;
+ atomic_t t_se_count;
+ atomic_t t_task_cdbs_left;
+ atomic_t t_task_cdbs_ex_left;
+ atomic_t t_task_cdbs_timeout_left;
+ atomic_t t_task_cdbs_sent;
+ atomic_t t_transport_aborted;
+ atomic_t t_transport_active;
+ atomic_t t_transport_complete;
+ atomic_t t_transport_queue_active;
+ atomic_t t_transport_sent;
+ atomic_t t_transport_stop;
+ atomic_t t_transport_timeout;
+ atomic_t transport_dev_active;
+ atomic_t transport_lun_active;
+ atomic_t transport_lun_fe_stop;
+ atomic_t transport_lun_stop;
+ spinlock_t t_state_lock;
+ struct completion t_transport_stop_comp;
+ struct completion transport_lun_fe_stop_comp;
+ struct completion transport_lun_stop_comp;
+ struct scatterlist *t_tasks_sg_chained;
+
+ /*
+ * Used for pre-registered fabric SGL passthrough WRITE and READ
+ * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
+ * and other HW target mode fabric modules.
+ */
+ struct scatterlist *t_task_pt_sgl;
+ u32 t_task_pt_sgl_num;
+
+ struct scatterlist *t_data_sg;
+ unsigned int t_data_nents;
+ struct scatterlist *t_bidi_data_sg;
+ unsigned int t_bidi_data_nents;
+
+ /* Used for BIDI READ */
+ struct list_head t_task_list;
+ u32 t_task_list_num;
+
+} ____cacheline_aligned;
struct se_tmr_req {
 /* Task Management function to be performed */
@@ -617,9 +595,6 @@ struct se_session {
struct list_head sess_acl_list;
} ____cacheline_aligned;
-#define SE_SESS(cmd) ((cmd)->se_sess)
-#define SE_NODE_ACL(sess) ((sess)->se_node_acl)
-
struct se_device;
struct se_transform_info;
struct scatterlist;
@@ -640,8 +615,6 @@ struct se_lun_acl {
struct se_ml_stat_grps ml_stat_grps;
} ____cacheline_aligned;
-#define ML_STAT_GRPS(lacl) (&(lacl)->ml_stat_grps)
-
struct se_dev_entry {
bool def_pr_registered;
/* See transport_lunflags_table */
@@ -688,6 +661,8 @@ struct se_dev_attrib {
int emulate_reservations;
int emulate_alua;
int enforce_pr_isids;
+ int is_nonrot;
+ int emulate_rest_reord;
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
@@ -727,10 +702,10 @@ struct se_subsystem_dev {
/* T10 Inquiry and VPD WWN Information */
struct t10_wwn t10_wwn;
/* T10 SPC-2 + SPC-3 Reservations */
- struct t10_reservation_template t10_reservation;
+ struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
- struct list_head g_se_dev_list;
+ struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
@@ -738,11 +713,6 @@ struct se_subsystem_dev {
struct se_dev_stat_grps dev_stat_grps;
} ____cacheline_aligned;
-#define T10_ALUA(su_dev) (&(su_dev)->t10_alua)
-#define T10_RES(su_dev) (&(su_dev)->t10_reservation)
-#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops)
-#define DEV_STAT_GRP(dev) (&(dev)->dev_stat_grps)
-
struct se_device {
/* Set to 1 if thread is NOT sleeping on thread_sem */
u8 thread_active;
@@ -780,11 +750,11 @@ struct se_device {
atomic_t dev_status_thr_count;
atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
+ atomic_t dev_qf_count;
struct se_obj dev_obj;
struct se_obj dev_access_obj;
struct se_obj dev_export_obj;
- struct se_queue_obj *dev_queue_obj;
- struct se_queue_obj *dev_status_queue_obj;
+ struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
@@ -796,6 +766,7 @@ struct se_device {
spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
+ spinlock_t qf_cmd_lock;
 /* Used for legacy SPC-2 reservations */
struct se_node_acl *dev_reserved_node_acl;
/* Used for ALUA Logical Unit Group membership */
@@ -809,10 +780,12 @@ struct se_device {
struct task_struct *process_thread;
pid_t process_thread_pid;
struct task_struct *dev_mgmt_thread;
+ struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
+ struct list_head qf_cmd_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
struct se_subsystem_dev *se_sub_dev;
@@ -824,11 +797,6 @@ struct se_device {
struct list_head g_se_dev_list;
} ____cacheline_aligned;
-#define SE_DEV(cmd) ((cmd)->se_lun->lun_se_dev)
-#define SU_DEV(dev) ((dev)->se_sub_dev)
-#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib)
-#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn)
-
struct se_hba {
u16 hba_tpgt;
u32 hba_id;
@@ -837,24 +805,17 @@ struct se_hba {
/* Virtual iSCSI devices attached. */
u32 dev_count;
u32 hba_index;
- atomic_t load_balance_queue;
- atomic_t left_queue_depth;
- /* Maximum queue depth the HBA can handle. */
- atomic_t max_queue_depth;
/* Pointer to transport specific host structure. */
void *hba_ptr;
/* Linked list for struct se_device */
struct list_head hba_dev_list;
- struct list_head hba_list;
+ struct list_head hba_node;
spinlock_t device_lock;
- spinlock_t hba_queue_lock;
struct config_group hba_group;
struct mutex hba_access_mutex;
struct se_subsystem_api *transport;
} ____cacheline_aligned;
-#define SE_HBA(dev) ((dev)->se_hba)
-
struct se_port_stat_grps {
struct config_group stat_group;
struct config_group scsi_port_group;
@@ -881,9 +842,6 @@ struct se_lun {
struct se_port_stat_grps port_stat_grps;
} ____cacheline_aligned;
-#define SE_LUN(cmd) ((cmd)->se_lun)
-#define PORT_STAT_GRP(lun) (&(lun)->port_stat_grps)
-
struct scsi_port_stats {
u64 cmd_pdus;
u64 tx_data_octets;
@@ -930,7 +888,7 @@ struct se_portal_group {
spinlock_t tpg_lun_lock;
/* Pointer to $FABRIC_MOD portal group */
void *se_tpg_fabric_ptr;
- struct list_head se_tpg_list;
+ struct list_head se_tpg_node;
/* linked list for initiator ACL list */
struct list_head acl_node_list;
struct se_lun *tpg_lun_list;
@@ -949,8 +907,6 @@ struct se_portal_group {
struct config_group tpg_param_group;
} ____cacheline_aligned;
-#define TPG_TFO(se_tpg) ((se_tpg)->se_tpg_tfo)
-
struct se_wwn {
struct target_fabric_configfs *wwn_tf;
struct config_group wwn_group;
@@ -958,28 +914,4 @@ struct se_wwn {
struct config_group fabric_stat_group;
} ____cacheline_aligned;
-struct se_global {
- u16 alua_lu_gps_counter;
- int g_sub_api_initialized;
- u32 in_shutdown;
- u32 alua_lu_gps_count;
- u32 g_hba_id_counter;
- struct config_group target_core_hbagroup;
- struct config_group alua_group;
- struct config_group alua_lu_gps_group;
- struct list_head g_lu_gps_list;
- struct list_head g_se_tpg_list;
- struct list_head g_hba_list;
- struct list_head g_se_dev_list;
- struct se_hba *g_lun0_hba;
- struct se_subsystem_dev *g_lun0_su_dev;
- struct se_device *g_lun0_dev;
- struct t10_alua_lu_gp *default_lu_gp;
- spinlock_t g_device_lock;
- spinlock_t hba_lock;
- spinlock_t se_tpg_lock;
- spinlock_t lu_gps_lock;
- spinlock_t plugin_class_lock;
-} ____cacheline_aligned;
-
#endif /* TARGET_CORE_BASE_H */
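Since the accessor macros (T_TASK(), SE_DEV(), TPG_TFO() and friends) are removed above, fabric code now dereferences the embedded members directly; a hedged before/after sketch, assuming cmd is a valid struct se_cmd with an assigned LUN:

	/* old: atomic_inc(&T_TASK(cmd)->t_fe_count); */
	atomic_inc(&cmd->t_fe_count);		/* t_task state now lives in se_cmd */

	/* old: struct se_device *dev = SE_DEV(cmd); */
	struct se_device *dev = cmd->se_lun->lun_se_dev;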
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
index 52b18a5752c..46571912086 100644
--- a/include/target/target_core_device.h
+++ b/include/target/target_core_device.h
@@ -1,8 +1,8 @@
#ifndef TARGET_CORE_DEVICE_H
#define TARGET_CORE_DEVICE_H
-extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
-extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
+extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
+extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
extern struct se_dev_entry *core_get_se_deve_from_rtpi(
struct se_node_acl *, u16);
extern int core_free_device_list_for_node(struct se_node_acl *,
@@ -39,6 +39,8 @@ extern int se_dev_set_emulate_tas(struct se_device *, int);
extern int se_dev_set_emulate_tpu(struct se_device *, int);
extern int se_dev_set_emulate_tpws(struct se_device *, int);
extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
+extern int se_dev_set_is_nonrot(struct se_device *, int);
+extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
extern int se_dev_set_queue_depth(struct se_device *, u32);
extern int se_dev_set_max_sectors(struct se_device *, u32);
extern int se_dev_set_optimal_sectors(struct se_device *, u32);
diff --git a/include/target/target_core_fabric_ops.h b/include/target/target_core_fabric_ops.h
index 747e1404dca..2de8fe90759 100644
--- a/include/target/target_core_fabric_ops.h
+++ b/include/target/target_core_fabric_ops.h
@@ -39,17 +39,11 @@ struct target_core_fabric_ops {
*/
int (*new_cmd_map)(struct se_cmd *);
/*
- * Optional function pointer for TCM fabric modules that use
- * Linux/NET sockets to allocate struct iovec array to struct se_cmd
- */
- int (*alloc_cmd_iovecs)(struct se_cmd *);
- /*
* Optional to release struct se_cmd and fabric dependent allocated
* I/O descriptor in transport_cmd_check_stop()
*/
void (*check_stop_free)(struct se_cmd *);
- void (*release_cmd_to_pool)(struct se_cmd *);
- void (*release_cmd_direct)(struct se_cmd *);
+ void (*release_cmd)(struct se_cmd *);
/*
 * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
*/
@@ -70,7 +64,6 @@ struct target_core_fabric_ops {
void (*set_default_node_attributes)(struct se_node_acl *);
u32 (*get_task_tag)(struct se_cmd *);
int (*get_cmd_state)(struct se_cmd *);
- void (*new_cmd_failure)(struct se_cmd *);
int (*queue_data_in)(struct se_cmd *);
int (*queue_status)(struct se_cmd *);
int (*queue_tm_rsp)(struct se_cmd *);
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 24a1c6cb83c..46aae4f94ed 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -101,6 +101,10 @@
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
+/* By default don't report non-rotating (solid state) medium */
+#define DA_IS_NONROT 0
+/* Queue Algorithm Modifier default for restricted reordering in control mode page */
+#define DA_EMULATE_REST_REORD 0
#define SE_MODE_PAGE_BUF 512
@@ -111,9 +115,8 @@ struct se_subsystem_api;
extern struct kmem_cache *se_mem_cache;
-extern int init_se_global(void);
-extern void release_se_global(void);
-extern void init_scsi_index_table(void);
+extern int init_se_kmem_caches(void);
+extern void release_se_kmem_caches(void);
extern u32 scsi_get_new_index(scsi_index_t);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern int transport_subsystem_check_init(void);
@@ -160,36 +163,38 @@ extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
struct se_subsystem_dev *, u32,
void *, struct se_dev_limits *,
const char *, const char *);
-extern void transport_device_setup_cmd(struct se_cmd *);
extern void transport_init_se_cmd(struct se_cmd *,
struct target_core_fabric_ops *,
struct se_session *, u32, int, int,
unsigned char *);
+void *transport_kmap_first_data_page(struct se_cmd *cmd);
+void transport_kunmap_first_data_page(struct se_cmd *cmd);
extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_generic_handle_cdb(struct se_cmd *);
+extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void transport_generic_free_cmd_intr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
-extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
-extern void transport_release_cmd_to_pool(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
+extern void transport_release_cmd(struct se_cmd *);
+extern void transport_generic_free_cmd(struct se_cmd *, int, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
+extern int transport_init_task_sg(struct se_task *, struct se_mem *, u32);
extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
- void *, struct se_mem *,
+ struct scatterlist *, struct se_mem *,
struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
+extern int transport_generic_new_cmd(struct se_cmd *);
extern int transport_generic_do_tmr(struct se_cmd *);
/* From target_core_alua.c */
extern int core_alua_check_nonop_delay(struct se_cmd *);
@@ -235,13 +240,13 @@ struct se_subsystem_api {
*/
int (*cdb_none)(struct se_task *);
/*
- * For SCF_SCSI_CONTROL_NONSG_IO_CDB
+ * For SCF_SCSI_DATA_SG_IO_CDB
*/
- int (*map_task_non_SG)(struct se_task *);
+ int (*map_data_SG)(struct se_task *);
/*
- * For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
+ * For SCF_SCSI_CONTROL_SG_IO_CDB
*/
- int (*map_task_SG)(struct se_task *);
+ int (*map_control_SG)(struct se_task *);
/*
* attach_hba():
*/
@@ -292,7 +297,7 @@ struct se_subsystem_api {
* drivers. Provided out of convenience.
*/
int (*transport_complete)(struct se_task *task);
- struct se_task *(*alloc_task)(struct se_cmd *);
+ struct se_task *(*alloc_task)(unsigned char *cdb);
/*
* do_task():
*/
@@ -342,19 +347,9 @@ struct se_subsystem_api {
*/
sector_t (*get_blocks)(struct se_device *);
/*
- * do_se_mem_map():
- */
- int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
- struct se_mem *, struct se_mem **, u32 *, u32 *);
- /*
* get_sense_buffer():
*/
unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;
-#define TRANSPORT(dev) ((dev)->transport)
-#define HBA_TRANSPORT(hba) ((hba)->transport)
-
-extern struct se_global *se_global;
-
#endif /* TARGET_CORE_TRANSPORT_H */
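A minimal sketch of the new kmap helpers declared above, as a backend or fabric module might use them to touch the first page of a command's data buffer (error handling elided; what gets done with the mapping is illustrative):

	unsigned char *buf;

	buf = transport_kmap_first_data_page(cmd);
	/* ... read or fill the start of cmd's data area ... */
	transport_kunmap_first_data_page(cmd);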
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index a2b22f01a51..4076ed72afb 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -23,3 +23,13 @@ void balloon_set_new_target(unsigned long target);
int alloc_xenballooned_pages(int nr_pages, struct page** pages);
void free_xenballooned_pages(int nr_pages, struct page** pages);
+
+struct sys_device;
+#ifdef CONFIG_XEN_SELFBALLOONING
+extern int register_xen_selfballooning(struct sys_device *sysdev);
+#else
+static inline int register_xen_selfballooning(struct sys_device *sysdev)
+{
+ return -ENOSYS;
+}
+#endif
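The -ENOSYS stub above lets callers attempt registration unconditionally; a sketch of the calling pattern, where sysdev is whatever sys_device the caller already owns:

	int rc = register_xen_selfballooning(sysdev);
	if (rc == -ENOSYS)
		pr_info("xen: selfballooning not compiled in\n");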
diff --git a/include/xen/events.h b/include/xen/events.h
index 9af21e19545..d287997d3ea 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -74,8 +74,6 @@ int xen_set_callback_via(uint64_t via);
void xen_evtchn_do_upcall(struct pt_regs *regs);
void xen_hvm_evtchn_do_upcall(void);
-/* Allocate a pirq for a physical interrupt, given a gsi. */
-int xen_allocate_pirq_gsi(unsigned gsi);
/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
unsigned pirq, int shareable, char *name);
diff --git a/include/xen/hvc-console.h b/include/xen/hvc-console.h
index c3adde32669..901724dc528 100644
--- a/include/xen/hvc-console.h
+++ b/include/xen/hvc-console.h
@@ -6,11 +6,13 @@ extern struct console xenboot_console;
#ifdef CONFIG_HVC_XEN
void xen_console_resume(void);
void xen_raw_console_write(const char *str);
+__attribute__((format(printf, 1, 2)))
void xen_raw_printk(const char *fmt, ...);
#else
static inline void xen_console_resume(void) { }
static inline void xen_raw_console_write(const char *str) { }
-static inline void xen_raw_printk(const char *fmt, ...) { }
+static inline __attribute__((format(printf, 1, 2)))
+void xen_raw_printk(const char *fmt, ...) { }
#endif
#endif /* XEN_HVC_CONSOLE_H */
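With the format attribute on both the real function and the stub, gcc now checks arguments against the format string in either configuration; for example, under this change:

	xen_raw_printk("port %d\n", port);	/* fine */
	xen_raw_printk("port %d\n", name);	/* now warns if name is a char * */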
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 70213b4515e..6acd9cefd51 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -450,6 +450,45 @@ struct start_info {
int8_t cmd_line[MAX_GUEST_CMDLINE];
};
+struct dom0_vga_console_info {
+ uint8_t video_type;
+#define XEN_VGATYPE_TEXT_MODE_3 0x03
+#define XEN_VGATYPE_VESA_LFB 0x23
+
+ union {
+ struct {
+ /* Font height, in pixels. */
+ uint16_t font_height;
+ /* Cursor location (column, row). */
+ uint16_t cursor_x, cursor_y;
+ /* Number of rows and columns (dimensions in characters). */
+ uint16_t rows, columns;
+ } text_mode_3;
+
+ struct {
+ /* Width and height, in pixels. */
+ uint16_t width, height;
+ /* Bytes per scan line. */
+ uint16_t bytes_per_line;
+ /* Bits per pixel. */
+ uint16_t bits_per_pixel;
+ /* LFB physical address, and size (in units of 64kB). */
+ uint32_t lfb_base;
+ uint32_t lfb_size;
+ /* RGB mask offsets and sizes, as defined by VBE 1.2+ */
+ uint8_t red_pos, red_size;
+ uint8_t green_pos, green_size;
+ uint8_t blue_pos, blue_size;
+ uint8_t rsvd_pos, rsvd_size;
+
+ /* VESA capabilities (offset 0xa, VESA command 0x4f00). */
+ uint32_t gbl_caps;
+ /* Mode attributes (offset 0x0, VESA command 0x4f01). */
+ uint16_t mode_attrs;
+ } vesa_lfb;
+ } u;
+};
+
/* These flags are passed in the 'flags' field of start_info_t. */
#define SIF_PRIVILEGED (1<<0) /* Is the domain privileged? */
#define SIF_INITDOMAIN (1<<1) /* Is this the initial control domain? */
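A small sketch of consuming the dom0_vga_console_info above: lfb_size is given in 64kB units, so the mappable framebuffer length works out as below (helper name illustrative):

	static unsigned long dom0_lfb_bytes(const struct dom0_vga_console_info *info)
	{
		if (info->video_type != XEN_VGATYPE_VESA_LFB)
			return 0;
		return (unsigned long)info->u.vesa_lfb.lfb_size << 16; /* x 64kB */
	}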
diff --git a/include/xen/tmem.h b/include/xen/tmem.h
new file mode 100644
index 00000000000..82e2c83a32f
--- /dev/null
+++ b/include/xen/tmem.h
@@ -0,0 +1,5 @@
+#ifndef _XEN_TMEM_H
+#define _XEN_TMEM_H
+/* defined in drivers/xen/tmem.c */
+extern int tmem_enabled;
+#endif /* _XEN_TMEM_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 5467369e088..aceeca799fd 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -223,7 +223,9 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port);
enum xenbus_state xenbus_read_driver_state(const char *path);
+__attribute__((format(printf, 3, 4)))
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...);
+__attribute__((format(printf, 3, 4)))
void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...);
const char *xenbus_strstate(enum xenbus_state state);
diff --git a/kernel/exit.c b/kernel/exit.c
index f2b321bae44..73bb192a3d3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -169,7 +169,6 @@ void release_task(struct task_struct * p)
struct task_struct *leader;
int zap_leader;
repeat:
- tracehook_prepare_release_task(p);
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
rcu_read_lock();
@@ -179,7 +178,7 @@ repeat:
proc_flush_task(p);
write_lock_irq(&tasklist_lock);
- tracehook_finish_release_task(p);
+ ptrace_release_task(p);
__exit_signal(p);
/*
@@ -190,22 +189,12 @@ repeat:
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
- BUG_ON(task_detached(leader));
- do_notify_parent(leader, leader->exit_signal);
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
- *
- * do_notify_parent() will have marked it self-reaping in
- * that case.
- */
- zap_leader = task_detached(leader);
-
- /*
- * This maintains the invariant that release_task()
- * only runs on a task in EXIT_DEAD, just for sanity.
*/
+ zap_leader = do_notify_parent(leader, leader->exit_signal);
if (zap_leader)
leader->exit_state = EXIT_DEAD;
}
@@ -277,18 +266,16 @@ int is_current_pgrp_orphaned(void)
return retval;
}
-static int has_stopped_jobs(struct pid *pgrp)
+static bool has_stopped_jobs(struct pid *pgrp)
{
- int retval = 0;
struct task_struct *p;
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
- if (!task_is_stopped(p))
- continue;
- retval = 1;
- break;
+ if (p->signal->flags & SIGNAL_STOP_STOPPED)
+ return true;
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
- return retval;
+
+ return false;
}
/*
@@ -751,7 +738,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
{
list_move_tail(&p->sibling, &p->real_parent->children);
- if (task_detached(p))
+ if (p->exit_state == EXIT_DEAD)
return;
/*
* If this is a threaded reparent there is no need to
@@ -764,10 +751,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
p->exit_signal = SIGCHLD;
/* If it has exited notify the new parent about this child's death. */
- if (!task_ptrace(p) &&
+ if (!p->ptrace &&
p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
- do_notify_parent(p, p->exit_signal);
- if (task_detached(p)) {
+ if (do_notify_parent(p, p->exit_signal)) {
p->exit_state = EXIT_DEAD;
list_move_tail(&p->sibling, dead);
}
@@ -794,7 +780,7 @@ static void forget_original_parent(struct task_struct *father)
do {
t->real_parent = reaper;
if (t->parent == father) {
- BUG_ON(task_ptrace(t));
+ BUG_ON(t->ptrace);
t->parent = t->real_parent;
}
if (t->pdeath_signal)
@@ -819,8 +805,7 @@ static void forget_original_parent(struct task_struct *father)
*/
static void exit_notify(struct task_struct *tsk, int group_dead)
{
- int signal;
- void *cookie;
+ bool autoreap;
/*
* This does two things:
@@ -851,26 +836,33 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
* we have changed execution domain as these two values started
* the same after a fork.
*/
- if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+ if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
tsk->self_exec_id != tsk->parent_exec_id))
tsk->exit_signal = SIGCHLD;
- signal = tracehook_notify_death(tsk, &cookie, group_dead);
- if (signal >= 0)
- signal = do_notify_parent(tsk, signal);
+ if (unlikely(tsk->ptrace)) {
+ int sig = thread_group_leader(tsk) &&
+ thread_group_empty(tsk) &&
+ !ptrace_reparented(tsk) ?
+ tsk->exit_signal : SIGCHLD;
+ autoreap = do_notify_parent(tsk, sig);
+ } else if (thread_group_leader(tsk)) {
+ autoreap = thread_group_empty(tsk) &&
+ do_notify_parent(tsk, tsk->exit_signal);
+ } else {
+ autoreap = true;
+ }
- tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+ tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
wake_up_process(tsk->signal->group_exit_task);
write_unlock_irq(&tasklist_lock);
- tracehook_report_death(tsk, signal, cookie, group_dead);
-
/* If the process is dead, release it - nobody will wait for it */
- if (signal == DEATH_REAP)
+ if (autoreap)
release_task(tsk);
}
@@ -923,7 +915,7 @@ NORET_TYPE void do_exit(long code)
*/
set_fs(USER_DS);
- tracehook_report_exit(&code);
+ ptrace_event(PTRACE_EVENT_EXIT, code);
validate_creds_for_do_exit(tsk);
@@ -1235,9 +1227,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
traced = ptrace_reparented(p);
/*
* It can be ptraced but not reparented, check
- * !task_detached() to filter out sub-threads.
+ * thread_group_leader() to filter out sub-threads.
*/
- if (likely(!traced) && likely(!task_detached(p))) {
+ if (likely(!traced) && thread_group_leader(p)) {
struct signal_struct *psig;
struct signal_struct *sig;
unsigned long maxrss;
@@ -1345,16 +1337,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
/* We dropped tasklist, ptracer could die and untrace */
ptrace_unlink(p);
/*
- * If this is not a detached task, notify the parent.
- * If it's still not detached after that, don't release
- * it now.
+ * If this is not a sub-thread, notify the parent.
+ * If parent wants a zombie, don't release it now.
*/
- if (!task_detached(p)) {
- do_notify_parent(p, p->exit_signal);
- if (!task_detached(p)) {
- p->exit_state = EXIT_ZOMBIE;
- p = NULL;
- }
+ if (thread_group_leader(p) &&
+ !do_notify_parent(p, p->exit_signal)) {
+ p->exit_state = EXIT_ZOMBIE;
+ p = NULL;
}
write_unlock_irq(&tasklist_lock);
}
@@ -1367,7 +1356,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
if (ptrace) {
- if (task_is_stopped_or_traced(p))
+ if (task_is_stopped_or_traced(p) &&
+ !(p->jobctl & JOBCTL_LISTENING))
return &p->exit_code;
} else {
if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@ -1563,7 +1553,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
* Notification and reaping will be cascaded to the real
* parent when the ptracer detaches.
*/
- if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+ if (likely(!ptrace) && unlikely(p->ptrace)) {
/* it will become visible, clear notask_error */
wo->notask_error = 0;
return 0;
@@ -1606,8 +1596,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
* own children, it should create a separate process which
* takes the role of real parent.
*/
- if (likely(!ptrace) && task_ptrace(p) &&
- same_thread_group(p->parent, p->real_parent))
+ if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
return 0;
/*
diff --git a/kernel/fork.c b/kernel/fork.c
index 0276c30401a..4d4117e0150 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,7 +37,6 @@
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
-#include <linux/tracehook.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
@@ -1340,7 +1339,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
}
if (likely(p->pid)) {
- tracehook_finish_clone(p, clone_flags, trace);
+ ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
if (thread_group_leader(p)) {
if (is_child_reaper(pid))
@@ -1481,10 +1480,22 @@ long do_fork(unsigned long clone_flags,
}
/*
- * When called from kernel_thread, don't do user tracing stuff.
+ * Determine whether and which event to report to ptracer. When
+ * called from kernel_thread or CLONE_UNTRACED is explicitly
+ * requested, no event is reported; otherwise, report if the event
+ * for the type of forking is enabled.
*/
- if (likely(user_mode(regs)))
- trace = tracehook_prepare_clone(clone_flags);
+ if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
+ if (clone_flags & CLONE_VFORK)
+ trace = PTRACE_EVENT_VFORK;
+ else if ((clone_flags & CSIGNAL) != SIGCHLD)
+ trace = PTRACE_EVENT_CLONE;
+ else
+ trace = PTRACE_EVENT_FORK;
+
+ if (likely(!ptrace_event_enabled(current, trace)))
+ trace = 0;
+ }
p = copy_process(clone_flags, stack_start, regs, stack_size,
child_tidptr, NULL, trace);
@@ -1508,26 +1519,26 @@ long do_fork(unsigned long clone_flags,
}
audit_finish_fork(p);
- tracehook_report_clone(regs, clone_flags, nr, p);
/*
* We set PF_STARTING at creation in case tracing wants to
* use this to distinguish a fully live task from one that
- * hasn't gotten to tracehook_report_clone() yet. Now we
- * clear it and set the child going.
+ * hasn't finished SIGSTOP raising yet. Now we clear it
+ * and set the child going.
*/
p->flags &= ~PF_STARTING;
wake_up_new_task(p);
- tracehook_report_clone_complete(trace, regs,
- clone_flags, nr, p);
+ /* forking complete and child started to run, tell ptracer */
+ if (unlikely(trace))
+ ptrace_event(trace, nr);
if (clone_flags & CLONE_VFORK) {
freezer_do_not_count();
wait_for_completion(&vfork);
freezer_count();
- tracehook_report_vfork_done(p, nr);
+ ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
}
} else {
nr = PTR_ERR(p);
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 87f4d24b55b..7b856b3458d 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -224,6 +224,10 @@ config PM_OPP
implementations a ready to use framework to manage OPPs.
For more information, read <file:Documentation/power/opp.txt>
-config PM_RUNTIME_CLK
+config PM_CLK
def_bool y
- depends on PM_RUNTIME && HAVE_CLK
+ depends on PM && HAVE_CLK
+
+config PM_GENERIC_DOMAINS
+ bool
+ depends on PM
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 2981af4ce7c..6c601f87196 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -37,8 +37,9 @@ EXPORT_SYMBOL_GPL(unregister_pm_notifier);
int pm_notifier_call_chain(unsigned long val)
{
- return (blocking_notifier_call_chain(&pm_chain_head, val, NULL)
- == NOTIFY_BAD) ? -EINVAL : 0;
+ int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);
+
+ return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
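With notifier_to_errno() in place, a PM notifier can veto with a specific error code instead of having everything collapse to -EINVAL; a hedged sketch of such a callback (the busy check is hypothetical):

	static int my_pm_notifier(struct notifier_block *nb,
				  unsigned long event, void *unused)
	{
		if (event == PM_SUSPEND_PREPARE && my_device_is_busy())
			return notifier_from_errno(-EBUSY);	/* caller sees -EBUSY */
		return NOTIFY_OK;
	}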
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 1c41ba21541..b6b71ad2208 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -44,6 +44,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
suspend_ops = ops;
mutex_unlock(&pm_mutex);
}
+EXPORT_SYMBOL_GPL(suspend_set_ops);
bool valid_state(suspend_state_t state)
{
@@ -65,6 +66,7 @@ int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
+EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static int suspend_test(int level)
{
@@ -126,12 +128,13 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
}
/**
- * suspend_enter - enter the desired system sleep state.
- * @state: state to enter
+ * suspend_enter - enter the desired system sleep state.
+ * @state: State to enter
+ * @wakeup: Set if a wakeup event was detected, meaning suspend should not be re-entered.
*
- * This function should be called after devices have been suspended.
+ * This function should be called after devices have been suspended.
*/
-static int suspend_enter(suspend_state_t state)
+static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
@@ -165,7 +168,8 @@ static int suspend_enter(suspend_state_t state)
error = syscore_suspend();
if (!error) {
- if (!(suspend_test(TEST_CORE) || pm_wakeup_pending())) {
+ *wakeup = pm_wakeup_pending();
+ if (!(suspend_test(TEST_CORE) || *wakeup)) {
error = suspend_ops->enter(state);
events_check_enabled = false;
}
@@ -199,6 +203,7 @@ static int suspend_enter(suspend_state_t state)
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
+ bool wakeup = false;
if (!suspend_ops)
return -ENOSYS;
@@ -220,7 +225,10 @@ int suspend_devices_and_enter(suspend_state_t state)
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
- error = suspend_enter(state);
+ do {
+ error = suspend_enter(state, &wakeup);
+ } while (!error && !wakeup
+ && suspend_ops->suspend_again && suspend_ops->suspend_again());
Resume_devices:
suspend_test_start();
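The suspend_again() loop above lets a platform transparently re-enter suspend after handling a transient wakeup; a hedged sketch of wiring up the hook (the enter callback and the polling condition are hypothetical):

	static bool my_suspend_again(void)
	{
		/* stay asleep while the periodic monitor still has work */
		return my_monitor_wants_resuspend();
	}

	static const struct platform_suspend_ops my_suspend_ops = {
		.valid		= suspend_valid_only_mem,
		.enter		= my_suspend_enter,
		.suspend_again	= my_suspend_again,
	};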
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 2df115790cd..9de3ecfd20f 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -23,8 +23,15 @@
#include <linux/uaccess.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
+#include <linux/cn_proc.h>
+static int ptrace_trapping_sleep_fn(void *flags)
+{
+ schedule();
+ return 0;
+}
+
/*
* ptrace a task: make the debugger its new parent and
* move it to the ptrace list.
@@ -77,13 +84,20 @@ void __ptrace_unlink(struct task_struct *child)
spin_lock(&child->sighand->siglock);
/*
- * Reinstate GROUP_STOP_PENDING if group stop is in effect and
+ * Clear all pending traps and TRAPPING. TRAPPING should be
+ * cleared regardless of JOBCTL_STOP_PENDING. Do it explicitly.
+ */
+ task_clear_jobctl_pending(child, JOBCTL_TRAP_MASK);
+ task_clear_jobctl_trapping(child);
+
+ /*
+ * Reinstate JOBCTL_STOP_PENDING if group stop is in effect and
* @child isn't dead.
*/
if (!(child->flags & PF_EXITING) &&
(child->signal->flags & SIGNAL_STOP_STOPPED ||
child->signal->group_stop_count))
- child->group_stop |= GROUP_STOP_PENDING;
+ child->jobctl |= JOBCTL_STOP_PENDING;
/*
* If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
@@ -91,16 +105,30 @@ void __ptrace_unlink(struct task_struct *child)
* is in TASK_TRACED; otherwise, we might unduly disrupt
* TASK_KILLABLE sleeps.
*/
- if (child->group_stop & GROUP_STOP_PENDING || task_is_traced(child))
+ if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
signal_wake_up(child, task_is_traced(child));
spin_unlock(&child->sighand->siglock);
}
-/*
- * Check that we have indeed attached to the thing..
+/**
+ * ptrace_check_attach - check whether ptracee is ready for ptrace operation
+ * @child: ptracee to check for
+ * @ignore_state: don't check whether @child is currently %TASK_TRACED
+ *
+ * Check whether @child is being ptraced by %current and ready for further
+ * ptrace operations. If @ignore_state is %false, @child also should be in
+ * %TASK_TRACED state and on return the child is guaranteed to be traced
+ * and not executing. If @ignore_state is %true, @child can be in any
+ * state.
+ *
+ * CONTEXT:
+ * Grabs and releases tasklist_lock and @child->sighand->siglock.
+ *
+ * RETURNS:
+ * 0 on success, -ESRCH if @child is not ready.
*/
-int ptrace_check_attach(struct task_struct *child, int kill)
+int ptrace_check_attach(struct task_struct *child, bool ignore_state)
{
int ret = -ESRCH;
@@ -119,13 +147,14 @@ int ptrace_check_attach(struct task_struct *child, int kill)
*/
spin_lock_irq(&child->sighand->siglock);
WARN_ON_ONCE(task_is_stopped(child));
- if (task_is_traced(child) || kill)
+ if (ignore_state || (task_is_traced(child) &&
+ !(child->jobctl & JOBCTL_LISTENING)))
ret = 0;
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
- if (!ret && !kill)
+ if (!ret && !ignore_state)
ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
/* All systems go.. */
@@ -182,11 +211,28 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
return !err;
}
-static int ptrace_attach(struct task_struct *task)
+static int ptrace_attach(struct task_struct *task, long request,
+ unsigned long flags)
{
- bool wait_trap = false;
+ bool seize = (request == PTRACE_SEIZE);
int retval;
+ /*
+ * SEIZE will enable new ptrace behaviors which will be implemented
+ * gradually. SEIZE_DEVEL is used to prevent applications
+ * expecting full SEIZE behaviors from trapping on kernel commits
+ * which are still in the process of implementing them.
+ *
+ * Only test programs for new ptrace behaviors being implemented
+ * should set SEIZE_DEVEL. If unset, SEIZE will fail with -EIO.
+ *
+ * Once SEIZE behaviors are completely implemented, this flag and
+ * the following test will be removed.
+ */
+ retval = -EIO;
+ if (seize && !(flags & PTRACE_SEIZE_DEVEL))
+ goto out;
+
audit_ptrace(task);
retval = -EPERM;
@@ -218,16 +264,21 @@ static int ptrace_attach(struct task_struct *task)
goto unlock_tasklist;
task->ptrace = PT_PTRACED;
+ if (seize)
+ task->ptrace |= PT_SEIZED;
if (task_ns_capable(task, CAP_SYS_PTRACE))
task->ptrace |= PT_PTRACE_CAP;
__ptrace_link(task, current);
- send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
+
+ /* SEIZE doesn't trap tracee on attach */
+ if (!seize)
+ send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
spin_lock(&task->sighand->siglock);
/*
- * If the task is already STOPPED, set GROUP_STOP_PENDING and
+ * If the task is already STOPPED, set JOBCTL_TRAP_STOP and
* TRAPPING, and kick it so that it transits to TRACED. TRAPPING
* will be cleared if the child completes the transition or any
* event which clears the group stop states happens. We'll wait
@@ -243,11 +294,9 @@ static int ptrace_attach(struct task_struct *task)
* The following task_is_stopped() test is safe as both transitions
* in and out of STOPPED are protected by siglock.
*/
- if (task_is_stopped(task)) {
- task->group_stop |= GROUP_STOP_PENDING | GROUP_STOP_TRAPPING;
+ if (task_is_stopped(task) &&
+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
signal_wake_up(task, 1);
- wait_trap = true;
- }
spin_unlock(&task->sighand->siglock);
@@ -257,9 +306,12 @@ unlock_tasklist:
unlock_creds:
mutex_unlock(&task->signal->cred_guard_mutex);
out:
- if (wait_trap)
- wait_event(current->signal->wait_chldexit,
- !(task->group_stop & GROUP_STOP_TRAPPING));
+ if (!retval) {
+ wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
+ ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
+ proc_ptrace_connector(task, PTRACE_ATTACH);
+ }
+
return retval;
}
@@ -322,25 +374,27 @@ static int ignoring_children(struct sighand_struct *sigh)
*/
static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
{
+ bool dead;
+
__ptrace_unlink(p);
- if (p->exit_state == EXIT_ZOMBIE) {
- if (!task_detached(p) && thread_group_empty(p)) {
- if (!same_thread_group(p->real_parent, tracer))
- do_notify_parent(p, p->exit_signal);
- else if (ignoring_children(tracer->sighand)) {
- __wake_up_parent(p, tracer);
- p->exit_signal = -1;
- }
- }
- if (task_detached(p)) {
- /* Mark it as in the process of being reaped. */
- p->exit_state = EXIT_DEAD;
- return true;
+ if (p->exit_state != EXIT_ZOMBIE)
+ return false;
+
+ dead = !thread_group_leader(p);
+
+ if (!dead && thread_group_empty(p)) {
+ if (!same_thread_group(p->real_parent, tracer))
+ dead = do_notify_parent(p, p->exit_signal);
+ else if (ignoring_children(tracer->sighand)) {
+ __wake_up_parent(p, tracer);
+ dead = true;
}
}
-
- return false;
+ /* Mark it as in the process of being reaped. */
+ if (dead)
+ p->exit_state = EXIT_DEAD;
+ return dead;
}
static int ptrace_detach(struct task_struct *child, unsigned int data)
@@ -365,6 +419,7 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
}
write_unlock_irq(&tasklist_lock);
+ proc_ptrace_connector(child, PTRACE_DETACH);
if (unlikely(dead))
release_task(child);
@@ -611,10 +666,12 @@ static int ptrace_regset(struct task_struct *task, int req, unsigned int type,
int ptrace_request(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
+ bool seized = child->ptrace & PT_SEIZED;
int ret = -EIO;
- siginfo_t siginfo;
+ siginfo_t siginfo, *si;
void __user *datavp = (void __user *) data;
unsigned long __user *datalp = datavp;
+ unsigned long flags;
switch (request) {
case PTRACE_PEEKTEXT:
@@ -647,6 +704,62 @@ int ptrace_request(struct task_struct *child, long request,
ret = ptrace_setsiginfo(child, &siginfo);
break;
+ case PTRACE_INTERRUPT:
+ /*
+ * Stop tracee without any side-effect on signal or job
+ * control. At least one trap is guaranteed to happen
+ * after this request. If @child is already trapped, the
+ * current trap is not disturbed and another trap will
+ * happen after the current trap is ended with PTRACE_CONT.
+ *
+ * The actual trap might not be a PTRACE_EVENT_STOP trap but
+ * the pending condition is cleared regardless.
+ */
+ if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+ break;
+
+ /*
+ * INTERRUPT doesn't disturb an existing trap, with one
+ * exception: if the ptracer issued LISTEN for the current
+ * STOP, this INTERRUPT should clear LISTEN and re-trap the
+ * tracee into STOP.
+ */
+ if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
+ signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+ break;
+
+ case PTRACE_LISTEN:
+ /*
+ * Listen for events. Tracee must be in STOP. It's not
+ * resumed per se but is not considered to be in TRACED by
+ * wait(2) or ptrace(2). If an async event (e.g. group
+ * stop state change) happens, tracee will enter STOP trap
+ * again. Alternatively, ptracer can issue INTERRUPT to
+ * finish listening and re-trap tracee into STOP.
+ */
+ if (unlikely(!seized || !lock_task_sighand(child, &flags)))
+ break;
+
+ si = child->last_siginfo;
+ if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
+ break;
+
+ child->jobctl |= JOBCTL_LISTENING;
+
+ /*
+ * If NOTIFY is set, it means an event happened between the start
+ * of this trap and now. Trigger re-trap immediately.
+ */
+ if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+ signal_wake_up(child, true);
+
+ unlock_task_sighand(child, &flags);
+ ret = 0;
+ break;
+
case PTRACE_DETACH: /* detach a process that was attached. */
ret = ptrace_detach(child, data);
break;
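To make the two new requests concrete, a hypothetical tracer loop (assuming the PTRACE_INTERRUPT, PTRACE_LISTEN and PTRACE_EVENT_STOP definitions added by this series; the wait-status encoding reports the event in bits 16..23):

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    static void watch_group_stops(pid_t pid)
    {
            int status;

            /* schedule a STOP trap; at least one trap is guaranteed */
            ptrace(PTRACE_INTERRUPT, pid, NULL, NULL);
            waitpid(pid, &status, 0);

            if (WIFSTOPPED(status) && status >> 16 == PTRACE_EVENT_STOP) {
                    /* LISTEN: not resumed, but no longer held in TRACED;
                     * an async event (e.g. SIGCONT ending a group stop)
                     * re-traps the tracee into STOP. */
                    ptrace(PTRACE_LISTEN, pid, NULL, NULL);
                    waitpid(pid, &status, 0);
            }
    }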
@@ -761,8 +874,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out;
}
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, data);
/*
* Some architectures need to do book-keeping after
* a ptrace attach.
@@ -772,7 +885,8 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
goto out_put_task_struct;
}
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
if (ret < 0)
goto out_put_task_struct;
@@ -903,8 +1017,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out;
}
- if (request == PTRACE_ATTACH) {
- ret = ptrace_attach(child);
+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
+ ret = ptrace_attach(child, request, data);
/*
* Some architectures need to do book-keeping after
* a ptrace attach.
@@ -914,7 +1028,8 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
goto out_put_task_struct;
}
- ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ ret = ptrace_check_attach(child, request == PTRACE_KILL ||
+ request == PTRACE_INTERRUPT);
if (!ret)
ret = compat_arch_ptrace(child, request, addr, data);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 75113cb7c4f..8aafbb80b8b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -68,6 +68,7 @@ struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
static struct rcu_state *rcu_state = &rcu_preempt_state;
+static void rcu_read_unlock_special(struct task_struct *t);
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
/*
@@ -147,7 +148,7 @@ static void rcu_preempt_note_context_switch(int cpu)
struct rcu_data *rdp;
struct rcu_node *rnp;
- if (t->rcu_read_lock_nesting &&
+ if (t->rcu_read_lock_nesting > 0 &&
(t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
/* Possibly blocking in an RCU read-side critical section. */
@@ -190,6 +191,14 @@ static void rcu_preempt_note_context_switch(int cpu)
rnp->gp_tasks = &t->rcu_node_entry;
}
raw_spin_unlock_irqrestore(&rnp->lock, flags);
+ } else if (t->rcu_read_lock_nesting < 0 &&
+ t->rcu_read_unlock_special) {
+
+ /*
+ * Complete exit from RCU read-side critical section on
+ * behalf of preempted instance of __rcu_read_unlock().
+ */
+ rcu_read_unlock_special(t);
}
/*
@@ -284,7 +293,7 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
* notify RCU core processing or task having blocked during the RCU
* read-side critical section.
*/
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
{
int empty;
int empty_exp;
@@ -309,7 +318,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
}
/* Hardware IRQ handlers cannot block. */
- if (in_irq()) {
+ if (in_irq() || in_serving_softirq()) {
local_irq_restore(flags);
return;
}
@@ -342,6 +351,11 @@ static void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
if (&t->rcu_node_entry == rnp->boost_tasks)
rnp->boost_tasks = np;
+ /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
+ if (t->rcu_boosted) {
+ special |= RCU_READ_UNLOCK_BOOSTED;
+ t->rcu_boosted = 0;
+ }
#endif /* #ifdef CONFIG_RCU_BOOST */
t->rcu_blocked_node = NULL;
@@ -358,7 +372,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
#ifdef CONFIG_RCU_BOOST
/* Unboost if we were boosted. */
if (special & RCU_READ_UNLOCK_BOOSTED) {
- t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
rt_mutex_unlock(t->rcu_boost_mutex);
t->rcu_boost_mutex = NULL;
}
@@ -387,13 +400,22 @@ void __rcu_read_unlock(void)
struct task_struct *t = current;
barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
- --t->rcu_read_lock_nesting;
- barrier(); /* decrement before load of ->rcu_read_unlock_special */
- if (t->rcu_read_lock_nesting == 0 &&
- unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
- rcu_read_unlock_special(t);
+ if (t->rcu_read_lock_nesting != 1)
+ --t->rcu_read_lock_nesting;
+ else {
+ t->rcu_read_lock_nesting = INT_MIN;
+ barrier(); /* assign before ->rcu_read_unlock_special load */
+ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+ rcu_read_unlock_special(t);
+ barrier(); /* ->rcu_read_unlock_special load before assign */
+ t->rcu_read_lock_nesting = 0;
+ }
#ifdef CONFIG_PROVE_LOCKING
- WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+ {
+ int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+ WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+ }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
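The control flow above is subtle enough to warrant a toy illustration. A standalone sketch (plain C, not kernel code, barriers omitted) of the INT_MIN sentinel: the nesting counter is parked at a large negative value while the outermost unlock runs its slow path, so a preemption in that window shows up as nesting < 0 and rcu_preempt_note_context_switch() can finish the cleanup on the task's behalf.

    #include <limits.h>

    static int nesting;      /* stand-in for t->rcu_read_lock_nesting */
    static int special;      /* stand-in for t->rcu_read_unlock_special */

    static void slow_path(void) { special = 0; /* deferred cleanup */ }

    static void toy_read_unlock(void)
    {
            if (nesting != 1) {
                    --nesting;              /* nested: fast path */
            } else {
                    nesting = INT_MIN;      /* mark slow path in progress */
                    if (special)
                            slow_path();    /* may be preempted here */
                    nesting = 0;            /* outermost unlock complete */
            }
            /* the real code brackets these steps with barrier()s */
    }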
@@ -589,7 +611,8 @@ static void rcu_preempt_check_callbacks(int cpu)
rcu_preempt_qs(cpu);
return;
}
- if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+ if (t->rcu_read_lock_nesting > 0 &&
+ per_cpu(rcu_preempt_data, cpu).qs_pending)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
@@ -695,9 +718,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
raw_spin_lock_irqsave(&rnp->lock, flags);
for (;;) {
- if (!sync_rcu_preempt_exp_done(rnp))
+ if (!sync_rcu_preempt_exp_done(rnp)) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
break;
+ }
if (rnp->parent == NULL) {
+ raw_spin_unlock_irqrestore(&rnp->lock, flags);
wake_up(&sync_rcu_preempt_exp_wq);
break;
}
@@ -707,7 +733,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
raw_spin_lock(&rnp->lock); /* irqs already disabled */
rnp->expmask &= ~mask;
}
- raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
/*
@@ -1174,7 +1199,7 @@ static int rcu_boost(struct rcu_node *rnp)
t = container_of(tb, struct task_struct, rcu_node_entry);
rt_mutex_init_proxy_locked(&mtx, t);
t->rcu_boost_mutex = &mtx;
- t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
+ t->rcu_boosted = 1;
raw_spin_unlock_irqrestore(&rnp->lock, flags);
rt_mutex_lock(&mtx); /* Side effect: boosts task t's priority. */
rt_mutex_unlock(&mtx); /* Keep lockdep happy. */
diff --git a/kernel/sched.c b/kernel/sched.c
index 3dc716f6d8a..fde6ff90352 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2544,13 +2544,9 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
}
#ifdef CONFIG_SMP
-static void sched_ttwu_pending(void)
+static void sched_ttwu_do_pending(struct task_struct *list)
{
struct rq *rq = this_rq();
- struct task_struct *list = xchg(&rq->wake_list, NULL);
-
- if (!list)
- return;
raw_spin_lock(&rq->lock);
@@ -2563,9 +2559,45 @@ static void sched_ttwu_pending(void)
raw_spin_unlock(&rq->lock);
}
+#ifdef CONFIG_HOTPLUG_CPU
+
+static void sched_ttwu_pending(void)
+{
+ struct rq *rq = this_rq();
+ struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+ if (!list)
+ return;
+
+ sched_ttwu_do_pending(list);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
void scheduler_ipi(void)
{
- sched_ttwu_pending();
+ struct rq *rq = this_rq();
+ struct task_struct *list = xchg(&rq->wake_list, NULL);
+
+ if (!list)
+ return;
+
+ /*
+ * Not all reschedule IPI handlers call irq_enter/irq_exit, since
+ * traditionally all their work was done from the interrupt return
+ * path. Now that we actually do some work, we need to make sure
+ * we do call them.
+ *
+ * Some archs already do call them; luckily, irq_enter/exit nest
+ * properly.
+ *
+ * Arguably we should visit all archs and update all handlers;
+ * however, a fair share of IPIs are still resched-only, so this would
+ * somewhat pessimize the simple resched case.
+ */
+ irq_enter();
+ sched_ttwu_do_pending(list);
+ irq_exit();
}
static void ttwu_queue_remote(struct task_struct *p, int cpu)
@@ -6557,7 +6589,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
break;
}
- if (!group->cpu_power) {
+ if (!group->sgp->power) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: domain->cpu_power not "
"set\n");
@@ -6581,9 +6613,9 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
printk(KERN_CONT " %s", str);
- if (group->cpu_power != SCHED_POWER_SCALE) {
+ if (group->sgp->power != SCHED_POWER_SCALE) {
printk(KERN_CONT " (cpu_power = %d)",
- group->cpu_power);
+ group->sgp->power);
}
group = group->next;
@@ -6774,11 +6806,39 @@ static struct root_domain *alloc_rootdomain(void)
return rd;
}
+static void free_sched_groups(struct sched_group *sg, int free_sgp)
+{
+ struct sched_group *tmp, *first;
+
+ if (!sg)
+ return;
+
+ first = sg;
+ do {
+ tmp = sg->next;
+
+ if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
+ kfree(sg->sgp);
+
+ kfree(sg);
+ sg = tmp;
+ } while (sg != first);
+}
+
static void free_sched_domain(struct rcu_head *rcu)
{
struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
- if (atomic_dec_and_test(&sd->groups->ref))
+
+ /*
+ * If it's an overlapping domain it has private groups; iterate and
+ * nuke them all.
+ */
+ if (sd->flags & SD_OVERLAP) {
+ free_sched_groups(sd->groups, 1);
+ } else if (atomic_dec_and_test(&sd->groups->ref)) {
+ kfree(sd->groups->sgp);
kfree(sd->groups);
+ }
kfree(sd);
}
@@ -6945,6 +7005,7 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
struct sd_data {
struct sched_domain **__percpu sd;
struct sched_group **__percpu sg;
+ struct sched_group_power **__percpu sgp;
};
struct s_data {
@@ -6964,15 +7025,73 @@ struct sched_domain_topology_level;
typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
+#define SDTL_OVERLAP 0x01
+
struct sched_domain_topology_level {
sched_domain_init_f init;
sched_domain_mask_f mask;
+ int flags;
struct sd_data data;
};
-/*
- * Assumes the sched_domain tree is fully constructed
- */
+static int
+build_overlap_sched_groups(struct sched_domain *sd, int cpu)
+{
+ struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
+ const struct cpumask *span = sched_domain_span(sd);
+ struct cpumask *covered = sched_domains_tmpmask;
+ struct sd_data *sdd = sd->private;
+ struct sched_domain *child;
+ int i;
+
+ cpumask_clear(covered);
+
+ for_each_cpu(i, span) {
+ struct cpumask *sg_span;
+
+ if (cpumask_test_cpu(i, covered))
+ continue;
+
+ sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
+ GFP_KERNEL, cpu_to_node(i));
+
+ if (!sg)
+ goto fail;
+
+ sg_span = sched_group_cpus(sg);
+
+ child = *per_cpu_ptr(sdd->sd, i);
+ if (child->child) {
+ child = child->child;
+ cpumask_copy(sg_span, sched_domain_span(child));
+ } else
+ cpumask_set_cpu(i, sg_span);
+
+ cpumask_or(covered, covered, sg_span);
+
+ sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
+ atomic_inc(&sg->sgp->ref);
+
+ if (cpumask_test_cpu(cpu, sg_span))
+ groups = sg;
+
+ if (!first)
+ first = sg;
+ if (last)
+ last->next = sg;
+ last = sg;
+ last->next = first;
+ }
+ sd->groups = groups;
+
+ return 0;
+
+fail:
+ free_sched_groups(first, 0);
+
+ return -ENOMEM;
+}
+
static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
{
struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
@@ -6981,24 +7100,24 @@ static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
if (child)
cpu = cpumask_first(sched_domain_span(child));
- if (sg)
+ if (sg) {
*sg = *per_cpu_ptr(sdd->sg, cpu);
+ (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
+ atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
+ }
return cpu;
}
/*
- * build_sched_groups takes the cpumask we wish to span, and a pointer
- * to a function which identifies what group(along with sched group) a CPU
- * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids
- * (due to the fact that we keep track of groups covered with a struct cpumask).
- *
* build_sched_groups will build a circular linked list of the groups
* covered by the given span, and will set each group's ->cpumask correctly,
* and ->cpu_power to 0.
+ *
+ * Assumes the sched_domain tree is fully constructed
*/
-static void
-build_sched_groups(struct sched_domain *sd)
+static int
+build_sched_groups(struct sched_domain *sd, int cpu)
{
struct sched_group *first = NULL, *last = NULL;
struct sd_data *sdd = sd->private;
@@ -7006,6 +7125,12 @@ build_sched_groups(struct sched_domain *sd)
struct cpumask *covered;
int i;
+ get_group(cpu, sdd, &sd->groups);
+ atomic_inc(&sd->groups->ref);
+
+ if (cpu != cpumask_first(sched_domain_span(sd)))
+ return 0;
+
lockdep_assert_held(&sched_domains_mutex);
covered = sched_domains_tmpmask;
@@ -7020,7 +7145,7 @@ build_sched_groups(struct sched_domain *sd)
continue;
cpumask_clear(sched_group_cpus(sg));
- sg->cpu_power = 0;
+ sg->sgp->power = 0;
for_each_cpu(j, span) {
if (get_group(j, sdd, NULL) != group)
@@ -7037,6 +7162,8 @@ build_sched_groups(struct sched_domain *sd)
last = sg;
}
last->next = first;
+
+ return 0;
}
/*
@@ -7051,12 +7178,17 @@ build_sched_groups(struct sched_domain *sd)
*/
static void init_sched_groups_power(int cpu, struct sched_domain *sd)
{
- WARN_ON(!sd || !sd->groups);
+ struct sched_group *sg = sd->groups;
- if (cpu != group_first_cpu(sd->groups))
- return;
+ WARN_ON(!sd || !sg);
- sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+ do {
+ sg->group_weight = cpumask_weight(sched_group_cpus(sg));
+ sg = sg->next;
+ } while (sg != sd->groups);
+
+ if (cpu != group_first_cpu(sg))
+ return;
update_group_power(sd, cpu);
}
@@ -7177,15 +7309,15 @@ static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
static void claim_allocations(int cpu, struct sched_domain *sd)
{
struct sd_data *sdd = sd->private;
- struct sched_group *sg = sd->groups;
WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
*per_cpu_ptr(sdd->sd, cpu) = NULL;
- if (cpu == cpumask_first(sched_group_cpus(sg))) {
- WARN_ON_ONCE(*per_cpu_ptr(sdd->sg, cpu) != sg);
+ if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
*per_cpu_ptr(sdd->sg, cpu) = NULL;
- }
+
+ if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
+ *per_cpu_ptr(sdd->sgp, cpu) = NULL;
}
#ifdef CONFIG_SCHED_SMT
@@ -7210,7 +7342,7 @@ static struct sched_domain_topology_level default_topology[] = {
#endif
{ sd_init_CPU, cpu_cpu_mask, },
#ifdef CONFIG_NUMA
- { sd_init_NODE, cpu_node_mask, },
+ { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
{ sd_init_ALLNODES, cpu_allnodes_mask, },
#endif
{ NULL, },
@@ -7234,9 +7366,14 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
if (!sdd->sg)
return -ENOMEM;
+ sdd->sgp = alloc_percpu(struct sched_group_power *);
+ if (!sdd->sgp)
+ return -ENOMEM;
+
for_each_cpu(j, cpu_map) {
struct sched_domain *sd;
struct sched_group *sg;
+ struct sched_group_power *sgp;
sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
GFP_KERNEL, cpu_to_node(j));
@@ -7251,6 +7388,13 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
return -ENOMEM;
*per_cpu_ptr(sdd->sg, j) = sg;
+
+ sgp = kzalloc_node(sizeof(struct sched_group_power),
+ GFP_KERNEL, cpu_to_node(j));
+ if (!sgp)
+ return -ENOMEM;
+
+ *per_cpu_ptr(sdd->sgp, j) = sgp;
}
}
@@ -7266,11 +7410,15 @@ static void __sdt_free(const struct cpumask *cpu_map)
struct sd_data *sdd = &tl->data;
for_each_cpu(j, cpu_map) {
- kfree(*per_cpu_ptr(sdd->sd, j));
+ struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
+ if (sd && (sd->flags & SD_OVERLAP))
+ free_sched_groups(sd->groups, 0);
kfree(*per_cpu_ptr(sdd->sg, j));
+ kfree(*per_cpu_ptr(sdd->sgp, j));
}
free_percpu(sdd->sd);
free_percpu(sdd->sg);
+ free_percpu(sdd->sgp);
}
}
@@ -7316,8 +7464,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
struct sched_domain_topology_level *tl;
sd = NULL;
- for (tl = sched_domain_topology; tl->init; tl++)
+ for (tl = sched_domain_topology; tl->init; tl++) {
sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
+ if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
+ sd->flags |= SD_OVERLAP;
+ if (cpumask_equal(cpu_map, sched_domain_span(sd)))
+ break;
+ }
while (sd->child)
sd = sd->child;
@@ -7329,13 +7482,13 @@ static int build_sched_domains(const struct cpumask *cpu_map,
for_each_cpu(i, cpu_map) {
for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
sd->span_weight = cpumask_weight(sched_domain_span(sd));
- get_group(i, sd->private, &sd->groups);
- atomic_inc(&sd->groups->ref);
-
- if (i != cpumask_first(sched_domain_span(sd)))
- continue;
-
- build_sched_groups(sd);
+ if (sd->flags & SD_OVERLAP) {
+ if (build_overlap_sched_groups(sd, i))
+ goto error;
+ } else {
+ if (build_sched_groups(sd, i))
+ goto error;
+ }
}
}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 433491c2dc8..c768588e180 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1585,7 +1585,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
}
/* Adjust by relative CPU power of the group */
- avg_load = (avg_load * SCHED_POWER_SCALE) / group->cpu_power;
+ avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
if (local_group) {
this_load = avg_load;
@@ -2631,7 +2631,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power >>= SCHED_POWER_SHIFT;
}
- sdg->cpu_power_orig = power;
+ sdg->sgp->power_orig = power;
if (sched_feat(ARCH_POWER))
power *= arch_scale_freq_power(sd, cpu);
@@ -2647,7 +2647,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
power = 1;
cpu_rq(cpu)->cpu_power = power;
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
static void update_group_power(struct sched_domain *sd, int cpu)
@@ -2665,11 +2665,11 @@ static void update_group_power(struct sched_domain *sd, int cpu)
group = child->groups;
do {
- power += group->cpu_power;
+ power += group->sgp->power;
group = group->next;
} while (group != child->groups);
- sdg->cpu_power = power;
+ sdg->sgp->power = power;
}
/*
@@ -2691,7 +2691,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
/*
* If ~90% of the cpu_power is still there, we're good.
*/
- if (group->cpu_power * 32 > group->cpu_power_orig * 29)
+ if (group->sgp->power * 32 > group->sgp->power_orig * 29)
return 1;
return 0;
@@ -2771,7 +2771,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
}
/* Adjust by relative CPU power of the group */
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->cpu_power;
+ sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
/*
* Consider the group unbalanced when the imbalance is larger
@@ -2788,7 +2788,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power,
+ sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
SCHED_POWER_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(sd, group);
@@ -2877,7 +2877,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
return;
sds->total_load += sgs.group_load;
- sds->total_pwr += sg->cpu_power;
+ sds->total_pwr += sg->sgp->power;
/*
* In case the child domain prefers tasks go to siblings
@@ -2962,7 +2962,7 @@ static int check_asym_packing(struct sched_domain *sd,
if (this_cpu > busiest_cpu)
return 0;
- *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
+ *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
SCHED_POWER_SCALE);
return 1;
}
@@ -2993,7 +2993,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
scaled_busy_load_per_task = sds->busiest_load_per_task
* SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->cpu_power;
+ scaled_busy_load_per_task /= sds->busiest->sgp->power;
if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
(scaled_busy_load_per_task * imbn)) {
@@ -3007,28 +3007,28 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
* moving them.
*/
- pwr_now += sds->busiest->cpu_power *
+ pwr_now += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->cpu_power *
+ pwr_now += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load);
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->cpu_power;
+ sds->busiest->sgp->power;
if (sds->max_load > tmp)
- pwr_move += sds->busiest->cpu_power *
+ pwr_move += sds->busiest->sgp->power *
min(sds->busiest_load_per_task, sds->max_load - tmp);
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->cpu_power <
+ if (sds->max_load * sds->busiest->sgp->power <
sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->cpu_power) /
- sds->this->cpu_power;
+ tmp = (sds->max_load * sds->busiest->sgp->power) /
+ sds->this->sgp->power;
else
tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->cpu_power;
- pwr_move += sds->this->cpu_power *
+ sds->this->sgp->power;
+ pwr_move += sds->this->sgp->power *
min(sds->this_load_per_task, sds->this_load + tmp);
pwr_move /= SCHED_POWER_SCALE;
@@ -3074,7 +3074,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
- load_above_capacity /= sds->busiest->cpu_power;
+ load_above_capacity /= sds->busiest->sgp->power;
}
/*
@@ -3090,8 +3090,8 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
/* How much load to actually move to equalise the imbalance */
- *imbalance = min(max_pull * sds->busiest->cpu_power,
- (sds->avg_load - sds->this_load) * sds->this->cpu_power)
+ *imbalance = min(max_pull * sds->busiest->sgp->power,
+ (sds->avg_load - sds->this_load) * sds->this->sgp->power)
/ SCHED_POWER_SCALE;
/*
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index be40f7371ee..1e7066d76c2 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -70,3 +70,5 @@ SCHED_FEAT(NONIRQ_POWER, 1)
* using the scheduler IPI. Reduces rq->lock contention/bounces.
*/
SCHED_FEAT(TTWU_QUEUE, 1)
+
+SCHED_FEAT(FORCE_SD_OVERLAP, 0)
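(With CONFIG_SCHED_DEBUG, this feature can be flipped at runtime by writing FORCE_SD_OVERLAP or NO_FORCE_SD_OVERLAP to /sys/kernel/debug/sched_features, which is handy for exercising the overlap path on topologies that would not otherwise set SDTL_OVERLAP.)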
diff --git a/kernel/signal.c b/kernel/signal.c
index ff767860332..d7f70aed1cc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -87,7 +87,7 @@ static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
/*
* Tracers may want to know about even ignored signals.
*/
- return !tracehook_consider_ignored_signal(t, sig);
+ return !t->ptrace;
}
/*
@@ -124,7 +124,7 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
static int recalc_sigpending_tsk(struct task_struct *t)
{
- if ((t->group_stop & GROUP_STOP_PENDING) ||
+ if ((t->jobctl & JOBCTL_PENDING_MASK) ||
PENDING(&t->pending, &t->blocked) ||
PENDING(&t->signal->shared_pending, &t->blocked)) {
set_tsk_thread_flag(t, TIF_SIGPENDING);
@@ -150,9 +150,7 @@ void recalc_sigpending_and_wake(struct task_struct *t)
void recalc_sigpending(void)
{
- if (unlikely(tracehook_force_sigpending()))
- set_thread_flag(TIF_SIGPENDING);
- else if (!recalc_sigpending_tsk(current) && !freezing(current))
+ if (!recalc_sigpending_tsk(current) && !freezing(current))
clear_thread_flag(TIF_SIGPENDING);
}
@@ -224,47 +222,93 @@ static inline void print_dropped_signal(int sig)
}
/**
- * task_clear_group_stop_trapping - clear group stop trapping bit
+ * task_set_jobctl_pending - set jobctl pending bits
* @task: target task
+ * @mask: pending bits to set
*
- * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us. Clear it
- * and wake up the ptracer. Note that we don't need any further locking.
- * @task->siglock guarantees that @task->parent points to the ptracer.
+ * Set @mask in @task->jobctl. @mask must be a subset of
+ * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
+ * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
+ * cleared. If @task is already being killed or exiting, this function
+ * becomes a no-op.
+ *
+ * CONTEXT:
+ * Must be called with @task->sighand->siglock held.
+ *
+ * RETURNS:
+ * %true if @mask is set, %false if it became a no-op because @task was dying.
+ */
+bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+{
+ BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
+ JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
+ BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
+
+ if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+ return false;
+
+ if (mask & JOBCTL_STOP_SIGMASK)
+ task->jobctl &= ~JOBCTL_STOP_SIGMASK;
+
+ task->jobctl |= mask;
+ return true;
+}
+
+/**
+ * task_clear_jobctl_trapping - clear jobctl trapping bit
+ * @task: target task
+ *
+ * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
+ * Clear it and wake up the ptracer. Note that we don't need any further
+ * locking. @task->siglock guarantees that @task->parent points to the
+ * ptracer.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
-static void task_clear_group_stop_trapping(struct task_struct *task)
+void task_clear_jobctl_trapping(struct task_struct *task)
{
- if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
- task->group_stop &= ~GROUP_STOP_TRAPPING;
- __wake_up_sync_key(&task->parent->signal->wait_chldexit,
- TASK_UNINTERRUPTIBLE, 1, task);
+ if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
+ task->jobctl &= ~JOBCTL_TRAPPING;
+ wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
}
}
/**
- * task_clear_group_stop_pending - clear pending group stop
+ * task_clear_jobctl_pending - clear jobctl pending bits
* @task: target task
+ * @mask: pending bits to clear
*
- * Clear group stop states for @task.
+ * Clear @mask from @task->jobctl. @mask must be a subset of
+ * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
+ * STOP bits are cleared together.
+ *
+ * If clearing of @mask leaves no stop or trap pending, this function calls
+ * task_clear_jobctl_trapping().
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
*/
-void task_clear_group_stop_pending(struct task_struct *task)
+void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
- task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME |
- GROUP_STOP_DEQUEUED);
+ BUG_ON(mask & ~JOBCTL_PENDING_MASK);
+
+ if (mask & JOBCTL_STOP_PENDING)
+ mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
+
+ task->jobctl &= ~mask;
+
+ if (!(task->jobctl & JOBCTL_PENDING_MASK))
+ task_clear_jobctl_trapping(task);
}
/**
* task_participate_group_stop - participate in a group stop
* @task: task participating in a group stop
*
- * @task has GROUP_STOP_PENDING set and is participating in a group stop.
+ * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
* Group stop states are cleared and the group stop count is consumed if
- * %GROUP_STOP_CONSUME was set. If the consumption completes the group
+ * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
* stop, the appropriate %SIGNAL_* flags are set.
*
* CONTEXT:
@@ -277,11 +321,11 @@ void task_clear_group_stop_pending(struct task_struct *task)
static bool task_participate_group_stop(struct task_struct *task)
{
struct signal_struct *sig = task->signal;
- bool consume = task->group_stop & GROUP_STOP_CONSUME;
+ bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
- WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
+ WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
- task_clear_group_stop_pending(task);
+ task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
if (!consume)
return false;
@@ -449,7 +493,8 @@ int unhandled_signal(struct task_struct *tsk, int sig)
return 1;
if (handler != SIG_IGN && handler != SIG_DFL)
return 0;
- return !tracehook_consider_fatal_signal(tsk, sig);
+ /* if ptraced, let the tracer decide */
+ return !tsk->ptrace;
}
/*
@@ -604,7 +649,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
* is to alert stop-signal processing code when another
* processor has come along and cleared the flag.
*/
- current->group_stop |= GROUP_STOP_DEQUEUED;
+ current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
/*
@@ -773,6 +818,32 @@ static int check_kill_permission(int sig, struct siginfo *info,
return security_task_kill(t, info, sig, 0);
}
+/**
+ * ptrace_trap_notify - schedule trap to notify ptracer
+ * @t: tracee wanting to notify tracer
+ *
+ * This function schedules a sticky ptrace trap which is cleared on the
+ * next TRAP_STOP to notify the ptracer of an event. @t must have been
+ * seized by the ptracer.
+ *
+ * If @t is running, a STOP trap will be taken. If trapped for STOP and
+ * the ptracer is listening for events, the tracee is woken up so that it
+ * can re-trap for the new event. If trapped otherwise, the STOP trap
+ * will eventually be taken without returning to userland after the
+ * existing traps are finished by PTRACE_CONT.
+ *
+ * CONTEXT:
+ * Must be called with @t->sighand->siglock held.
+ */
+static void ptrace_trap_notify(struct task_struct *t)
+{
+ WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
+ assert_spin_locked(&t->sighand->siglock);
+
+ task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
+ signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+}
+
/*
* Handle magic process-wide effects of stop/continue signals. Unlike
* the signal actions, these happen immediately at signal-generation
@@ -809,9 +880,12 @@ static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
t = p;
do {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- wake_up_state(t, __TASK_STOPPED);
+ if (likely(!(t->ptrace & PT_SEIZED)))
+ wake_up_state(t, __TASK_STOPPED);
+ else
+ ptrace_trap_notify(t);
} while_each_thread(p, t);
/*
@@ -908,8 +982,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
if (sig_fatal(p, sig) &&
!(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
- (sig == SIGKILL ||
- !tracehook_consider_fatal_signal(t, sig))) {
+ (sig == SIGKILL || !t->ptrace)) {
/*
* This signal will be fatal to the whole group.
*/
@@ -925,7 +998,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
signal->group_stop_count = 0;
t = p;
do {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
sigaddset(&t->pending.signal, SIGKILL);
signal_wake_up(t, 1);
} while_each_thread(p, t);
@@ -1160,7 +1233,7 @@ int zap_other_threads(struct task_struct *p)
p->signal->group_stop_count = 0;
while_each_thread(p, t) {
- task_clear_group_stop_pending(t);
+ task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
count++;
/* Don't bother with already dead threads */
@@ -1178,18 +1251,25 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
{
struct sighand_struct *sighand;
- rcu_read_lock();
for (;;) {
+ local_irq_save(*flags);
+ rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL))
+ if (unlikely(sighand == NULL)) {
+ rcu_read_unlock();
+ local_irq_restore(*flags);
break;
+ }
- spin_lock_irqsave(&sighand->siglock, *flags);
- if (likely(sighand == tsk->sighand))
+ spin_lock(&sighand->siglock);
+ if (likely(sighand == tsk->sighand)) {
+ rcu_read_unlock();
break;
- spin_unlock_irqrestore(&sighand->siglock, *flags);
+ }
+ spin_unlock(&sighand->siglock);
+ rcu_read_unlock();
+ local_irq_restore(*flags);
}
- rcu_read_unlock();
return sighand;
}
@@ -1504,22 +1584,22 @@ ret:
* Let a parent know about the death of a child.
* For a stopped/continued status change, use do_notify_parent_cldstop instead.
*
- * Returns -1 if our parent ignored us and so we've switched to
- * self-reaping, or else @sig.
+ * Returns true if our parent ignored us and so we've switched to
+ * self-reaping.
*/
-int do_notify_parent(struct task_struct *tsk, int sig)
+bool do_notify_parent(struct task_struct *tsk, int sig)
{
struct siginfo info;
unsigned long flags;
struct sighand_struct *psig;
- int ret = sig;
+ bool autoreap = false;
BUG_ON(sig == -1);
/* do_notify_parent_cldstop should have been called instead. */
BUG_ON(task_is_stopped_or_traced(tsk));
- BUG_ON(!task_ptrace(tsk) &&
+ BUG_ON(!tsk->ptrace &&
(tsk->group_leader != tsk || !thread_group_empty(tsk)));
info.si_signo = sig;
@@ -1558,7 +1638,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
- if (!task_ptrace(tsk) && sig == SIGCHLD &&
+ if (!tsk->ptrace && sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
/*
@@ -1576,16 +1656,16 @@ int do_notify_parent(struct task_struct *tsk, int sig)
* is implementation-defined: we do (if you don't want
* it, just use SIG_IGN instead).
*/
- ret = tsk->exit_signal = -1;
+ autoreap = true;
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
- sig = -1;
+ sig = 0;
}
- if (valid_signal(sig) && sig > 0)
+ if (valid_signal(sig) && sig)
__group_send_sig_info(sig, &info, tsk->parent);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
- return ret;
+ return autoreap;
}
/**
@@ -1658,7 +1738,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
static inline int may_ptrace_stop(void)
{
- if (!likely(task_ptrace(current)))
+ if (!likely(current->ptrace))
return 0;
/*
* Are we in the middle of do_coredump?
@@ -1687,15 +1767,6 @@ static int sigkill_pending(struct task_struct *tsk)
}
/*
- * Test whether the target task of the usual cldstop notification - the
- * real_parent of @child - is in the same group as the ptracer.
- */
-static bool real_parent_is_ptracer(struct task_struct *child)
-{
- return same_thread_group(child->parent, child->real_parent);
-}
-
-/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
@@ -1732,31 +1803,34 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
}
/*
- * If @why is CLD_STOPPED, we're trapping to participate in a group
- * stop. Do the bookkeeping. Note that if SIGCONT was delievered
- * while siglock was released for the arch hook, PENDING could be
- * clear now. We act as if SIGCONT is received after TASK_TRACED
- * is entered - ignore it.
+ * We're committing to trapping. TRACED should be visible before
+ * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
+ * Also, transition to TRACED and updates to ->jobctl should be
+ * atomic with respect to siglock and should be done after the arch
+ * hook as siglock is released and regrabbed across it.
*/
- if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING))
- gstop_done = task_participate_group_stop(current);
+ set_current_state(TASK_TRACED);
current->last_siginfo = info;
current->exit_code = exit_code;
/*
- * TRACED should be visible before TRAPPING is cleared; otherwise,
- * the tracer might fail do_wait().
+ * If @why is CLD_STOPPED, we're trapping to participate in a group
+ * stop. Do the bookkeeping. Note that if SIGCONT was delivered
+ * across siglock relocks since INTERRUPT was scheduled, PENDING
+ * could be clear now. We act as if SIGCONT is received after
+ * TASK_TRACED is entered - ignore it.
*/
- set_current_state(TASK_TRACED);
+ if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
+ gstop_done = task_participate_group_stop(current);
- /*
- * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and
- * transition to TASK_TRACED should be atomic with respect to
- * siglock. This hsould be done after the arch hook as siglock is
- * released and regrabbed across it.
- */
- task_clear_group_stop_trapping(current);
+ /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
+ task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
+ if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
+ task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
+
+ /* entering a trap, clear TRAPPING */
+ task_clear_jobctl_trapping(current);
spin_unlock_irq(&current->sighand->siglock);
read_lock(&tasklist_lock);
@@ -1772,7 +1846,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
* separately unless they're gonna be duplicates.
*/
do_notify_parent_cldstop(current, true, why);
- if (gstop_done && !real_parent_is_ptracer(current))
+ if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
/*
@@ -1792,9 +1866,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
*
* If @gstop_done, the ptracer went away between group stop
* completion and here. During detach, it would have set
- * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED
- * in do_signal_stop() on return, so notifying the real
- * parent of the group stop completion is enough.
+ * JOBCTL_STOP_PENDING on us and we'll re-enter
+ * TASK_STOPPED in do_signal_stop() on return, so notifying
+ * the real parent of the group stop completion is enough.
*/
if (gstop_done)
do_notify_parent_cldstop(current, false, why);
@@ -1820,6 +1894,9 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
spin_lock_irq(&current->sighand->siglock);
current->last_siginfo = NULL;
+ /* LISTENING can be set only during STOP traps; clear it */
+ current->jobctl &= ~JOBCTL_LISTENING;
+
/*
* Queued signals ignored us while we were stopped for tracing.
* So check for any that we should take before resuming user mode.
@@ -1828,44 +1905,66 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
recalc_sigpending_tsk(current);
}
-void ptrace_notify(int exit_code)
+static void ptrace_do_notify(int signr, int exit_code, int why)
{
siginfo_t info;
- BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
-
memset(&info, 0, sizeof info);
- info.si_signo = SIGTRAP;
+ info.si_signo = signr;
info.si_code = exit_code;
info.si_pid = task_pid_vnr(current);
info.si_uid = current_uid();
/* Let the debugger run. */
+ ptrace_stop(exit_code, why, 1, &info);
+}
+
+void ptrace_notify(int exit_code)
+{
+ BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
+
spin_lock_irq(&current->sighand->siglock);
- ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
+ ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
spin_unlock_irq(&current->sighand->siglock);
}
-/*
- * This performs the stopping for SIGSTOP and other stop signals.
- * We have to stop all threads in the thread group.
- * Returns non-zero if we've actually stopped and released the siglock.
- * Returns zero if we didn't stop and still hold the siglock.
+/**
+ * do_signal_stop - handle group stop for SIGSTOP and other stop signals
+ * @signr: signr causing group stop if initiating
+ *
+ * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
+ * and participate in it. If already set, participate in the existing
+ * group stop. If this task participated in a group stop (and thus
+ * slept), %true is returned with siglock released.
+ *
+ * If ptraced, this function doesn't handle stop itself. Instead,
+ * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
+ * untouched. The caller must ensure that INTERRUPT trap handling takes
+ * place afterwards.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which is released
+ * on %true return.
+ *
+ * RETURNS:
+ * %false if group stop is already cancelled or ptrace trap is scheduled.
+ * %true if participated in group stop.
*/
-static int do_signal_stop(int signr)
+static bool do_signal_stop(int signr)
+ __releases(&current->sighand->siglock)
{
struct signal_struct *sig = current->signal;
- if (!(current->group_stop & GROUP_STOP_PENDING)) {
- unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
+ if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
+ unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
struct task_struct *t;
- /* signr will be recorded in task->group_stop for retries */
- WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK);
+ /* signr will be recorded in task->jobctl for retries */
+ WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
- if (!likely(current->group_stop & GROUP_STOP_DEQUEUED) ||
+ if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
unlikely(signal_group_exit(sig)))
- return 0;
+ return false;
/*
* There is no group stop already in progress. We must
* initiate one now.
@@ -1888,28 +1987,32 @@ static int do_signal_stop(int signr)
if (!(sig->flags & SIGNAL_STOP_STOPPED))
sig->group_exit_code = signr;
else
- WARN_ON_ONCE(!task_ptrace(current));
+ WARN_ON_ONCE(!current->ptrace);
+
+ sig->group_stop_count = 0;
+
+ if (task_set_jobctl_pending(current, signr | gstop))
+ sig->group_stop_count++;
- current->group_stop &= ~GROUP_STOP_SIGMASK;
- current->group_stop |= signr | gstop;
- sig->group_stop_count = 1;
for (t = next_thread(current); t != current;
t = next_thread(t)) {
- t->group_stop &= ~GROUP_STOP_SIGMASK;
/*
* Setting state to TASK_STOPPED for a group
* stop is always done with the siglock held,
* so this check has no races.
*/
- if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
- t->group_stop |= signr | gstop;
+ if (!task_is_stopped(t) &&
+ task_set_jobctl_pending(t, signr | gstop)) {
sig->group_stop_count++;
- signal_wake_up(t, 0);
+ if (likely(!(t->ptrace & PT_SEIZED)))
+ signal_wake_up(t, 0);
+ else
+ ptrace_trap_notify(t);
}
}
}
-retry:
- if (likely(!task_ptrace(current))) {
+
+ if (likely(!current->ptrace)) {
int notify = 0;
/*
@@ -1940,43 +2043,65 @@ retry:
/* Now we don't run again until woken by SIGCONT or SIGKILL */
schedule();
-
- spin_lock_irq(&current->sighand->siglock);
+ return true;
} else {
- ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK,
- CLD_STOPPED, 0, NULL);
- current->exit_code = 0;
+ /*
+ * While ptraced, group stop is handled by STOP trap.
+ * Schedule it and let the caller deal with it.
+ */
+ task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
+ return false;
}
+}
- /*
- * GROUP_STOP_PENDING could be set if another group stop has
- * started since being woken up or ptrace wants us to transit
- * between TASK_STOPPED and TRACED. Retry group stop.
- */
- if (current->group_stop & GROUP_STOP_PENDING) {
- WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK));
- goto retry;
+/**
+ * do_jobctl_trap - take care of ptrace jobctl traps
+ *
+ * When PT_SEIZED, it's used for both group stop and explicit
+ * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
+ * accompanying siginfo. If stopped, lower eight bits of exit_code contain
+ * the stop signal; otherwise, %SIGTRAP.
+ *
+ * When !PT_SEIZED, it's used only for group stop trap with stop signal
+ * number as exit_code and no siginfo.
+ *
+ * CONTEXT:
+ * Must be called with @current->sighand->siglock held, which may be
+ * released and re-acquired before returning with intervening sleep.
+ */
+static void do_jobctl_trap(void)
+{
+ struct signal_struct *signal = current->signal;
+ int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
+
+ if (current->ptrace & PT_SEIZED) {
+ if (!signal->group_stop_count &&
+ !(signal->flags & SIGNAL_STOP_STOPPED))
+ signr = SIGTRAP;
+ WARN_ON_ONCE(!signr);
+ ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
+ CLD_STOPPED);
+ } else {
+ WARN_ON_ONCE(!signr);
+ ptrace_stop(signr, CLD_STOPPED, 0, NULL);
+ current->exit_code = 0;
}
-
- /* PTRACE_ATTACH might have raced with task killing, clear trapping */
- task_clear_group_stop_trapping(current);
-
- spin_unlock_irq(&current->sighand->siglock);
-
- tracehook_finish_jctl();
-
- return 1;
}
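Tracer-side view of the exit_code format composed above, as a hypothetical decoding sketch: wait(2) exposes signr | (PTRACE_EVENT_STOP << 8) such that WSTOPSIG(status) yields the signal and status >> 16 yields the event.

    #include <sys/ptrace.h>
    #include <sys/wait.h>
    #include <stdio.h>

    static void decode_stop(int status)
    {
            if (!WIFSTOPPED(status))
                    return;

            if (status >> 16 == PTRACE_EVENT_STOP)
                    printf("STOP trap, signal %d (SIGTRAP unless a group stop)\n",
                           WSTOPSIG(status));
            else
                    printf("signal-delivery stop, signal %d\n",
                           WSTOPSIG(status));
    }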
static int ptrace_signal(int signr, siginfo_t *info,
struct pt_regs *regs, void *cookie)
{
- if (!task_ptrace(current))
- return signr;
-
ptrace_signal_deliver(regs, cookie);
-
- /* Let the debugger run. */
+ /*
+ * We do not check sig_kernel_stop(signr) but set this marker
+ * unconditionally because we do not know whether the debugger
+ * will change signr. This flag has no meaning unless we are going
+ * to stop after return from ptrace_stop(). In this case it will
+ * be checked in do_signal_stop(); we should only stop if it was
+ * not cleared by SIGCONT while we were sleeping. See also the
+ * comment in dequeue_signal().
+ */
+ current->jobctl |= JOBCTL_STOP_DEQUEUED;
ptrace_stop(signr, CLD_TRAPPED, 0, info);
/* We're back. Did the debugger cancel the sig? */
@@ -2032,7 +2157,6 @@ relock:
* the CLD_ si_code into SIGNAL_CLD_MASK bits.
*/
if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
- struct task_struct *leader;
int why;
if (signal->flags & SIGNAL_CLD_CONTINUED)
@@ -2053,13 +2177,11 @@ relock:
* a duplicate.
*/
read_lock(&tasklist_lock);
-
do_notify_parent_cldstop(current, false, why);
- leader = current->group_leader;
- if (task_ptrace(leader) && !real_parent_is_ptracer(leader))
- do_notify_parent_cldstop(leader, true, why);
-
+ if (ptrace_reparented(current->group_leader))
+ do_notify_parent_cldstop(current->group_leader,
+ true, why);
read_unlock(&tasklist_lock);
goto relock;
@@ -2067,37 +2189,31 @@ relock:
for (;;) {
struct k_sigaction *ka;
- /*
- * Tracing can induce an artificial signal and choose sigaction.
- * The return value in @signr determines the default action,
- * but @info->si_signo is the signal number we will report.
- */
- signr = tracehook_get_signal(current, regs, info, return_ka);
- if (unlikely(signr < 0))
+
+ if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
+ do_signal_stop(0))
goto relock;
- if (unlikely(signr != 0))
- ka = return_ka;
- else {
- if (unlikely(current->group_stop &
- GROUP_STOP_PENDING) && do_signal_stop(0))
- goto relock;
- signr = dequeue_signal(current, &current->blocked,
- info);
+ if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
+ do_jobctl_trap();
+ spin_unlock_irq(&sighand->siglock);
+ goto relock;
+ }
- if (!signr)
- break; /* will return 0 */
+ signr = dequeue_signal(current, &current->blocked, info);
- if (signr != SIGKILL) {
- signr = ptrace_signal(signr, info,
- regs, cookie);
- if (!signr)
- continue;
- }
+ if (!signr)
+ break; /* will return 0 */
- ka = &sighand->action[signr-1];
+ if (unlikely(current->ptrace) && signr != SIGKILL) {
+ signr = ptrace_signal(signr, info,
+ regs, cookie);
+ if (!signr)
+ continue;
}
+ ka = &sighand->action[signr-1];
+
/* Trace actually delivered signals. */
trace_signal_deliver(signr, info, ka);
@@ -2253,7 +2369,7 @@ void exit_signals(struct task_struct *tsk)
signotset(&unblocked);
retarget_shared_pending(tsk, &unblocked);
- if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
+ if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
task_participate_group_stop(tsk))
group_stop = CLD_STOPPED;
out:
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 40cf63ddd4b..fca82c32042 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -315,16 +315,24 @@ static inline void invoke_softirq(void)
{
if (!force_irqthreads)
__do_softirq();
- else
+ else {
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
}
#else
static inline void invoke_softirq(void)
{
if (!force_irqthreads)
do_softirq();
- else
+ else {
+ __local_bh_disable((unsigned long)__builtin_return_address(0),
+ SOFTIRQ_OFFSET);
wakeup_softirqd();
+ __local_bh_enable(SOFTIRQ_OFFSET);
+ }
}
#endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0400553f0d0..25fb1b0e53f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -221,7 +221,7 @@ typedef unsigned long mayday_mask_t;
* per-CPU workqueues:
*/
struct workqueue_struct {
- unsigned int flags; /* I: WQ_* flags */
+ unsigned int flags; /* W: WQ_* flags */
union {
struct cpu_workqueue_struct __percpu *pcpu;
struct cpu_workqueue_struct *single;
@@ -240,6 +240,7 @@ struct workqueue_struct {
mayday_mask_t mayday_mask; /* cpus requesting rescue */
struct worker *rescuer; /* I: rescue worker */
+ int nr_drainers; /* W: drain in progress */
int saved_max_active; /* W: saved cwq max_active */
const char *name; /* I: workqueue name */
#ifdef CONFIG_LOCKDEP
@@ -990,7 +991,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
debug_work_activate(work);
/* if dying, only works from the same workqueue are allowed */
- if (unlikely(wq->flags & WQ_DYING) &&
+ if (unlikely(wq->flags & WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -2381,6 +2382,54 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(flush_workqueue);
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty. While draining is in progress,
+ * only chain queueing is allowed. IOW, only currently pending or running
+ * work items on @wq can queue further work items on it. @wq is flushed
+ * repeatedly until it becomes empty. The number of flushes is determined
+ * by the depth of chaining and should be relatively short. Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+ unsigned int flush_cnt = 0;
+ unsigned int cpu;
+
+ /*
+ * __queue_work() needs to test whether there are drainers; it is much
+ * hotter than drain_workqueue() and already looks at @wq->flags.
+ * Use WQ_DRAINING so that queueing doesn't have to check nr_drainers.
+ */
+ spin_lock(&workqueue_lock);
+ if (!wq->nr_drainers++)
+ wq->flags |= WQ_DRAINING;
+ spin_unlock(&workqueue_lock);
+reflush:
+ flush_workqueue(wq);
+
+ for_each_cwq_cpu(cpu, wq) {
+ struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+
+ if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+ continue;
+
+ if (++flush_cnt == 10 ||
+ (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+ pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+ wq->name, flush_cnt);
+ goto reflush;
+ }
+
+ spin_lock(&workqueue_lock);
+ if (!--wq->nr_drainers)
+ wq->flags &= ~WQ_DRAINING;
+ spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
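A minimal kernel-side usage sketch (illustrative only; the names are made up): a work item that re-queues a follow-up on the same workqueue is the "chain queueing" the comment above permits, and drain_workqueue() returns only after the whole chain has run.

    #include <linux/workqueue.h>

    static struct workqueue_struct *test_wq;
    static struct work_struct first, second;

    static void second_fn(struct work_struct *work) { }

    static void first_fn(struct work_struct *work)
    {
            /* chained queueing: allowed even while the wq is draining */
            queue_work(test_wq, &second);
    }

    static void example(void)
    {
            test_wq = alloc_workqueue("test_wq", 0, 0);
            INIT_WORK(&first, first_fn);
            INIT_WORK(&second, second_fn);
            queue_work(test_wq, &first);

            drain_workqueue(test_wq);       /* returns once both ran */
            destroy_workqueue(test_wq);
    }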
static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
bool wait_executing)
{
@@ -3009,34 +3058,10 @@ EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
*/
void destroy_workqueue(struct workqueue_struct *wq)
{
- unsigned int flush_cnt = 0;
unsigned int cpu;
- /*
- * Mark @wq dying and drain all pending works. Once WQ_DYING is
- * set, only chain queueing is allowed. IOW, only currently
- * pending or running work items on @wq can queue further work
- * items on it. @wq is flushed repeatedly until it becomes empty.
- * The number of flushing is detemined by the depth of chaining and
- * should be relatively short. Whine if it takes too long.
- */
- wq->flags |= WQ_DYING;
-reflush:
- flush_workqueue(wq);
-
- for_each_cwq_cpu(cpu, wq) {
- struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
-
- if (!cwq->nr_active && list_empty(&cwq->delayed_works))
- continue;
-
- if (++flush_cnt == 10 ||
- (flush_cnt % 100 == 0 && flush_cnt <= 1000))
- printk(KERN_WARNING "workqueue %s: flush on "
- "destruction isn't complete after %u tries\n",
- wq->name, flush_cnt);
- goto reflush;
- }
+ /* drain it before proceeding with destruction */
+ drain_workqueue(wq);
/*
* wq list is used to freeze wq, remove from list after
diff --git a/mm/nommu.c b/mm/nommu.c
index 9edc897a397..5c5c2d4b180 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -22,7 +22,6 @@
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
@@ -1087,7 +1086,7 @@ static unsigned long determine_vm_flags(struct file *file,
* it's being traced - otherwise breakpoints set in it may interfere
* with another untraced process
*/
- if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
+ if ((flags & MAP_PRIVATE) && current->ptrace)
vm_flags &= ~VM_MAYSHARE;
return vm_flags;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index e4b0991ca35..b0be989d436 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -339,8 +339,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
* then wait for it to finish before killing
* some other task unnecessarily.
*/
- if (!(task_ptrace(p->group_leader) &
- PT_TRACE_EXIT))
+ if (!(p->group_leader->ptrace & PT_TRACE_EXIT))
return ERR_PTR(-1UL);
}
}
diff --git a/mm/slab.c b/mm/slab.c
index d96e223de77..1e523ed47c6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -574,7 +574,9 @@ static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
+static struct kmem_list3 *cache_cache_nodelists[MAX_NUMNODES];
static struct kmem_cache cache_cache = {
+ .nodelists = cache_cache_nodelists,
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
@@ -1492,11 +1494,10 @@ void __init kmem_cache_init(void)
cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
/*
- * struct kmem_cache size depends on nr_node_ids, which
- * can be less than MAX_NUMNODES.
+ * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
*/
- cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
- nr_node_ids * sizeof(struct kmem_list3 *);
+ cache_cache.buffer_size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+ nr_node_ids * sizeof(struct kmem_list3 *);
#if DEBUG
cache_cache.obj_size = cache_cache.buffer_size;
#endif
@@ -2308,6 +2309,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (!cachep)
goto oops;
+ cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
#if DEBUG
cachep->obj_size = size;
@@ -3153,12 +3155,11 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON)
cachep->ctor(objp);
-#if ARCH_SLAB_MINALIGN
- if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
+ if (ARCH_SLAB_MINALIGN &&
+ ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
- objp, ARCH_SLAB_MINALIGN);
+ objp, (int)ARCH_SLAB_MINALIGN);
}
-#endif
return objp;
}
#else
diff --git a/mm/slob.c b/mm/slob.c
index 46e0aee33a2..0ae881831ae 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -482,6 +482,8 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
void *ret;
+ gfp &= gfp_allowed_mask;
+
lockdep_trace_alloc(gfp);
if (size < PAGE_SIZE - align) {
@@ -608,6 +610,10 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
{
void *b;
+ flags &= gfp_allowed_mask;
+
+ lockdep_trace_alloc(flags);
+
if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
diff --git a/mm/slub.c b/mm/slub.c
index 35f351f2619..ba83f3fd075 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -27,6 +27,7 @@
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
+#include <linux/stacktrace.h>
#include <trace/events/kmem.h>
@@ -191,8 +192,12 @@ static LIST_HEAD(slab_caches);
/*
* Tracking user of a slab.
*/
+#define TRACK_ADDRS_COUNT 16
struct track {
unsigned long addr; /* Called from address */
+#ifdef CONFIG_STACKTRACE
+	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Call stack of the operation */
+#endif
int cpu; /* Was running on cpu */
int pid; /* Pid context */
unsigned long when; /* When did the operation occur */
@@ -420,6 +425,24 @@ static void set_track(struct kmem_cache *s, void *object,
struct track *p = get_track(s, object, alloc);
if (addr) {
+#ifdef CONFIG_STACKTRACE
+ struct stack_trace trace;
+ int i;
+
+ trace.nr_entries = 0;
+ trace.max_entries = TRACK_ADDRS_COUNT;
+ trace.entries = p->addrs;
+ trace.skip = 3;
+ save_stack_trace(&trace);
+
+ /* See rant in lockdep.c */
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
+ p->addrs[i] = 0;
+#endif
p->addr = addr;
p->cpu = smp_processor_id();
p->pid = current->pid;
@@ -444,6 +467,16 @@ static void print_track(const char *s, struct track *t)
printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+#ifdef CONFIG_STACKTRACE
+ {
+ int i;
+ for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+ if (t->addrs[i])
+ printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
+ else
+ break;
+ }
+#endif
}
static void print_tracking(struct kmem_cache *s, void *object)
@@ -557,10 +590,10 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
memset(p + s->objsize, val, s->inuse - s->objsize);
}
-static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
+static u8 *check_bytes8(u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
- if (*start != (u8)value)
+ if (*start != value)
return start;
start++;
bytes--;
@@ -568,6 +601,38 @@ static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
return NULL;
}
+static u8 *check_bytes(u8 *start, u8 value, unsigned int bytes)
+{
+ u64 value64;
+ unsigned int words, prefix;
+
+ if (bytes <= 16)
+ return check_bytes8(start, value, bytes);
+
+	value64 = value;
+	value64 |= value64 << 8;
+	value64 |= value64 << 16;
+	value64 |= value64 << 32;
+
+	prefix = (unsigned long)start % 8;
+	if (prefix) {
+		u8 *r;
+
+		prefix = 8 - prefix;
+		r = check_bytes8(start, value, prefix);
+		if (r)
+			return r;
+		start += prefix;
+		bytes -= prefix;
+	}
+
+ words = bytes / 8;
+
+ while (words) {
+ if (*(u64 *)start != value64)
+ return check_bytes8(start, value, 8);
+ start += 8;
+ words--;
+ }
+
+ return check_bytes8(start, value, bytes % 8);
+}
+
static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
void *from, void *to)
{
@@ -2928,6 +2993,42 @@ size_t ksize(const void *object)
}
EXPORT_SYMBOL(ksize);
+#ifdef CONFIG_SLUB_DEBUG
+bool verify_mem_not_deleted(const void *x)
+{
+ struct page *page;
+ void *object = (void *)x;
+ unsigned long flags;
+ bool rv;
+
+ if (unlikely(ZERO_OR_NULL_PTR(x)))
+ return false;
+
+ local_irq_save(flags);
+
+ page = virt_to_head_page(x);
+ if (unlikely(!PageSlab(page))) {
+ /* maybe it was from stack? */
+ rv = true;
+ goto out_unlock;
+ }
+
+ slab_lock(page);
+ if (on_freelist(page->slab, page, object)) {
+ object_err(page->slab, page, object, "Object is on free-list");
+ rv = false;
+ } else {
+ rv = true;
+ }
+ slab_unlock(page);
+
+out_unlock:
+ local_irq_restore(flags);
+ return rv;
+}
+EXPORT_SYMBOL(verify_mem_not_deleted);
+#endif
+
void kfree(const void *x)
{
struct page *page;
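
The new check_bytes() scans eight bytes at a time after handling any unaligned prefix. The harness below exercises the same algorithm in userspace; note that the 64-bit pattern must be widened step by step, because OR-ing value << 24 as a plain int sign-extends once value has its high bit set:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t *check_bytes8(uint8_t *start, uint8_t value, unsigned int bytes)
{
	for (; bytes; bytes--, start++)
		if (*start != value)
			return start;
	return NULL;
}

static uint8_t *check_bytes(uint8_t *start, uint8_t value, unsigned int bytes)
{
	uint64_t value64;
	unsigned int words, prefix;

	if (bytes <= 16)
		return check_bytes8(start, value, bytes);

	/* Widen step by step so 0x80..0xff never sign-extends. */
	value64 = value;
	value64 |= value64 << 8;
	value64 |= value64 << 16;
	value64 |= value64 << 32;

	prefix = (uintptr_t)start % 8;
	if (prefix) {
		uint8_t *r;

		prefix = 8 - prefix;
		r = check_bytes8(start, value, prefix);
		if (r)
			return r;
		start += prefix;
		bytes -= prefix;
	}

	/* Word loop; on a mismatch, fall back to the byte scan to
	 * locate the exact offending byte within that word. */
	for (words = bytes / 8; words; words--, start += 8)
		if (*(uint64_t *)start != value64)
			return check_bytes8(start, value, 8);

	return check_bytes8(start, value, bytes % 8);
}

int main(void)
{
	uint8_t buf[64];

	memset(buf, 0xa5, sizeof(buf));	/* high bit set on purpose */
	buf[41] = 0;
	printf("mismatch at offset %td\n", check_bytes(buf, 0xa5, 64) - buf);
	return 0;
}
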
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 1bacca4cb67..3176e2e13d9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -388,7 +388,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
br_ifinfo_notify(RTM_NEWLINK, p);
if (changed_addr)
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);
dev_set_mtu(br->dev, br_min_mtu(br));
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 6814083a92f..5b1ed1ba9aa 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -188,6 +188,8 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
p->state = new_state;
br_log_state(p);
+ br_ifinfo_notify(RTM_NEWLINK, p);
+
return 0;
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 54578f274d8..78cc364997d 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -124,6 +124,7 @@ struct net_bridge_port
bridge_id designated_bridge;
u32 path_cost;
u32 designated_cost;
+ unsigned long designated_age;
struct timer_list forward_delay_timer;
struct timer_list hold_timer;
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 642ef47a867..05ed9bc7e42 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -56,7 +56,8 @@ extern void br_become_root_bridge(struct net_bridge *br);
extern void br_config_bpdu_generation(struct net_bridge *);
extern void br_configuration_update(struct net_bridge *);
extern void br_port_state_selection(struct net_bridge *);
-extern void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu);
+extern void br_received_config_bpdu(struct net_bridge_port *p,
+ const struct br_config_bpdu *bpdu);
extern void br_received_tcn_bpdu(struct net_bridge_port *p);
extern void br_transmit_config(struct net_bridge_port *p);
extern void br_transmit_tcn(struct net_bridge *br);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index bb4383e84de..ad0a3f7cf6c 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -109,7 +109,6 @@ static void br_root_selection(struct net_bridge *br)
list_for_each_entry(p, &br->port_list, list) {
if (br_should_become_root_port(p, root_port))
root_port = p->port_no;
-
}
br->root_port = root_port;
@@ -145,7 +144,6 @@ void br_transmit_config(struct net_bridge_port *p)
struct br_config_bpdu bpdu;
struct net_bridge *br;
-
if (timer_pending(&p->hold_timer)) {
p->config_pending = 1;
return;
@@ -164,8 +162,7 @@ void br_transmit_config(struct net_bridge_port *p)
else {
struct net_bridge_port *root
= br_get_port(br, br->root_port);
- bpdu.message_age = br->max_age
- - (root->message_age_timer.expires - jiffies)
+ bpdu.message_age = (jiffies - root->designated_age)
+ MESSAGE_AGE_INCR;
}
bpdu.max_age = br->max_age;
@@ -182,20 +179,21 @@ void br_transmit_config(struct net_bridge_port *p)
}
/* called under bridge lock */
-static inline void br_record_config_information(struct net_bridge_port *p,
- const struct br_config_bpdu *bpdu)
+static void br_record_config_information(struct net_bridge_port *p,
+ const struct br_config_bpdu *bpdu)
{
p->designated_root = bpdu->root;
p->designated_cost = bpdu->root_path_cost;
p->designated_bridge = bpdu->bridge_id;
p->designated_port = bpdu->port_id;
+	p->designated_age = jiffies - bpdu->message_age;
mod_timer(&p->message_age_timer, jiffies
+ (p->br->max_age - bpdu->message_age));
}
/* called under bridge lock */
-static inline void br_record_config_timeout_values(struct net_bridge *br,
+static void br_record_config_timeout_values(struct net_bridge *br,
const struct br_config_bpdu *bpdu)
{
br->max_age = bpdu->max_age;
@@ -254,7 +252,8 @@ static void br_designated_port_selection(struct net_bridge *br)
}
/* called under bridge lock */
-static int br_supersedes_port_info(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
+static int br_supersedes_port_info(const struct net_bridge_port *p,
+ const struct br_config_bpdu *bpdu)
{
int t;
@@ -285,7 +284,7 @@ static int br_supersedes_port_info(struct net_bridge_port *p, struct br_config_b
}
/* called under bridge lock */
-static inline void br_topology_change_acknowledged(struct net_bridge *br)
+static void br_topology_change_acknowledged(struct net_bridge *br)
{
br->topology_change_detected = 0;
del_timer(&br->tcn_timer);
@@ -327,7 +326,7 @@ void br_config_bpdu_generation(struct net_bridge *br)
}
/* called under bridge lock */
-static inline void br_reply(struct net_bridge_port *p)
+static void br_reply(struct net_bridge_port *p)
{
br_transmit_config(p);
}
@@ -363,6 +362,8 @@ static void br_make_blocking(struct net_bridge_port *p)
p->state = BR_STATE_BLOCKING;
br_log_state(p);
+ br_ifinfo_notify(RTM_NEWLINK, p);
+
del_timer(&p->forward_delay_timer);
}
}
@@ -379,15 +380,14 @@ static void br_make_forwarding(struct net_bridge_port *p)
p->state = BR_STATE_FORWARDING;
br_topology_change_detection(br);
del_timer(&p->forward_delay_timer);
- }
- else if (br->stp_enabled == BR_KERNEL_STP)
+ } else if (br->stp_enabled == BR_KERNEL_STP)
p->state = BR_STATE_LISTENING;
else
p->state = BR_STATE_LEARNING;
br_multicast_enable_port(p);
-
br_log_state(p);
+ br_ifinfo_notify(RTM_NEWLINK, p);
if (br->forward_delay != 0)
mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay);
@@ -431,14 +431,15 @@ void br_port_state_selection(struct net_bridge *br)
}
/* called under bridge lock */
-static inline void br_topology_change_acknowledge(struct net_bridge_port *p)
+static void br_topology_change_acknowledge(struct net_bridge_port *p)
{
p->topology_change_ack = 1;
br_transmit_config(p);
}
/* called under bridge lock */
-void br_received_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
+void br_received_config_bpdu(struct net_bridge_port *p,
+ const struct br_config_bpdu *bpdu)
{
struct net_bridge *br;
int was_root;
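
Instead of reconstructing a BPDU's age from the pending message_age_timer, the port now stamps designated_age with the notional origination time of the learned information (now minus the age it arrived with), and ages outgoing config BPDUs from that stamp. A small arithmetic sketch in jiffies-style ticks (names mirror the diff; the program itself is illustrative):

#include <stdio.h>

#define MESSAGE_AGE_INCR 1		/* illustrative: one tick */

static unsigned long jiffies;		/* stand-in for the kernel counter */

int main(void)
{
	unsigned long designated_age, message_age;

	/* Receive a config BPDU that is already 3 ticks old. */
	jiffies = 1000;
	designated_age = jiffies - 3;	/* notional origination time */

	/* 250 ticks later, the relayed BPDU must carry 3 + 250 (+incr). */
	jiffies = 1250;
	message_age = (jiffies - designated_age) + MESSAGE_AGE_INCR;
	printf("message_age = %lu\n", message_age);	/* 254 */
	return 0;
}
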
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 289646ec9b7..e16aade51ae 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -210,10 +210,19 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
bpdu.hello_time = br_get_ticks(buf+28);
bpdu.forward_delay = br_get_ticks(buf+30);
- br_received_config_bpdu(p, &bpdu);
- }
+ if (bpdu.message_age > bpdu.max_age) {
+ if (net_ratelimit())
+ br_notice(p->br,
+ "port %u config from %pM"
+				  " (message_age %u > max_age %u)\n",
+ p->port_no,
+ eth_hdr(skb)->h_source,
+ bpdu.message_age, bpdu.max_age);
+ goto out;
+ }
- else if (buf[0] == BPDU_TYPE_TCN) {
+ br_received_config_bpdu(p, &bpdu);
+ } else if (buf[0] == BPDU_TYPE_TCN) {
br_received_tcn_bpdu(p);
}
out:
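
br_stp_rcv() now refuses to feed the state machine a config BPDU whose claimed message age already exceeds its own max_age, logging and dropping it instead. The check reduces to a one-line sanity predicate, modelled here in userspace:

#include <stdio.h>

struct config_bpdu { int message_age; int max_age; };

/* Reject information that is already expired on arrival. */
static int bpdu_sane(const struct config_bpdu *b)
{
	return b->message_age <= b->max_age;
}

int main(void)
{
	struct config_bpdu ok = { .message_age = 2, .max_age = 20 };
	struct config_bpdu stale = { .message_age = 21, .max_age = 20 };

	printf("%d %d\n", bpdu_sane(&ok), bpdu_sane(&stale));	/* 1 0 */
	return 0;
}
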
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 6f615b8192f..10eda3cd1d7 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -88,6 +88,7 @@ void br_stp_enable_port(struct net_bridge_port *p)
br_init_port(p);
br_port_state_selection(p->br);
br_log_state(p);
+ br_ifinfo_notify(RTM_NEWLINK, p);
}
/* called under bridge lock */
@@ -104,6 +105,8 @@ void br_stp_disable_port(struct net_bridge_port *p)
p->topology_change_ack = 0;
p->config_pending = 0;
+ br_ifinfo_notify(RTM_NEWLINK, p);
+
del_timer(&p->message_age_timer);
del_timer(&p->forward_delay_timer);
del_timer(&p->hold_timer);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 3e965140051..58de2a0f997 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -97,6 +97,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
netif_carrier_on(br->dev);
}
br_log_state(p);
+ br_ifinfo_notify(RTM_NEWLINK, p);
spin_unlock(&br->lock);
}
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index a7b34213186..357bd4ee4ba 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -126,7 +126,7 @@ static void linkwatch_schedule_work(int urgent)
return;
/* It's already running which is good enough. */
- if (!cancel_delayed_work(&linkwatch_work))
+ if (!__cancel_delayed_work(&linkwatch_work))
return;
/* Otherwise we reschedule it again for immediate execution. */
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index c825c6e0b63..7312bf9f7af 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -67,7 +67,7 @@ static int may_change_ptraced_domain(struct task_struct *task,
int error = 0;
rcu_read_lock();
- tracer = tracehook_tracer_task(task);
+ tracer = ptrace_parent(task);
if (tracer) {
/* released below */
cred = get_task_cred(tracer);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 20219ef5439..422515509f3 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2053,7 +2053,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
u32 ptsid = 0;
rcu_read_lock();
- tracer = tracehook_tracer_task(current);
+ tracer = ptrace_parent(current);
if (likely(tracer != NULL)) {
sec = __task_cred(tracer)->security;
ptsid = sec->sid;
@@ -5319,7 +5319,7 @@ static int selinux_setprocattr(struct task_struct *p,
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
task_lock(p);
- tracer = tracehook_tracer_task(p);
+ tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
task_unlock(p);
diff --git a/sound/soc/ep93xx/ep93xx-ac97.c b/sound/soc/ep93xx/ep93xx-ac97.c
index 104e95cda0a..c7417c76552 100644
--- a/sound/soc/ep93xx/ep93xx-ac97.c
+++ b/sound/soc/ep93xx/ep93xx-ac97.c
@@ -106,12 +106,12 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info;
static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_out = {
.name = "ac97-pcm-out",
- .dma_port = EP93XX_DMA_M2P_PORT_AAC1,
+ .dma_port = EP93XX_DMA_AAC1,
};
static struct ep93xx_pcm_dma_params ep93xx_ac97_pcm_in = {
.name = "ac97-pcm-in",
- .dma_port = EP93XX_DMA_M2P_PORT_AAC1,
+ .dma_port = EP93XX_DMA_AAC1,
};
static inline unsigned ep93xx_ac97_read_reg(struct ep93xx_ac97_info *info,
diff --git a/sound/soc/ep93xx/ep93xx-i2s.c b/sound/soc/ep93xx/ep93xx-i2s.c
index 042f4e93746..30df42568db 100644
--- a/sound/soc/ep93xx/ep93xx-i2s.c
+++ b/sound/soc/ep93xx/ep93xx-i2s.c
@@ -70,11 +70,11 @@ struct ep93xx_i2s_info {
struct ep93xx_pcm_dma_params ep93xx_i2s_dma_params[] = {
[SNDRV_PCM_STREAM_PLAYBACK] = {
.name = "i2s-pcm-out",
- .dma_port = EP93XX_DMA_M2P_PORT_I2S1,
+ .dma_port = EP93XX_DMA_I2S1,
},
[SNDRV_PCM_STREAM_CAPTURE] = {
.name = "i2s-pcm-in",
- .dma_port = EP93XX_DMA_M2P_PORT_I2S1,
+ .dma_port = EP93XX_DMA_I2S1,
},
};
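
The PCM driver rewrite below drops the ep93xx-private M2P client callbacks in favour of the generic dmaengine API: request a DMA_SLAVE|DMA_CYCLIC channel through a filter function, then describe the whole period ring with one cyclic descriptor whose callback fires once per period. A condensed sketch of that call sequence, using the names from the diff (kernel context assumed, error handling trimmed; not a standalone program):

/* Sketch of the dmaengine cyclic setup adopted by the diff below. */
dma_cap_mask_t mask;
struct dma_chan *chan;
struct dma_async_tx_descriptor *desc;

dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
dma_cap_set(DMA_CYCLIC, mask);

/* The filter picks a channel of the right direction and hands the
 * platform data to the DMA driver via chan->private. */
chan = dma_request_channel(mask, ep93xx_pcm_dma_filter, &dma_data);

/* One descriptor covers the whole buffer; the engine wraps and
 * invokes the callback every period_bytes. */
desc = chan->device->device_prep_dma_cyclic(chan, dma_addr,
					    period_bytes * periods,
					    period_bytes,
					    DMA_TO_DEVICE);
desc->callback = ep93xx_pcm_dma_callback;
desc->callback_param = substream;
dmaengine_submit(desc);
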
diff --git a/sound/soc/ep93xx/ep93xx-pcm.c b/sound/soc/ep93xx/ep93xx-pcm.c
index a456e491155..a07f99c9c37 100644
--- a/sound/soc/ep93xx/ep93xx-pcm.c
+++ b/sound/soc/ep93xx/ep93xx-pcm.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
+#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
@@ -53,43 +54,34 @@ static const struct snd_pcm_hardware ep93xx_pcm_hardware = {
struct ep93xx_runtime_data
{
- struct ep93xx_dma_m2p_client cl;
- struct ep93xx_pcm_dma_params *params;
int pointer_bytes;
- struct tasklet_struct period_tasklet;
int periods;
- struct ep93xx_dma_buffer buf[32];
+ int period_bytes;
+ struct dma_chan *dma_chan;
+ struct ep93xx_dma_data dma_data;
};
-static void ep93xx_pcm_period_elapsed(unsigned long data)
+static void ep93xx_pcm_dma_callback(void *data)
{
- struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data;
- snd_pcm_period_elapsed(substream);
-}
+ struct snd_pcm_substream *substream = data;
+ struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
-static void ep93xx_pcm_buffer_started(void *cookie,
- struct ep93xx_dma_buffer *buf)
-{
+ rtd->pointer_bytes += rtd->period_bytes;
+ rtd->pointer_bytes %= rtd->period_bytes * rtd->periods;
+
+ snd_pcm_period_elapsed(substream);
}
-static void ep93xx_pcm_buffer_finished(void *cookie,
- struct ep93xx_dma_buffer *buf,
- int bytes, int error)
+static bool ep93xx_pcm_dma_filter(struct dma_chan *chan, void *filter_param)
{
- struct snd_pcm_substream *substream = cookie;
- struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
-
- if (buf == rtd->buf + rtd->periods - 1)
- rtd->pointer_bytes = 0;
- else
- rtd->pointer_bytes += buf->size;
+ struct ep93xx_dma_data *data = filter_param;
- if (!error) {
- ep93xx_dma_m2p_submit_recursive(&rtd->cl, buf);
- tasklet_schedule(&rtd->period_tasklet);
- } else {
- snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
+ if (data->direction == ep93xx_dma_chan_direction(chan)) {
+ chan->private = data;
+ return true;
}
+
+ return false;
}
static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
@@ -98,30 +90,38 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
struct snd_soc_dai *cpu_dai = soc_rtd->cpu_dai;
struct ep93xx_pcm_dma_params *dma_params;
struct ep93xx_runtime_data *rtd;
+ dma_cap_mask_t mask;
int ret;
- dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ ret = snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (ret < 0)
+ return ret;
+
snd_soc_set_runtime_hwparams(substream, &ep93xx_pcm_hardware);
rtd = kmalloc(sizeof(*rtd), GFP_KERNEL);
if (!rtd)
return -ENOMEM;
- memset(&rtd->period_tasklet, 0, sizeof(rtd->period_tasklet));
- rtd->period_tasklet.func = ep93xx_pcm_period_elapsed;
- rtd->period_tasklet.data = (unsigned long)substream;
-
- rtd->cl.name = dma_params->name;
- rtd->cl.flags = dma_params->dma_port | EP93XX_DMA_M2P_IGNORE_ERROR |
- ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
- EP93XX_DMA_M2P_TX : EP93XX_DMA_M2P_RX);
- rtd->cl.cookie = substream;
- rtd->cl.buffer_started = ep93xx_pcm_buffer_started;
- rtd->cl.buffer_finished = ep93xx_pcm_buffer_finished;
- ret = ep93xx_dma_m2p_client_register(&rtd->cl);
- if (ret < 0) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ dma_params = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ rtd->dma_data.port = dma_params->dma_port;
+ rtd->dma_data.name = dma_params->name;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ rtd->dma_data.direction = DMA_TO_DEVICE;
+ else
+ rtd->dma_data.direction = DMA_FROM_DEVICE;
+
+ rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
+ &rtd->dma_data);
+ if (!rtd->dma_chan) {
kfree(rtd);
- return ret;
+ return -EINVAL;
}
substream->runtime->private_data = rtd;
@@ -132,31 +132,52 @@ static int ep93xx_pcm_close(struct snd_pcm_substream *substream)
{
struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
- ep93xx_dma_m2p_client_unregister(&rtd->cl);
+ dma_release_channel(rtd->dma_chan);
kfree(rtd);
return 0;
}
+static int ep93xx_pcm_dma_submit(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct ep93xx_runtime_data *rtd = runtime->private_data;
+ struct dma_chan *chan = rtd->dma_chan;
+ struct dma_device *dma_dev = chan->device;
+ struct dma_async_tx_descriptor *desc;
+
+ rtd->pointer_bytes = 0;
+ desc = dma_dev->device_prep_dma_cyclic(chan, runtime->dma_addr,
+ rtd->period_bytes * rtd->periods,
+ rtd->period_bytes,
+ rtd->dma_data.direction);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = ep93xx_pcm_dma_callback;
+ desc->callback_param = substream;
+
+ dmaengine_submit(desc);
+ return 0;
+}
+
+static void ep93xx_pcm_dma_flush(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct ep93xx_runtime_data *rtd = runtime->private_data;
+
+ dmaengine_terminate_all(rtd->dma_chan);
+}
+
static int ep93xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct ep93xx_runtime_data *rtd = runtime->private_data;
- size_t totsize = params_buffer_bytes(params);
- size_t period = params_period_bytes(params);
- int i;
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
- runtime->dma_bytes = totsize;
-
- rtd->periods = (totsize + period - 1) / period;
- for (i = 0; i < rtd->periods; i++) {
- rtd->buf[i].bus_addr = runtime->dma_addr + (i * period);
- rtd->buf[i].size = period;
- if ((i + 1) * period > totsize)
- rtd->buf[i].size = totsize - (i * period);
- }
+ rtd->periods = params_periods(params);
+ rtd->period_bytes = params_period_bytes(params);
return 0;
}
@@ -168,24 +189,20 @@ static int ep93xx_pcm_hw_free(struct snd_pcm_substream *substream)
static int ep93xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct ep93xx_runtime_data *rtd = substream->runtime->private_data;
int ret;
- int i;
ret = 0;
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- rtd->pointer_bytes = 0;
- for (i = 0; i < rtd->periods; i++)
- ep93xx_dma_m2p_submit(&rtd->cl, rtd->buf + i);
+ ret = ep93xx_pcm_dma_submit(substream);
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- ep93xx_dma_m2p_flush(&rtd->cl);
+ ep93xx_pcm_dma_flush(substream);
break;
default: